diff --git a/Cargo.lock b/Cargo.lock index e4a71bd9..54d8c7a0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2833,6 +2833,7 @@ dependencies = [ "fendermint_vm_interpreter", "fendermint_vm_message", "fendermint_vm_resolver", + "fendermint_vm_topdown", "fvm", "fvm_ipld_blockstore", "fvm_ipld_car", @@ -2893,8 +2894,10 @@ dependencies = [ "config 0.13.3", "dirs", "fendermint_vm_encoding", + "fendermint_vm_topdown", "fvm_ipld_encoding 0.3.3", "fvm_shared", + "ipc-provider", "ipc-sdk", "multiaddr 0.16.0", "serde", @@ -3138,6 +3141,7 @@ dependencies = [ "fendermint_vm_genesis", "fendermint_vm_message", "fendermint_vm_resolver", + "fendermint_vm_topdown", "futures-core", "futures-util", "fvm", @@ -3146,6 +3150,7 @@ dependencies = [ "fvm_ipld_encoding 0.3.3", "fvm_shared", "hex", + "ipc-sdk", "ipc_actors_abis", "libipld", "num-traits", @@ -3160,7 +3165,7 @@ dependencies = [ "thiserror", "tokio", "tokio-stream", - "tokio-util 0.7.9", + "tokio-util 0.7.10", "tracing", ] @@ -4105,7 +4110,7 @@ dependencies = [ "indexmap 1.9.3", "slab", "tokio", - "tokio-util 0.7.9", + "tokio-util 0.7.10", "tracing", ] @@ -4632,7 +4637,7 @@ dependencies = [ [[package]] name = "ipc-identity" version = "0.1.0" -source = "git+https://github.com/consensus-shipyard/ipc.git?branch=dev#25e060715fc03859ae5db4a6d1326a6e7fdf2df0" +source = "git+https://github.com/consensus-shipyard/ipc.git?branch=dev#dcfb0e99eed4c9781b1d634cb122c521e718cd07" dependencies = [ "ahash 0.8.6", "anyhow", @@ -4659,7 +4664,7 @@ dependencies = [ [[package]] name = "ipc-provider" version = "0.1.0" -source = "git+https://github.com/consensus-shipyard/ipc.git?branch=dev#25e060715fc03859ae5db4a6d1326a6e7fdf2df0" +source = "git+https://github.com/consensus-shipyard/ipc.git?branch=dev#dcfb0e99eed4c9781b1d634cb122c521e718cd07" dependencies = [ "anyhow", "async-channel", @@ -4667,6 +4672,7 @@ dependencies = [ "base64 0.21.5", "bytes", "cid", + "dirs", "ethers", "ethers-contract", "fil_actors_runtime 0.0.1", @@ -4698,7 +4704,7 @@ dependencies = [ [[package]] name = "ipc-sdk" version = "0.1.0" -source = "git+https://github.com/consensus-shipyard/ipc.git?branch=dev#25e060715fc03859ae5db4a6d1326a6e7fdf2df0" +source = "git+https://github.com/consensus-shipyard/ipc.git?branch=dev#dcfb0e99eed4c9781b1d634cb122c521e718cd07" dependencies = [ "anyhow", "cid", @@ -4722,7 +4728,7 @@ dependencies = [ [[package]] name = "ipc_actors_abis" version = "0.1.0" -source = "git+https://github.com/consensus-shipyard/ipc-solidity-actors.git?branch=dev#7ecad9a249e9a93f5ea28dd3db1080ce5a7d6270" +source = "git+https://github.com/consensus-shipyard/ipc-solidity-actors.git?branch=dev#3f72d21d0ad601fbb673b6a9bc650d14f7be9c9f" dependencies = [ "anyhow", "ethers", @@ -5523,7 +5529,7 @@ dependencies = [ "thiserror", "tinytemplate", "tokio", - "tokio-util 0.7.9", + "tokio-util 0.7.10", "webrtc", ] @@ -9194,9 +9200,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.9" +version = "0.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d68074620f57a0b21594d9735eb2e98ab38b17f80d3fcb189fca266771ca60d" +checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15" dependencies = [ "bytes", "futures-core", @@ -9265,7 +9271,7 @@ dependencies = [ "rand 0.8.5", "slab", "tokio", - "tokio-util 0.7.9", + "tokio-util 0.7.10", "tower-layer", "tower-service", "tracing", @@ -10658,18 +10664,18 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.7.14" +version = "0.7.15" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "69c48d63854f77746c68a5fbb4aa17f3997ece1cb301689a257af8cb80610d21" +checksum = "81ba595b9f2772fbee2312de30eeb80ec773b4cb2f1e8098db024afadda6c06f" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.14" +version = "0.7.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c258c1040279e4f88763a113de72ce32dde2d50e2a94573f15dd534cea36a16d" +checksum = "772666c41fb6dceaf520b564b962d738a8e1a83b41bd48945f50837aed78bb1d" dependencies = [ "proc-macro2", "quote", diff --git a/docs/running.md b/docs/running.md index 82930acf..e58bfc61 100644 --- a/docs/running.md +++ b/docs/running.md @@ -26,6 +26,11 @@ In the following sections we will create a Genesis file for a network named `tes mkdir test-network ``` +If you are running in test network, define the network using env variable. +```shell +export FM_NETWORK=test +``` + ### Create a new Genesis file First, create a new `genesis.json` file devoid of accounts and validators. The `--base-fee` here is completely arbitrary. @@ -33,7 +38,7 @@ The `--power-scale` value of `0` means we'll grant 1 voting power per 1 FIL; to to use milliFIL for example. ```shell -cargo run -p fendermint_app -- \ +cargo run -p fendermint_app --release -- \ genesis --genesis-file test-network/genesis.json \ new \ --chain-name test \ @@ -64,7 +69,7 @@ Next, let's create some cryptographic key pairs we want want to use either for a ```shell mkdir test-network/keys for NAME in alice bob charlie dave; do - cargo run -p fendermint_app -- key gen --out-dir test-network/keys --name $NAME; + cargo run -p fendermint_app --release -- key gen --out-dir test-network/keys --name $NAME; done ``` @@ -83,7 +88,7 @@ Ak5Juk793ZAg/7Ojj4bzOmIFGpwLhET1vg2ROihUJFkq Add one of the keys we created to the Genesis file as a stand-alone account: ```shell - cargo run -p fendermint_app -- \ + cargo run -p fendermint_app --release -- \ genesis --genesis-file test-network/genesis.json \ add-account --public-key test-network/keys/alice.pk --balance 10 ``` @@ -110,7 +115,7 @@ but it has to be one based on a public key, otherwise we would not be able to va Let's add an example of the other possible account type, a multi-sig account: ```shell -cargo run -p fendermint_app -- \ +cargo run -p fendermint_app --release -- \ genesis --genesis-file test-network/genesis.json \ add-multisig --public-key test-network/keys/bob.pk --public-key test-network/keys/charlie.pk --public-key test-network/keys/dave.pk \ --threshold 2 --vesting-start 0 --vesting-duration 1000000 --balance 30 @@ -142,7 +147,7 @@ $ cat test-network/genesis.json | jq .accounts[1] Finally, let's add one validator to the Genesis, with a monopoly on voting power, so we can run a standalone node: ```shell -cargo run -p fendermint_app -- \ +cargo run -p fendermint_app --release -- \ genesis --genesis-file test-network/genesis.json \ add-validator --public-key test-network/keys/bob.pk --power 1; ``` @@ -166,6 +171,32 @@ The public key was spliced in as it was, in base64 format, which is how it would own genesis file format. Note that here we don't have the option to use `Address`, because we have to return these as actual `PublicKey` types to Tendermint through ABCI, not as a hash of a key. 
+### (Optional) Add IPC to the Genesis file + +If you need IPC-related functionality, add the subnet info to the Genesis file, using the ID of the deployed subnet, e.g. `/r31415926`: + +```shell +cargo run -p fendermint_app --release -- \ + genesis --genesis-file test-network/genesis.json \ + ipc \ + gateway --subnet-id /r31415926 \ + --bottom-up-check-period 10 \ + --msg-fee 1 --majority-percentage 65 --min-collateral 1 +``` +Check the result: +```console +$ cat test-network/genesis.json | jq .ipc +{ + "gateway": { + "subnet_id": "/r31415926", + "bottom_up_check_period": 10, + "top_down_check_period": 10, + "msg_fee": "1", + "majority_percentage": 65 + } +} +``` + ### Configure CometBFT First, follow the instructions in [getting started with CometBFT](./tendermint.md) to install the binary, @@ -191,7 +222,7 @@ file we created earlier to the format CometBFT accepts. Start with the genesis f ```shell mv ~/.cometbft/config/genesis.json ~/.cometbft/config/genesis.json.orig -cargo run -p fendermint_app -- \ +cargo run -p fendermint_app --release -- \ genesis --genesis-file test-network/genesis.json \ into-tendermint --out ~/.cometbft/config/genesis.json ``` @@ -281,7 +312,7 @@ one of the validators we created. ```shell mv ~/.cometbft/config/priv_validator_key.json ~/.cometbft/config/priv_validator_key.json.orig -cargo run -p fendermint_app -- \ +cargo run -p fendermint_app --release -- \ key into-tendermint --secret-key test-network/keys/bob.sk --out ~/.cometbft/config/priv_validator_key.json ``` diff --git a/fendermint/app/Cargo.toml b/fendermint/app/Cargo.toml index dbbb8d7a..3010abe9 100644 --- a/fendermint/app/Cargo.toml +++ b/fendermint/app/Cargo.toml @@ -48,6 +48,7 @@ fendermint_vm_genesis = { path = "../vm/genesis" } fendermint_vm_interpreter = { path = "../vm/interpreter", features = ["bundle"] } fendermint_vm_message = { path = "../vm/message" } fendermint_vm_resolver = { path = "../vm/resolver" } +fendermint_vm_topdown = { path = "../vm/topdown" } fvm = { workspace = true } fvm_ipld_blockstore = { workspace = true } @@ -55,8 +56,8 @@ fvm_ipld_car = { workspace = true } fvm_ipld_encoding = { workspace = true } fvm_shared = { workspace = true } ipc-sdk = { workspace = true } -ipc_ipld_resolver = { workspace = true } ipc-provider = { workspace = true } +ipc_ipld_resolver = { workspace = true } [dev-dependencies] tempfile = { workspace = true } diff --git a/fendermint/app/config/default.toml b/fendermint/app/config/default.toml index 6b08442e..a4553b30 100644 --- a/fendermint/app/config/default.toml +++ b/fendermint/app/config/default.toml @@ -168,3 +168,7 @@ event_buffer_capacity = 100 rate_limit_bytes = 0 # Length of the time period at which the consumption limit fills. 0 means no limit.
rate_limit_period = 0 + +# IPC related configuration parameters +[ipc] +subnet_id = "/r0" \ No newline at end of file diff --git a/fendermint/app/settings/Cargo.toml b/fendermint/app/settings/Cargo.toml index 18c12e12..e27b83e5 100644 --- a/fendermint/app/settings/Cargo.toml +++ b/fendermint/app/settings/Cargo.toml @@ -21,5 +21,7 @@ tendermint-rpc = { workspace = true } fvm_shared = { workspace = true } fvm_ipld_encoding = { workspace = true } ipc-sdk = { workspace = true } +ipc-provider = { workspace = true } fendermint_vm_encoding = { path = "../../vm/encoding" } +fendermint_vm_topdown = { path = "../../vm/topdown" } diff --git a/fendermint/app/settings/src/lib.rs b/fendermint/app/settings/src/lib.rs index 1f3c3df3..d8652020 100644 --- a/fendermint/app/settings/src/lib.rs +++ b/fendermint/app/settings/src/lib.rs @@ -1,19 +1,24 @@ // Copyright 2022-2023 Protocol Labs // SPDX-License-Identifier: Apache-2.0, MIT -use anyhow::Context; +use anyhow::{anyhow, Context}; use config::{Config, ConfigError, Environment, File}; +use fvm_shared::address::Address; use fvm_shared::econ::TokenAmount; use ipc_sdk::subnet_id::SubnetID; use serde::Deserialize; +use serde_with::{serde_as, DurationSeconds}; use std::path::{Path, PathBuf}; +use std::time::Duration; use tendermint_rpc::Url; use fendermint_vm_encoding::{human_readable_delegate, human_readable_str}; +use fendermint_vm_topdown::BlockHeight; use self::eth::EthSettings; use self::fvm::FvmSettings; use self::resolver::ResolverSettings; +use ipc_provider::config::deserialize::deserialize_eth_address_from_str; pub mod eth; pub mod fvm; @@ -93,6 +98,54 @@ pub struct BroadcastSettings { pub max_retries: u8, } +#[serde_as] +#[derive(Debug, Deserialize, Clone)] +pub struct TopDownConfig { + /// The number of blocks to delay before reporting a height as final on the parent chain. + /// By proposing a height a certain number of epochs behind the latest one, we aim to be + /// conservative and avoid others rejecting the proposal because they don't see the + /// height as final yet. + pub chain_head_delay: BlockHeight, + /// Parent syncing cron period, in seconds + #[serde_as(as = "DurationSeconds<u64>")] + pub polling_interval: Duration, + /// Base interval for top-down exponential backoff retries + #[serde_as(as = "DurationSeconds<u64>")] + pub exponential_back_off: Duration, + /// The max number of retries for exponential backoff before giving up + pub exponential_retry_limit: usize, + /// The parent RPC HTTP endpoint + pub parent_http_endpoint: Url, + /// The parent registry address + #[serde(deserialize_with = "deserialize_eth_address_from_str")] + pub parent_registry: Address, + /// The parent gateway address + #[serde(deserialize_with = "deserialize_eth_address_from_str")] + pub parent_gateway: Address, +} + +#[serde_as] +#[derive(Debug, Deserialize, Clone)] +pub struct IpcSettings { + #[serde_as(as = "IsHumanReadable")] + pub subnet_id: SubnetID, + /// The config for top-down checkpointing. It's `None` if the subnet is the root or is not + /// running any top-down checkpoint related operations. + pub topdown: Option<TopDownConfig>, +} + +impl IpcSettings { + pub fn is_topdown_enabled(&self) -> bool { + !self.subnet_id.is_root() && self.topdown.is_some() + } + + pub fn topdown_config(&self) -> anyhow::Result<&TopDownConfig> { + self.topdown + .as_ref() + .ok_or_else(|| anyhow!("top down config missing")) + } +} + #[derive(Debug, Deserialize, Clone)] pub struct Settings { /// Home directory configured on the CLI, to which all paths in settings can be set relative.
@@ -115,6 +168,7 @@ pub struct Settings { pub fvm: FvmSettings, pub resolver: ResolverSettings, pub broadcast: BroadcastSettings, + pub ipc: IpcSettings, } #[macro_export] diff --git a/fendermint/app/src/app.rs b/fendermint/app/src/app.rs index 1717ff92..0d6927c7 100644 --- a/fendermint/app/src/app.rs +++ b/fendermint/app/src/app.rs @@ -17,7 +17,9 @@ use fendermint_vm_genesis::{Power, Validator}; use fendermint_vm_interpreter::bytes::{ BytesMessageApplyRes, BytesMessageCheckRes, BytesMessageQuery, BytesMessageQueryRes, }; -use fendermint_vm_interpreter::chain::{ChainMessageApplyRet, CheckpointPool, IllegalMessage}; +use fendermint_vm_interpreter::chain::{ + ChainMessageApplyRet, CheckpointPool, IllegalMessage, TopDownFinalityProvider, +}; use fendermint_vm_interpreter::fvm::state::{ empty_state_tree, CheckStateRef, FvmExecState, FvmGenesisState, FvmQueryState, FvmStateParams, }; @@ -31,6 +33,7 @@ use fendermint_vm_message::query::FvmQueryHeight; use fvm::engine::MultiEngine; use fvm_ipld_blockstore::Blockstore; use fvm_shared::chainid::ChainID; +use fvm_shared::clock::ChainEpoch; use fvm_shared::econ::TokenAmount; use fvm_shared::version::NetworkVersion; use num_traits::Zero; @@ -157,6 +160,8 @@ where interpreter: Arc<I>, /// CID resolution pool. resolve_pool: CheckpointPool, + /// The parent finality provider for top-down checkpoints + parent_finality_provider: TopDownFinalityProvider, /// State accumulating changes during block execution. exec_state: Arc<Mutex<Option<FvmExecState<SS>>>>, /// Projected (partial) state accumulating during transaction checks. @@ -183,6 +188,7 @@ where state_store: SS, interpreter: I, resolve_pool: CheckpointPool, + parent_finality_provider: TopDownFinalityProvider, ) -> Result<Self> { let app = Self { db: Arc::new(db), @@ -194,6 +200,7 @@ state_hist_size: config.state_hist_size, interpreter: Arc::new(interpreter), resolve_pool, + parent_finality_provider, exec_state: Arc::new(Mutex::new(None)), check_state: Arc::new(tokio::sync::Mutex::new(None)), }; @@ -298,15 +305,55 @@ where /// Take the execution state, update it, put it back, return the output. async fn modify_exec_state<T, F, R>(&self, f: F) -> Result<T> where - F: FnOnce((CheckpointPool, FvmExecState<SS>)) -> R, - R: Future<Output = Result<((CheckpointPool, FvmExecState<SS>), T)>>, + F: FnOnce((CheckpointPool, TopDownFinalityProvider, FvmExecState<SS>)) -> R, + R: Future< + Output = Result<( + (CheckpointPool, TopDownFinalityProvider, FvmExecState<SS>), + T, + )>, + >, { let state = self.take_exec_state(); - let ((_pool, state), ret) = f((self.resolve_pool.clone(), state)).await?; + let ((_pool, _provider, state), ret) = f(( + self.resolve_pool.clone(), + self.parent_finality_provider.clone(), + state, + )) + .await?; self.put_exec_state(state); Ok(ret) } + /// Get a read-only FVM execution state. This is useful to perform query commands targeting + /// the latest state. + pub fn new_read_only_exec_state( + &self, + ) -> Result<Option<FvmExecState<ReadOnlyBlockstore<Arc<SS>>>>> { + let maybe_app_state = self.get_committed_state()?; + + Ok(if let Some(app_state) = maybe_app_state { + let block_height = app_state.block_height; + let state_params = app_state.state_params; + + // wait for block production + if block_height == 0 { + return Ok(None); + } + + let exec_state = FvmExecState::new( + ReadOnlyBlockstore::new(self.state_store.clone()), + self.multi_engine.as_ref(), + block_height as ChainEpoch, + state_params, + ) + .context("error creating execution state")?; + + Some(exec_state) + } else { + None + }) + } + /// Look up a past state at a particular height Tendermint Core is looking for.
/// /// A height of zero means we are looking for the latest state. @@ -356,9 +403,9 @@ where Genesis = Vec<u8>, Output = FvmGenesisOutput, >, - I: ProposalInterpreter<State = CheckpointPool, Message = Vec<u8>>, + I: ProposalInterpreter<State = (CheckpointPool, TopDownFinalityProvider), Message = Vec<u8>>, I: ExecInterpreter< - State = (CheckpointPool, FvmExecState<SS>), + State = (CheckpointPool, TopDownFinalityProvider, FvmExecState<SS>), Message = Vec<u8>, BeginOutput = FvmApplyRet, DeliverOutput = BytesMessageApplyRes, @@ -554,7 +601,13 @@ let txs = self .interpreter - .prepare(self.resolve_pool.clone(), txs) + .prepare( + ( + self.resolve_pool.clone(), + self.parent_finality_provider.clone(), + ), + txs, + ) .await .context("failed to prepare proposal")?; @@ -573,7 +626,13 @@ let accept = self .interpreter - .process(self.resolve_pool.clone(), txs) + .process( + ( + self.resolve_pool.clone(), + self.parent_finality_provider.clone(), + ), + txs, + ) .await .context("failed to process proposal")?; @@ -630,6 +689,7 @@ invalid_deliver_tx(AppError::InvalidSignature, d) } ChainMessageApplyRet::Signed(Ok(ret)) => to_deliver_tx(ret.fvm, ret.domain_hash), + ChainMessageApplyRet::Ipc(ret) => to_deliver_tx(ret, None), }, }; diff --git a/fendermint/app/src/cmd/run.rs b/fendermint/app/src/cmd/run.rs index e728dd3e..093cc869 100644 --- a/fendermint/app/src/cmd/run.rs +++ b/fendermint/app/src/cmd/run.rs @@ -3,7 +3,7 @@ use anyhow::{anyhow, bail, Context}; use fendermint_abci::ApplicationService; -use fendermint_app::{App, AppConfig, AppStore, BitswapBlockstore}; +use fendermint_app::{App, AppConfig, AppParentFinalityQuery, AppStore, BitswapBlockstore}; use fendermint_app_settings::AccountKind; use fendermint_crypto::SecretKey; use fendermint_rocksdb::{blockstore::NamespaceBlockstore, namespaces, RocksDb, RocksDbConfig}; @@ -15,14 +15,45 @@ use fendermint_vm_interpreter::{ signed::SignedMessageInterpreter, }; use fendermint_vm_resolver::ipld::IpldResolver; +use fendermint_vm_topdown::proxy::IPCProviderProxy; +use fendermint_vm_topdown::sync::launch_polling_syncer; +use fendermint_vm_topdown::{CachedFinalityProvider, Toggle}; use fvm_shared::address::Address; +use ipc_provider::config::subnet::{EVMSubnet, SubnetConfig}; +use ipc_provider::IpcProvider; use libp2p::identity::secp256k1; use libp2p::identity::Keypair; +use std::sync::Arc; use tracing::info; use crate::cmd::key::read_secret_key; use crate::{cmd, options::run::RunArgs, settings::Settings}; +fn create_ipc_provider_proxy(settings: &Settings) -> anyhow::Result<IPCProviderProxy> { + let topdown_config = settings.ipc.topdown_config()?; + let subnet = ipc_provider::config::Subnet { + id: settings + .ipc + .subnet_id + .parent() + .ok_or_else(|| anyhow!("subnet has no parent"))?, + config: SubnetConfig::Fevm(EVMSubnet { + provider_http: topdown_config + .parent_http_endpoint + .to_string() + .parse() + .unwrap(), + auth_token: None, + registry_addr: topdown_config.parent_registry, + gateway_addr: topdown_config.parent_gateway, + }), + }; + tracing::info!("init ipc provider with subnet: {subnet:?}"); + + let ipc_provider = IpcProvider::new_with_subnet(None, subnet)?; + IPCProviderProxy::new(ipc_provider, settings.ipc.subnet_id.clone()) +} + cmd!
{ RunArgs(self, settings) { run(settings).await @@ -78,7 +109,7 @@ async fn run(settings: Settings) -> anyhow::Result<()> { settings.fvm.exec_in_check, ); let interpreter = SignedMessageInterpreter::new(interpreter); - let interpreter = ChainMessageInterpreter::new(interpreter); + let interpreter = ChainMessageInterpreter::<_, NamespaceBlockstore>::new(interpreter); let interpreter = BytesMessageInterpreter::new(interpreter, ProposalPrepareMode::AppendOnly, false); @@ -123,6 +154,25 @@ async fn run(settings: Settings) -> anyhow::Result<()> { tracing::info!("IPLD Resolver disabled.") } + let (parent_finality_provider, ipc_tuple) = if settings.ipc.is_topdown_enabled() { + info!("topdown finality enabled"); + let topdown_config = settings.ipc.topdown_config()?; + let config = fendermint_vm_topdown::Config { + chain_head_delay: topdown_config.chain_head_delay, + polling_interval: topdown_config.polling_interval, + exponential_back_off: topdown_config.exponential_back_off, + exponential_retry_limit: topdown_config.exponential_retry_limit, + }; + let ipc_provider = Arc::new(create_ipc_provider_proxy(&settings)?); + let finality_provider = + CachedFinalityProvider::uninitialized(config.clone(), ipc_provider.clone()).await?; + let p = Arc::new(Toggle::enabled(finality_provider)); + (p, Some((ipc_provider, config))) + } else { + info!("topdown finality disabled"); + (Arc::new(Toggle::disabled()), None) + }; + let app: App<_, _, AppStore, _> = App::new( AppConfig { app_namespace: ns.app, @@ -134,8 +184,26 @@ async fn run(settings: Settings) -> anyhow::Result<()> { state_store, interpreter, resolve_pool, + parent_finality_provider.clone(), )?; + if let Some((agent_proxy, config)) = ipc_tuple { + let app_parent_finality_query = AppParentFinalityQuery::new(app.clone()); + tokio::spawn(async move { + match launch_polling_syncer( + app_parent_finality_query, + config, + parent_finality_provider, + agent_proxy, + ) + .await + { + Ok(_) => {} + Err(e) => tracing::error!("cannot launch polling syncer: {e}"), + } + }); + } + let service = ApplicationService(app); // Split it into components. diff --git a/fendermint/app/src/ipc.rs b/fendermint/app/src/ipc.rs new file mode 100644 index 00000000..fbf61e6c --- /dev/null +++ b/fendermint/app/src/ipc.rs @@ -0,0 +1,69 @@ +// Copyright 2022-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT +//! 
IPC related execution + +use crate::app::{AppState, AppStoreKey}; +use crate::{App, BlockHeight}; +use fendermint_storage::{Codec, Encode, KVReadable, KVStore, KVWritable}; +use fendermint_vm_interpreter::fvm::state::ipc::GatewayCaller; +use fendermint_vm_interpreter::fvm::state::FvmStateParams; +use fendermint_vm_interpreter::fvm::store::ReadOnlyBlockstore; +use fendermint_vm_topdown::sync::ParentFinalityStateQuery; +use fendermint_vm_topdown::IPCParentFinality; +use fvm_ipld_blockstore::Blockstore; +use std::sync::Arc; + +/// Queries the LATEST COMMITTED parent finality from the storage +pub struct AppParentFinalityQuery<DB, SS, S, I> +where + SS: Blockstore + 'static, + S: KVStore, +{ + /// The app to get state + app: App<DB, SS, S, I>, + gateway_caller: GatewayCaller<ReadOnlyBlockstore<Arc<SS>>>, +} + +impl<DB, SS, S, I> AppParentFinalityQuery<DB, SS, S, I> +where + S: KVStore + + Codec<AppState> + + Encode<AppStoreKey> + + Encode<BlockHeight> + + Codec<FvmStateParams>, + DB: KVWritable<S> + KVReadable<S> + 'static + Clone, + SS: Blockstore + 'static + Clone, +{ + pub fn new(app: App<DB, SS, S, I>) -> Self { + Self { + app, + gateway_caller: GatewayCaller::default(), + } + } +} + +impl<DB, SS, S, I> ParentFinalityStateQuery for AppParentFinalityQuery<DB, SS, S, I> +where + S: KVStore + + Codec<AppState> + + Encode<AppStoreKey> + + Encode<BlockHeight> + + Codec<FvmStateParams>, + DB: KVWritable<S> + KVReadable<S> + 'static + Clone, + SS: Blockstore + 'static + Clone, +{ + fn get_latest_committed_finality(&self) -> anyhow::Result<Option<IPCParentFinality>> { + let maybe_exec_state = self.app.new_read_only_exec_state()?; + + let finality = if let Some(mut exec_state) = maybe_exec_state { + let finality = self + .gateway_caller + .get_latest_parent_finality(&mut exec_state)?; + Some(finality) + } else { + None + }; + + Ok(finality) + } +} diff --git a/fendermint/app/src/lib.rs b/fendermint/app/src/lib.rs index 9ceab654..44908943 100644 --- a/fendermint/app/src/lib.rs +++ b/fendermint/app/src/lib.rs @@ -1,10 +1,12 @@ // Copyright 2022-2023 Protocol Labs // SPDX-License-Identifier: Apache-2.0, MIT mod app; +mod ipc; mod store; mod tmconv; pub use app::{App, AppConfig}; +pub use ipc::AppParentFinalityQuery; pub use store::{AppStore, BitswapBlockstore}; // Different type from `ChainEpoch` just because we might use epoch in a more traditional sense for checkpointing.
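For reference, here is what a complete `[ipc]` section in the application settings might look like once top-down finality is enabled — a hypothetical sketch only: the keys mirror `IpcSettings` and `TopDownConfig` from `fendermint/app/settings/src/lib.rs` above, the subnet ID, endpoint, and contract addresses are made-up placeholders, and the two intervals deserialize from plain seconds via `DurationSeconds<u64>`:

```toml
# Sketch of the IPC settings; all values below are placeholders.
[ipc]
# A child subnet ID; with the default root "/r0", is_topdown_enabled() returns false.
subnet_id = "/r31415926/t410fplaceholder"

# Optional: omit this whole table to leave `topdown = None` and disable parent syncing.
[ipc.topdown]
chain_head_delay = 10        # blocks behind the parent chain head considered final
polling_interval = 10        # seconds between parent syncing polls
exponential_back_off = 5     # seconds; base of the retry backoff
exponential_retry_limit = 5  # give up after this many retries
parent_http_endpoint = "http://127.0.0.1:8545"                  # placeholder parent RPC
parent_registry = "0x0000000000000000000000000000000000000001"  # placeholder address
parent_gateway = "0x0000000000000000000000000000000000000002"   # placeholder address
```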
diff --git a/fendermint/vm/interpreter/Cargo.toml b/fendermint/vm/interpreter/Cargo.toml index 614a7b1e..7fdac801 100644 --- a/fendermint/vm/interpreter/Cargo.toml +++ b/fendermint/vm/interpreter/Cargo.toml @@ -14,11 +14,14 @@ fendermint_vm_core = { path = "../core" } fendermint_vm_genesis = { path = "../genesis" } fendermint_vm_message = { path = "../message" } fendermint_vm_resolver = { path = "../resolver" } +fendermint_vm_topdown = { path = "../topdown" } fendermint_crypto = { path = "../../crypto" } fendermint_eth_hardhat = { path = "../../eth/hardhat" } fendermint_rpc = { path = "../../rpc" } ipc_actors_abis = { workspace = true } +ipc-sdk = { workspace = true } + async-trait = { workspace = true } async-stm = { workspace = true } anyhow = { workspace = true } diff --git a/fendermint/vm/interpreter/src/chain.rs b/fendermint/vm/interpreter/src/chain.rs index 5189b4ae..4875683b 100644 --- a/fendermint/vm/interpreter/src/chain.rs +++ b/fendermint/vm/interpreter/src/chain.rs @@ -1,25 +1,37 @@ // Copyright 2022-2023 Protocol Labs // SPDX-License-Identifier: Apache-2.0, MIT +use crate::fvm::state::ipc::GatewayCaller; +use crate::fvm::{topdown, FvmApplyRet}; use crate::{ + fvm::state::FvmExecState, fvm::FvmMessage, signed::{SignedMessageApplyRes, SignedMessageCheckRes, SyntheticMessage, VerifiableMessage}, CheckInterpreter, ExecInterpreter, GenesisInterpreter, ProposalInterpreter, QueryInterpreter, }; -use anyhow::Context; +use anyhow::{anyhow, Context}; use async_stm::atomically; use async_trait::async_trait; use fendermint_vm_actor_interface::ipc; +use fendermint_vm_message::ipc::ParentFinality; use fendermint_vm_message::{ chain::ChainMessage, ipc::{BottomUpCheckpoint, CertifiedMessage, IpcMessage, SignedRelayedMessage}, }; use fendermint_vm_resolver::pool::{ResolveKey, ResolvePool}; +use fendermint_vm_topdown::proxy::IPCProviderProxy; +use fendermint_vm_topdown::{ + CachedFinalityProvider, IPCParentFinality, ParentFinalityProvider, ParentViewProvider, Toggle, +}; +use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::RawBytes; +use fvm_shared::clock::ChainEpoch; use fvm_shared::econ::TokenAmount; use num_traits::Zero; +use std::sync::Arc; /// A resolution pool for bottom-up and top-down checkpoints. pub type CheckpointPool = ResolvePool<CheckpointPoolItem>; +pub type TopDownFinalityProvider = Arc<Toggle<CachedFinalityProvider<IPCProviderProxy>>>; #[derive(Clone, Hash, PartialEq, Eq)] pub enum CheckpointPoolItem { @@ -46,6 +58,8 @@ pub struct IllegalMessage; // For now this is the only option, later we can expand. pub enum ChainMessageApplyRet { Signed(SignedMessageApplyRes), + /// The IPC chain message execution result + Ipc(FvmApplyRet), } /// We only allow signed messages into the mempool. pub type ChainMessageCheckRes = Result<SignedMessageCheckRes, IllegalMessage>; /// Interpreter working on chain messages; in the future it will schedule /// CID lookups to turn references into self-contained user or cross messages.
#[derive(Clone)] -pub struct ChainMessageInterpreter<I> { +pub struct ChainMessageInterpreter<I, DB> { inner: I, + gateway_caller: GatewayCaller<DB>, } -impl<I> ChainMessageInterpreter<I> { +impl<I, DB> ChainMessageInterpreter<I, DB> { pub fn new(inner: I) -> Self { - Self { inner } + Self { + inner, + gateway_caller: GatewayCaller::default(), + } } } #[async_trait] -impl<I> ProposalInterpreter for ChainMessageInterpreter<I> +impl<I, DB> ProposalInterpreter for ChainMessageInterpreter<I, DB> where + DB: Blockstore + Clone + 'static + Send + Sync, I: Sync + Send, { - type State = CheckpointPool; + type State = (CheckpointPool, TopDownFinalityProvider); type Message = ChainMessage; /// Check whether there are any "ready" messages in the IPLD resolution mempool which can be appended to the proposal. @@ -78,7 +97,7 @@ /// account the transactions which are part of top-down or bottom-up checkpoints, to stay within gas limits. async fn prepare( &self, - pool: Self::State, + (pool, finality_provider): Self::State, mut msgs: Vec<Self::Message>, ) -> anyhow::Result<Vec<Self::Message>> { // Collect resolved CIDs ready to be proposed from the pool. @@ -89,47 +108,80 @@ CheckpointPoolItem::BottomUp(ckpt) => ChainMessage::Ipc(IpcMessage::BottomUpExec(ckpt)), }); + // Prepare top down proposals + if let Some(proposal) = atomically(|| finality_provider.next_proposal()).await { + msgs.push(ChainMessage::Ipc(IpcMessage::TopDownExec(ParentFinality { + height: proposal.height as ChainEpoch, + block_hash: proposal.block_hash, + }))) + } + // Append at the end - if we run out of block space, these are going to be reproposed in the next block. msgs.extend(ckpts); Ok(msgs) } /// Perform finality checks on top-down transactions and availability checks on bottom-up transactions. - async fn process(&self, pool: Self::State, msgs: Vec<Self::Message>) -> anyhow::Result<bool> { + async fn process( + &self, + (pool, finality_provider): Self::State, + msgs: Vec<Self::Message>, + ) -> anyhow::Result<bool> { for msg in msgs { - if let ChainMessage::Ipc(IpcMessage::BottomUpExec(msg)) = msg { - let item = CheckpointPoolItem::BottomUp(msg); - - // We can just look in memory because when we start the application, we should retrieve any - // pending checkpoints (relayed but not executed) from the ledger, so they should be there. - // We don't have to validate the checkpoint here, because - // 1) we validated it when it was relayed, and - // 2) if a validator proposes something invalid, we can make them pay during execution. - let is_resolved = atomically(|| match pool.get_status(&item)? { - None => Ok(false), - Some(status) => status.is_resolved(), - }) - .await; - - if !is_resolved { - return Ok(false); + match msg { + ChainMessage::Ipc(IpcMessage::BottomUpExec(msg)) => { + let item = CheckpointPoolItem::BottomUp(msg); + + // We can just look in memory because when we start the application, we should retrieve any + // pending checkpoints (relayed but not executed) from the ledger, so they should be there. + // We don't have to validate the checkpoint here, because + // 1) we validated it when it was relayed, and + // 2) if a validator proposes something invalid, we can make them pay during execution. + let is_resolved = atomically(|| match pool.get_status(&item)?
{ + None => Ok(false), + Some(status) => status.is_resolved(), + }) + .await; + + if !is_resolved { + return Ok(false); + } } - } + ChainMessage::Ipc(IpcMessage::TopDownExec(ParentFinality { + height, + block_hash, + })) => { + let prop = IPCParentFinality { + height: height as u64, + block_hash, + }; + let is_final = atomically(|| finality_provider.check_proposal(&prop)).await; + if !is_final { + return Ok(false); + } + } + _ => {} + }; } Ok(true) } } #[async_trait] -impl<I> ExecInterpreter for ChainMessageInterpreter<I> +impl<I, DB> ExecInterpreter for ChainMessageInterpreter<I, DB> where - I: ExecInterpreter, + DB: Blockstore + Clone + 'static + Send + Sync, + I: ExecInterpreter< + Message = VerifiableMessage, + DeliverOutput = SignedMessageApplyRes, + State = FvmExecState<DB>, + >, { // The state consists of the resolver pool, which this interpreter needs, and the rest of the // state which the inner interpreter uses. This is a technical solution because the pool doesn't // fit with the state we use for execution messages further down the stack, which depend on block // height and are used in queries as well. - type State = (CheckpointPool, I::State); + type State = (CheckpointPool, TopDownFinalityProvider, I::State); type Message = ChainMessage; type BeginOutput = I::BeginOutput; type DeliverOutput = ChainMessageApplyRet; @@ -137,7 +189,7 @@ async fn deliver( &self, - (pool, state): Self::State, + (pool, provider, mut state): Self::State, msg: Self::Message, ) -> anyhow::Result<(Self::State, Self::DeliverOutput)> { match msg { @@ -146,7 +198,7 @@ .inner .deliver(state, VerifiableMessage::Signed(msg)) .await?; - Ok(((pool, state), ChainMessageApplyRet::Signed(ret))) + Ok(((pool, provider, state), ChainMessageApplyRet::Signed(ret))) } ChainMessage::Ipc(msg) => match msg { IpcMessage::BottomUpResolve(msg) => { @@ -177,13 +229,49 @@ } // We can use the same result type for now, it's isomorphic.
- Ok(((pool, state), ChainMessageApplyRet::Signed(ret))) + Ok(((pool, provider, state), ChainMessageApplyRet::Signed(ret))) } IpcMessage::BottomUpExec(_) => { todo!("#197: implement BottomUp checkpoint execution") } - IpcMessage::TopDown => { - todo!("implement TopDown handling; this is just a placeholder") + IpcMessage::TopDownExec(p) => { + if !provider.is_enabled() { + return Err(anyhow!( + "cannot execute IPC top-down message: parent provider disabled" + )); + } + + // commit parent finality first + let finality = IPCParentFinality::new(p.height, p.block_hash); + let (prev_height, prev_finality) = topdown::commit_finality( + &self.gateway_caller, + &mut state, + finality.clone(), + &provider, + ) + .await?; + + // an error happens if we cannot get the validator set from the ipc agent after retries + let validator_changes = provider + .validator_changes_from(prev_height + 1, finality.height) + .await?; + self.gateway_caller + .store_validator_changes(&mut state, validator_changes)?; + + // an error happens if we cannot get the cross messages from the ipc agent after retries + let msgs = provider + .top_down_msgs_from(prev_height + 1, p.height as u64, &finality.block_hash) + .await?; + let ret = topdown::execute_topdown_msgs(&self.gateway_caller, &mut state, msgs) + .await?; + + atomically(|| { + provider.set_new_finality(finality.clone(), prev_finality.clone()) + }) + .await; + tracing::debug!("new finality updated: {:?}", finality); + + Ok(((pool, provider, state), ChainMessageApplyRet::Ipc(ret))) } }, } @@ -191,24 +279,25 @@ async fn begin( &self, - (pool, state): Self::State, + (pool, provider, state): Self::State, ) -> anyhow::Result<(Self::State, Self::BeginOutput)> { let (state, out) = self.inner.begin(state).await?; - Ok(((pool, state), out)) + Ok(((pool, provider, state), out)) } async fn end( &self, - (pool, state): Self::State, + (pool, provider, state): Self::State, ) -> anyhow::Result<(Self::State, Self::EndOutput)> { let (state, out) = self.inner.end(state).await?; - Ok(((pool, state), out)) + Ok(((pool, provider, state), out)) } } #[async_trait] -impl<I> CheckInterpreter for ChainMessageInterpreter<I> +impl<I, DB> CheckInterpreter for ChainMessageInterpreter<I, DB> where + DB: Blockstore + Clone + 'static + Send + Sync, I: CheckInterpreter, { type State = I::State; @@ -243,7 +332,7 @@ Ok((state, Ok(ret))) } - IpcMessage::TopDown | IpcMessage::BottomUpExec(_) => { + IpcMessage::TopDownExec(_) | IpcMessage::BottomUpExec(_) => { // Users cannot send these messages, only validators can propose them in blocks.
Ok((state, Err(IllegalMessage))) } } } #[async_trait] -impl<I> QueryInterpreter for ChainMessageInterpreter<I> +impl<I, DB> QueryInterpreter for ChainMessageInterpreter<I, DB> where + DB: Blockstore + Clone + 'static + Send + Sync, I: QueryInterpreter, { type State = I::State; @@ -272,8 +362,9 @@ where } #[async_trait] -impl<I> GenesisInterpreter for ChainMessageInterpreter<I> +impl<I, DB> GenesisInterpreter for ChainMessageInterpreter<I, DB> where + DB: Blockstore + Clone + 'static + Send + Sync, I: GenesisInterpreter, { type State = I::State; diff --git a/fendermint/vm/interpreter/src/fvm/exec.rs b/fendermint/vm/interpreter/src/fvm/exec.rs index 43167c59..0360ea69 100644 --- a/fendermint/vm/interpreter/src/fvm/exec.rs +++ b/fendermint/vm/interpreter/src/fvm/exec.rs @@ -105,7 +105,11 @@ where let method_num = msg.method_num; let gas_limit = msg.gas_limit; - let (apply_ret, emitters) = state.execute_explicit(msg)?; + let (apply_ret, emitters) = if from == system::SYSTEM_ACTOR_ADDR { + state.execute_implicit(msg)? + } else { + state.execute_explicit(msg)? + }; tracing::info!( height = state.block_height(), diff --git a/fendermint/vm/interpreter/src/fvm/mod.rs b/fendermint/vm/interpreter/src/fvm/mod.rs index 77dea5d0..21f866ab 100644 --- a/fendermint/vm/interpreter/src/fvm/mod.rs +++ b/fendermint/vm/interpreter/src/fvm/mod.rs @@ -14,6 +14,7 @@ pub mod store; #[cfg(any(test, feature = "bundle"))] pub mod bundle; +pub(crate) mod topdown; use anyhow::Context; pub use check::FvmCheckRet; diff --git a/fendermint/vm/interpreter/src/fvm/state/fevm.rs b/fendermint/vm/interpreter/src/fvm/state/fevm.rs index 1ac8ebbd..23a3cd50 100644 --- a/fendermint/vm/interpreter/src/fvm/state/fevm.rs +++ b/fendermint/vm/interpreter/src/fvm/state/fevm.rs @@ -5,6 +5,7 @@ use std::any::type_name; use std::fmt::Debug; use std::{marker::PhantomData, sync::Arc}; +use crate::fvm::FvmApplyRet; use anyhow::{anyhow, bail, Context}; use ethers::abi::{AbiDecode, AbiEncode, Detokenize}; use ethers::core::types as et; @@ -56,6 +57,31 @@ where } } +pub struct ContractCallerReturn<T> { + ret: FvmApplyRet, + call: MockContractCall<T>, +} + +impl<T: Detokenize> ContractCallerReturn<T> { + pub fn into_decoded(self) -> anyhow::Result<T> { + let data = self + .ret + .apply_ret + .msg_receipt + .return_data + .deserialize::<BytesDe>() + .context("failed to deserialize return data")?; + + let value = decode_function_data(&self.call.function, data.0, false) + .context("failed to decode bytes")?; + Ok(value) + } + + pub fn into_return(self) -> FvmApplyRet { + self.ret + } +} + pub type ContractResult<T, E> = Result<T, CallError<E>>; /// Type we can use if a contract does not return revert errors, e.g. because it's all read-only views. @@ -150,7 +176,23 @@ where F: FnOnce(&C) -> MockContractCall<T>, T: Detokenize, { - match self.try_call(state, f)? { + self.call_with_return(state, f)?.into_decoded() + } + + /// Call an EVM method implicitly to read its raw return value. + /// + /// Returns an error if the exit code is not successful; + /// intended to be used with methods that are expected to succeed. + pub fn call_with_return<T, F>( + &self, + state: &mut FvmExecState<DB>, + f: F, + ) -> anyhow::Result<ContractCallerReturn<T>> + where + F: FnOnce(&C) -> MockContractCall<T>, + T: Detokenize, + { + match self.try_call_with_ret(state, f)? { Ok(value) => Ok(value), Err(CallError { exit_code, @@ -177,6 +219,25 @@ where state: &mut FvmExecState<DB>, f: F, ) -> anyhow::Result<ContractResult<T, E>> + where + F: FnOnce(&C) -> MockContractCall<T>, + T: Detokenize, + { + Ok(match self.try_call_with_ret(state, f)? { + Ok(r) => Ok(r.into_decoded()?), + Err(e) => Err(e), + }) + } + + /// Call an EVM method implicitly to read its return value and its original apply return. + /// + /// Returns either the result or the exit code if it's not successful; + /// intended to be used with methods that are expected to fail under certain conditions. + pub fn try_call_with_ret<T, F>( + &self, + state: &mut FvmExecState<DB>, + f: F, + ) -> anyhow::Result<ContractResult<ContractCallerReturn<T>, E>> where F: FnOnce(&C) -> MockContractCall<T>, T: Detokenize, { @@ -212,7 +273,7 @@ }; //eprintln!("\nCALLING FVM: {msg:?}"); - let (ret, _) = state.execute_implicit(msg).context("failed to call FEVM")?; + let (ret, emitters) = state.execute_implicit(msg).context("failed to call FEVM")?; //eprintln!("\nRESULT FROM FVM: {ret:?}"); if !ret.msg_receipt.exit_code.is_success() { @@ -239,16 +300,15 @@ error, })) } else { - let data = ret - .msg_receipt - .return_data - .deserialize::<BytesDe>() - .context("failed to deserialize return data")?; - - let value = decode_function_data(&call.function, data.0, false) - .context("failed to decode bytes")?; - - Ok(Ok(value)) + let ret = FvmApplyRet { + apply_ret: ret, + from, + to: self.addr, + method_num: evm::Method::InvokeContract as u64, + gas_limit: fvm_shared::BLOCK_GAS_LIMIT, + emitters, + }; + Ok(Ok(ContractCallerReturn { call, ret })) } } } diff --git a/fendermint/vm/interpreter/src/fvm/state/ipc.rs b/fendermint/vm/interpreter/src/fvm/state/ipc.rs index b55849a6..87cbec3f 100644 --- a/fendermint/vm/interpreter/src/fvm/state/ipc.rs +++ b/fendermint/vm/interpreter/src/fvm/state/ipc.rs @@ -16,15 +16,21 @@ use fendermint_vm_actor_interface::{ }; use fendermint_vm_genesis::{Power, Validator}; use fendermint_vm_message::signed::sign_secp256k1; +use fendermint_vm_topdown::IPCParentFinality; use ipc_actors_abis::gateway_getter_facet as getter; use ipc_actors_abis::gateway_getter_facet::GatewayGetterFacet; use ipc_actors_abis::gateway_router_facet as router; use ipc_actors_abis::gateway_router_facet::GatewayRouterFacet; +use ipc_sdk::cross::CrossMsg; +use ipc_sdk::staking::StakingChangeRequest; use super::{ fevm::{ContractCaller, MockProvider, NoRevert}, FvmExecState, }; +use crate::fvm::FvmApplyRet; +use fendermint_vm_actor_interface::ipc; +use fvm_shared::econ::TokenAmount; #[derive(Clone)] pub struct GatewayCaller<DB> { @@ -188,6 +194,78 @@ impl<DB: Blockstore> GatewayCaller<DB> { Ok(calldata) } + + /// Commit the parent finality to the gateway and return the previously committed finality. + /// None implies there is no previously committed finality. + pub fn commit_parent_finality( + &self, + state: &mut FvmExecState<DB>, + finality: IPCParentFinality, + ) -> anyhow::Result<Option<IPCParentFinality>> { + let evm_finality = router::ParentFinality::try_from(finality)?; + let (has_committed, prev_finality) = self + .router + .call(state, |c| c.commit_parent_finality(evm_finality))?; + Ok(if !has_committed { + None + } else { + Some(IPCParentFinality::try_from(prev_finality)?)
+ }) + } + + pub fn store_validator_changes( + &self, + state: &mut FvmExecState<DB>, + changes: Vec<StakingChangeRequest>, + ) -> anyhow::Result<()> { + let mut change_requests = vec![]; + for c in changes { + change_requests.push(router::StakingChangeRequest::try_from(c)?); + } + + self.router + .call(state, |c| c.store_validator_changes(change_requests)) + } + + /// Call this function to mint some FIL to the gateway contract + pub fn mint_to_gateway( + &self, + state: &mut FvmExecState<DB>, + value: TokenAmount, + ) -> anyhow::Result<()> { + let state_tree = state.state_tree_mut(); + state_tree.mutate_actor(ipc::GATEWAY_ACTOR_ID, |actor_state| { + actor_state.balance += value; + Ok(()) + })?; + Ok(()) + } + + pub fn apply_cross_messages( + &self, + state: &mut FvmExecState<DB>, + cross_messages: Vec<CrossMsg>, + ) -> anyhow::Result<FvmApplyRet> { + let messages = cross_messages + .into_iter() + .map(router::CrossMsg::try_from) + .collect::<Result<Vec<_>, _>>() + .context("failed to convert cross messages")?; + let r = self + .router + .call_with_return(state, |c| c.apply_cross_messages(messages))?; + Ok(r.into_return()) + } + + pub fn get_latest_parent_finality( + &self, + state: &mut FvmExecState<DB>, + ) -> anyhow::Result<IPCParentFinality> { + let r = self + .getter + .call(state, |c| c.get_latest_parent_finality())?; + Ok(IPCParentFinality::try_from(r)?) + } } /// Hash some value in the same way we'd hash it in Solidity. diff --git a/fendermint/vm/interpreter/src/fvm/topdown.rs b/fendermint/vm/interpreter/src/fvm/topdown.rs new file mode 100644 index 00000000..81601d84 --- /dev/null +++ b/fendermint/vm/interpreter/src/fvm/topdown.rs @@ -0,0 +1,51 @@ +// Copyright 2022-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT +//! Topdown finality related util functions + +use crate::chain::TopDownFinalityProvider; +use crate::fvm::state::ipc::GatewayCaller; +use crate::fvm::state::FvmExecState; +use crate::fvm::FvmApplyRet; +use fendermint_vm_topdown::{BlockHeight, IPCParentFinality, ParentViewProvider}; +use fvm_ipld_blockstore::Blockstore; +use fvm_shared::econ::TokenAmount; +use ipc_sdk::cross::CrossMsg; + +/// Commit the parent finality. Returns the height at which the previous parent finality was committed and +/// the committed finality itself. If no parent finality was committed before, the genesis epoch is returned. +pub async fn commit_finality<DB>( + gateway_caller: &GatewayCaller<DB>, + state: &mut FvmExecState<DB>, + finality: IPCParentFinality, + provider: &TopDownFinalityProvider, +) -> anyhow::Result<(BlockHeight, Option<IPCParentFinality>)> +where + DB: Blockstore + Sync + Send + 'static, +{ + let (prev_height, prev_finality) = + if let Some(prev_finality) = gateway_caller.commit_parent_finality(state, finality)? { + (prev_finality.height, Some(prev_finality)) + } else { + (provider.genesis_epoch()?, None) + }; + tracing::debug!( + "commit finality parsed: prev_height {prev_height}, prev_finality: {prev_finality:?}" + ); + Ok((prev_height, prev_finality)) +} + +/// Execute the top down messages implicitly. Before the execution, mint to the gateway the funds +/// transferred in the messages.
+pub async fn execute_topdown_msgs<DB>( + gateway_caller: &GatewayCaller<DB>, + state: &mut FvmExecState<DB>, + messages: Vec<CrossMsg>, +) -> anyhow::Result<FvmApplyRet> +where + DB: Blockstore + Sync + Send + 'static, +{ + let total_value: TokenAmount = messages.iter().map(|a| a.msg.value.clone()).sum(); + gateway_caller.mint_to_gateway(state, total_value)?; + + gateway_caller.apply_cross_messages(state, messages) +} diff --git a/fendermint/vm/interpreter/src/signed.rs b/fendermint/vm/interpreter/src/signed.rs index bd8fb373..ef79d0e1 100644 --- a/fendermint/vm/interpreter/src/signed.rs +++ b/fendermint/vm/interpreter/src/signed.rs @@ -38,6 +38,8 @@ pub enum VerifiableMessage { Signed(SignedMessage), /// Something we constructed to pass on to the FVM. Synthetic(SyntheticMessage), + /// Does not require verification + NotVerify(FvmMessage), } impl VerifiableMessage { @@ -45,6 +47,7 @@ match self { Self::Signed(m) => m.verify(chain_id), Self::Synthetic(m) => m.verify(chain_id), + Self::NotVerify(_) => Ok(()), } } @@ -52,6 +55,7 @@ match self { Self::Signed(m) => m.into_message(), Self::Synthetic(m) => m.message, + Self::NotVerify(m) => m, } } @@ -62,6 +66,7 @@ match self { Self::Signed(m) => m.domain_hash(chain_id), Self::Synthetic(_) => Ok(None), + Self::NotVerify(_) => Ok(None), } } } diff --git a/fendermint/vm/message/golden/chain/ipc_top_down.cbor b/fendermint/vm/message/golden/chain/ipc_top_down.cbor index 48090ec9..a50440b2 100644 --- a/fendermint/vm/message/golden/chain/ipc_top_down.cbor +++ b/fendermint/vm/message/golden/chain/ipc_top_down.cbor @@ -1 +1 @@ -a16349706367546f70446f776e \ No newline at end of file +a163497063a16b546f70446f776e45786563a266686569676874006a626c6f636b5f6861736880 \ No newline at end of file diff --git a/fendermint/vm/message/golden/chain/ipc_top_down.txt b/fendermint/vm/message/golden/chain/ipc_top_down.txt index 1e8b2535..64e55057 100644 --- a/fendermint/vm/message/golden/chain/ipc_top_down.txt +++ b/fendermint/vm/message/golden/chain/ipc_top_down.txt @@ -1 +1 @@ -Ipc(TopDown) \ No newline at end of file +Ipc(TopDownExec(ParentFinality { height: 0, block_hash: [] })) \ No newline at end of file diff --git a/fendermint/vm/message/src/ipc.rs b/fendermint/vm/message/src/ipc.rs index 638a2e0f..9a14d007 100644 --- a/fendermint/vm/message/src/ipc.rs +++ b/fendermint/vm/message/src/ipc.rs @@ -25,8 +25,9 @@ pub enum IpcMessage { /// or we can gossip votes using the _IPLD Resolver_ and attach them as a quorum certificate. BottomUpExec(CertifiedMessage), - // TODO - TopDown, + /// A top-down checkpoint parent finality proposal. This proposal should contain the latest parent + /// state to be checked and voted on by validators. + TopDownExec(ParentFinality), } /// A message relayed by a user on the current subnet. @@ -93,9 +94,19 @@ pub struct BottomUpCheckpoint { pub bottom_up_messages: Cid, // TODO: Use TCid } +/// A proposal of the parent view that validators will be voting on. +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)] +pub struct ParentFinality { + /// Block height of this proposal.
+ pub height: ChainEpoch, + /// The block hash of the parent, expressed as bytes + pub block_hash: Vec<u8>, +} + #[cfg(feature = "arb")] mod arb { + use crate::ipc::ParentFinality; use fendermint_testing::arb::{ArbAddress, ArbCid, ArbSubnetID, ArbTokenAmount}; use fvm_shared::crypto::signature::Signature; use quickcheck::{Arbitrary, Gen}; @@ -110,7 +121,7 @@ match u8::arbitrary(g) % 3 { 0 => IpcMessage::BottomUpResolve(Arbitrary::arbitrary(g)), 1 => IpcMessage::BottomUpExec(Arbitrary::arbitrary(g)), - _ => IpcMessage::TopDown, + _ => IpcMessage::TopDownExec(Arbitrary::arbitrary(g)), } } } @@ -175,4 +186,13 @@ } } } + + impl Arbitrary for ParentFinality { + fn arbitrary(g: &mut Gen) -> Self { + Self { + height: u32::arbitrary(g).into(), + block_hash: Vec::arbitrary(g), + } + } + } } diff --git a/fendermint/vm/message/tests/golden.rs b/fendermint/vm/message/tests/golden.rs index 583bebf0..e40dc245 100644 --- a/fendermint/vm/message/tests/golden.rs +++ b/fendermint/vm/message/tests/golden.rs @@ -37,7 +37,7 @@ mod chain { golden_cbor! { "chain", ipc_top_down, |g| { loop { - if let msg @ ChainMessage::Ipc(IpcMessage::TopDown) = ChainMessage::arbitrary(g) { + if let msg @ ChainMessage::Ipc(IpcMessage::TopDownExec(_)) = ChainMessage::arbitrary(g) { return msg } } diff --git a/fendermint/vm/topdown/src/convert.rs b/fendermint/vm/topdown/src/convert.rs index ed9d6af4..fd279ff0 100644 --- a/fendermint/vm/topdown/src/convert.rs +++ b/fendermint/vm/topdown/src/convert.rs @@ -4,12 +4,9 @@ use crate::IPCParentFinality; use anyhow::anyhow; -use ethers::abi::Function; use ethers::types::U256; use ipc_actors_abis::{gateway_getter_facet, gateway_router_facet}; -const GET_LATEST_PARENT_FINALITY_FUNC_NAME: &str = "getLatestParentFinality"; - impl TryFrom<IPCParentFinality> for gateway_router_facet::ParentFinality { type Error = anyhow::Error; @@ -37,26 +34,11 @@ } } -pub fn encode_get_latest_parent_finality() -> anyhow::Result<Vec<u8>> { - let function = get_evm_function(GET_LATEST_PARENT_FINALITY_FUNC_NAME)?; - let data = ethers::contract::encode_function_data(function, ())?; - - Ok(data.to_vec()) -} - -pub fn decode_parent_finality_return(bytes: &[u8]) -> anyhow::Result<IPCParentFinality> { - let function = get_evm_function(GET_LATEST_PARENT_FINALITY_FUNC_NAME)?; - let finality = ethers::contract::decode_function_data::<gateway_getter_facet::ParentFinality, _>( - function, bytes, false, - )?; - Ok(IPCParentFinality::from(finality)) -} - -fn get_evm_function(method_name: &str) -> anyhow::Result<&Function> { - gateway_getter_facet::GATEWAYGETTERFACET_ABI - .functions - .get(method_name) - .ok_or_else(|| anyhow!("report bug, abi function map does not have {}", method_name))?
- .get(0) .ok_or_else(|| anyhow!("report bug, abi vec does not have {}", method_name)) +impl From<gateway_router_facet::ParentFinality> for IPCParentFinality { + fn from(value: gateway_router_facet::ParentFinality) -> Self { + IPCParentFinality { + height: value.height.as_u64(), + block_hash: value.block_hash.to_vec(), + } + } } diff --git a/fendermint/vm/topdown/src/error.rs b/fendermint/vm/topdown/src/error.rs index b26f6ada..6c75d631 100644 --- a/fendermint/vm/topdown/src/error.rs +++ b/fendermint/vm/topdown/src/error.rs @@ -7,8 +7,8 @@ use thiserror::Error; /// The errors for top down checkpointing #[derive(Error, Debug, Eq, PartialEq, Clone)] pub enum Error { - #[error("Incoming top down messages are not order by nonce sequentially")] - NonceNotSequential, + #[error("Incoming items are not ordered sequentially")] + NotSequential, #[error("The parent view update with block height is not sequential")] NonSequentialParentViewInsert(SequentialAppendError), #[error("Parent chain reorg detected")] diff --git a/fendermint/vm/topdown/src/finality.rs b/fendermint/vm/topdown/src/finality.rs index 8199b172..19d9d7ab 100644 --- a/fendermint/vm/topdown/src/finality.rs +++ b/fendermint/vm/topdown/src/finality.rs @@ -11,7 +11,6 @@ use async_stm::{abort, atomically, Stm, StmResult, TVar}; use ipc_sdk::cross::CrossMsg; use ipc_sdk::staking::StakingChangeRequest; use std::sync::Arc; -use std::time::Duration; type ParentViewPayload = (BlockHash, Vec<StakingChangeRequest>, Vec<CrossMsg>); @@ -19,6 +18,7 @@ #[derive(Clone)] pub struct CachedFinalityProvider<T> { config: Config, + genesis_epoch: BlockHeight, /// Cached data that always syncs with the latest parent chain proactively cached_data: CachedData, /// This is an in-memory view of the committed parent finality. We need this as a starting point @@ -44,13 +44,12 @@ macro_rules! retry { let res = $f; if let Err(e) = &res { tracing::warn!( - "cannot query ipc parent_client due to: {e}, retires: {retries}, wait: {wait}" + "cannot query ipc parent_client due to: {e}, retries: {retries}, wait: {wait:?}" ); if retries > 0 { retries -= 1; - let to_sleep = Duration::from_secs(wait); - tokio::time::sleep(to_sleep).await; + tokio::time::sleep(wait).await; wait *= 2; continue; @@ -64,6 +63,28 @@ macro_rules!
retry { #[async_trait::async_trait] impl ParentViewProvider for CachedFinalityProvider { + fn genesis_epoch(&self) -> anyhow::Result<BlockHeight> { + Ok(self.genesis_epoch) + } + + async fn validator_changes_from( + &self, + from: BlockHeight, + to: BlockHeight, + ) -> anyhow::Result<Vec<StakingChangeRequest>> { + let mut v = vec![]; + for h in from..=to { + let mut r = self.validator_changes(h).await?; + tracing::debug!( + "obtained validator change set (len {}) at height {h}", + r.len() + ); + v.append(&mut r); + } + + Ok(v) + } + /// Should always return the validator set, only when ipc parent_client is down after exponential /// retries async fn validator_changes( @@ -76,7 +97,7 @@ impl ParentViewProvider for CachedF } retry!( - self.config.exponential_back_off_secs, + self.config.exponential_back_off, self.config.exponential_retry_limit, self.parent_client .get_validator_changes(height) .await ) } @@ -98,13 +119,27 @@ } retry!( - self.config.exponential_back_off_secs, + self.config.exponential_back_off, self.config.exponential_retry_limit, self.parent_client .get_top_down_msgs_with_hash(height, block_hash) .await ) } + + async fn top_down_msgs_from( + &self, + from: BlockHeight, + to: BlockHeight, + block_hash: &BlockHash, + ) -> anyhow::Result<Vec<CrossMsg>> { + let mut v = vec![]; + for h in from..=to { + let mut r = self.top_down_msgs(h, block_hash).await?; + v.append(&mut r); + } + Ok(v) + } } impl ParentFinalityProvider @@ -114,13 +149,16 @@ let height = if let Some(h) = self.cached_data.latest_height()? { h } else { + tracing::debug!("no proposal yet as height not available"); return Ok(None); }; // safe to unwrap as latest height exists let block_hash = self.cached_data.block_hash(height)?.unwrap(); - Ok(Some(IPCParentFinality { height, block_hash })) + let proposal = IPCParentFinality { height, block_hash }; + tracing::debug!("new proposal: {proposal:?}"); + Ok(Some(proposal)) } fn check_proposal(&self, proposal: &IPCParentFinality) -> Stm<bool> { @@ -130,7 +168,13 @@ self.check_block_hash(proposal) } - fn set_new_finality(&self, finality: IPCParentFinality) -> Stm<()> { + fn set_new_finality( + &self, + finality: IPCParentFinality, + previous_finality: Option<IPCParentFinality>, + ) -> Stm<()> { + debug_assert!(previous_finality == self.last_committed_finality.read_clone()?); + // the height to clear let height = finality.height; @@ -143,23 +187,28 @@ } } -impl CachedFinalityProvider { +impl CachedFinalityProvider { /// Creates an uninitialized provider /// We need this because `fendermint` has yet to be initialized and might /// not be able to provide an existing finality from the storage. This provider requires an /// existing committed finality. Providing the finality will enable other functionalities.
- pub fn uninitialized(config: Config, parent_client: Arc<T>) -> Self { - Self::new(config, None, parent_client) + pub async fn uninitialized(config: Config, parent_client: Arc<T>) -> anyhow::Result<Self> { + let genesis = parent_client.get_genesis_epoch().await?; + Ok(Self::new(config, genesis, None, parent_client)) } +} +impl<T> CachedFinalityProvider<T> { fn new( config: Config, + genesis_epoch: BlockHeight, committed_finality: Option<IPCParentFinality>, parent_client: Arc<T>, ) -> Self { let height_data = SequentialKeyCache::sequential(); Self { config, + genesis_epoch, cached_data: CachedData { height_data: TVar::new(height_data), }, @@ -198,9 +247,11 @@ ) -> StmResult<(), Error> { if !top_down_msgs.is_empty() { // make sure incoming top down messages are ordered by nonce sequentially + tracing::debug!("top down messages: {top_down_msgs:#?}"); ensure_sequential(&top_down_msgs, |msg| msg.msg.nonce)?; }; if !validator_changes.is_empty() { + tracing::debug!("validator changes: {validator_changes:#?}"); ensure_sequential(&validator_changes, |change| change.configuration_number)?; } @@ -284,7 +335,7 @@ fn ensure_sequential<T, F: Fn(&T) -> u64>(msgs: &[T], f: F) -> StmResult<(), Err let mut nonce = f(first); for msg in msgs.iter().skip(1) { if nonce + 1 != f(msg) { - return abort(Error::NonceNotSequential); + return abort(Error::NotSequential); } nonce += 1; } @@ -350,21 +401,22 @@ Arc::new(MockedParentQuery) } + fn genesis_finality() -> IPCParentFinality { + IPCParentFinality { + height: 0, + block_hash: vec![0; 32], + } + } + fn new_provider() -> CachedFinalityProvider<MockedParentQuery> { let config = Config { chain_head_delay: 20, - polling_interval_secs: 10, - ipc_parent_endpoint: "".to_string(), - exponential_back_off_secs: 10, + polling_interval: Duration::from_secs(10), + exponential_back_off: Duration::from_secs(10), exponential_retry_limit: 10, }; - let genesis_finality = IPCParentFinality { - height: 0, - block_hash: vec![0; 32], - }; - - CachedFinalityProvider::new(config, Some(genesis_finality), mocked_agent_proxy()) + CachedFinalityProvider::new(config, 10, Some(genesis_finality()), mocked_agent_proxy()) } fn new_cross_msg(nonce: u64) -> CrossMsg { @@ -435,7 +487,7 @@ height: target_block, block_hash: vec![1u8; 32], }; - provider.set_new_finality(finality.clone())?; + provider.set_new_finality(finality.clone(), Some(genesis_finality()))?; // all cache should be cleared let r = provider.next_proposal()?; @@ -459,10 +511,13 @@ // inject data provider.new_parent_view(target_block, vec![1u8; 32], vec![], vec![])?; - provider.set_new_finality(IPCParentFinality { - height: target_block - 1, - block_hash: vec![1u8; 32], - })?; + provider.set_new_finality( + IPCParentFinality { + height: target_block - 1, + block_hash: vec![1u8; 32], + }, + Some(genesis_finality()), + )?; let finality = IPCParentFinality { height: target_block, block_hash: vec![1u8; 32], }; @@ -481,9 +536,8 @@ async fn test_top_down_msgs_works() { let config = Config { chain_head_delay: 2, - polling_interval_secs: 10, - ipc_parent_endpoint: "".to_string(), - exponential_back_off_secs: 10, + polling_interval: Duration::from_secs(10), + exponential_back_off: Duration::from_secs(10), exponential_retry_limit: 10, }; let genesis_finality = IPCParentFinality { height: 0, block_hash: vec![0; 32], }; let provider = - CachedFinalityProvider::new(config, Some(genesis_finality), mocked_agent_proxy()); + CachedFinalityProvider::new(config, 10, Some(genesis_finality), mocked_agent_proxy()); let cross_msgs_batch1 = vec![new_cross_msg(0), new_cross_msg(1), new_cross_msg(2)]; let cross_msgs_batch2 =
diff --git a/fendermint/vm/topdown/src/lib.rs b/fendermint/vm/topdown/src/lib.rs
index 60cab3d1..83edefdd 100644
--- a/fendermint/vm/topdown/src/lib.rs
+++ b/fendermint/vm/topdown/src/lib.rs
@@ -12,9 +12,11 @@ mod toggle;
 
 use async_stm::Stm;
 use async_trait::async_trait;
+use fvm_shared::clock::ChainEpoch;
 use ipc_sdk::cross::CrossMsg;
 use ipc_sdk::staking::StakingChangeRequest;
 use serde::{Deserialize, Serialize};
+use std::time::Duration;
 
 pub use crate::cache::{SequentialAppendError, SequentialKeyCache, ValueIter};
 pub use crate::error::Error;
@@ -33,11 +35,9 @@ pub struct Config {
     /// height as final yet.
     pub chain_head_delay: BlockHeight,
     /// Parent syncing cron period
-    pub polling_interval_secs: u64,
-    /// The endpoint to connect to the parent subnet
-    pub ipc_parent_endpoint: String,
+    pub polling_interval: Duration,
     /// Top down exponential back off retry base
-    pub exponential_back_off_secs: u64,
+    pub exponential_back_off: Duration,
     /// The max number of retries for exponential backoff before giving up
     pub exponential_retry_limit: usize,
 }
@@ -52,19 +52,43 @@ pub struct IPCParentFinality {
     pub block_hash: BlockHash,
 }
 
+impl IPCParentFinality {
+    pub fn new(height: ChainEpoch, hash: BlockHash) -> Self {
+        Self {
+            height: height as BlockHeight,
+            block_hash: hash,
+        }
+    }
+}
+
 #[async_trait]
 pub trait ParentViewProvider {
+    /// Obtain the genesis epoch of the current subnet in the parent
+    fn genesis_epoch(&self) -> anyhow::Result<BlockHeight>;
+    /// Get the validator changes from height `from` to `to`, inclusive.
+    async fn validator_changes_from(
+        &self,
+        from: BlockHeight,
+        to: BlockHeight,
+    ) -> anyhow::Result<Vec<StakingChangeRequest>>;
     /// Get the validator changes at height.
     async fn validator_changes(
         &self,
         height: BlockHeight,
    ) -> anyhow::Result<Vec<StakingChangeRequest>>;
-    /// Get the top down messages at height
+    /// Get the top down messages at height.
     async fn top_down_msgs(
         &self,
         height: BlockHeight,
         block_hash: &BlockHash,
     ) -> anyhow::Result<Vec<CrossMsg>>;
+    /// Get the top down messages from height `from` to `to`, inclusive.
+    async fn top_down_msgs_from(
+        &self,
+        from: BlockHeight,
+        to: BlockHeight,
+        block_hash: &BlockHash,
+    ) -> anyhow::Result<Vec<CrossMsg>>;
 }
 
 pub trait ParentFinalityProvider: ParentViewProvider {
@@ -73,5 +97,9 @@ pub trait ParentFinalityProvider: ParentViewProvider {
     /// Check if the target proposal is valid
     fn check_proposal(&self, proposal: &IPCParentFinality) -> Stm<bool>;
     /// Called when finality is committed
-    fn set_new_finality(&self, finality: IPCParentFinality) -> Stm<()>;
+    fn set_new_finality(
+        &self,
+        finality: IPCParentFinality,
+        previous_finality: Option<IPCParentFinality>,
+    ) -> Stm<()>;
 }
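With the `Config` change above, the two former `*_secs` integers become `Duration`s and the parent endpoint field is gone. An illustrative construction with arbitrary placeholder values:

```rust
use std::time::Duration;
use fendermint_vm_topdown::Config;

// Arbitrary example values; `chain_head_delay` is a number of blocks,
// while the polling and backoff knobs are now proper Durations.
fn example_config() -> Config {
    Config {
        chain_head_delay: 20,
        polling_interval: Duration::from_secs(10),
        exponential_back_off: Duration::from_secs(5),
        exponential_retry_limit: 5,
    }
}
```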
diff --git a/fendermint/vm/topdown/src/proxy.rs b/fendermint/vm/topdown/src/proxy.rs
index 3eb8cdc4..86bd3f11 100644
--- a/fendermint/vm/topdown/src/proxy.rs
+++ b/fendermint/vm/topdown/src/proxy.rs
@@ -71,7 +71,7 @@ impl ParentQueryProxy for IPCProviderProxy {
     /// Get the genesis epoch of the child subnet, i.e. the epoch that the subnet was created in
     /// the parent subnet.
     async fn get_genesis_epoch(&self) -> anyhow::Result<BlockHeight> {
-        let height = self.ipc_provider.genesis_epoch(&self.parent_subnet).await?;
+        let height = self.ipc_provider.genesis_epoch(&self.child_subnet).await?;
         Ok(height as BlockHeight)
     }
 
@@ -98,8 +98,15 @@ impl ParentQueryProxy for IPCProviderProxy {
         &self,
         height: BlockHeight,
     ) -> anyhow::Result<TopDownQueryPayload<Vec<StakingChangeRequest>>> {
-        self.ipc_provider
+        let mut v = self
+            .ipc_provider
             .get_validator_changeset(&self.child_subnet, height as ChainEpoch)
-            .await
+            .await?;
+
+        // sort ascending, we don't assume the changes are ordered
+        v.value
+            .sort_by(|a, b| a.configuration_number.cmp(&b.configuration_number));
+
+        Ok(v)
     }
 }
diff --git a/fendermint/vm/topdown/src/sync.rs b/fendermint/vm/topdown/src/sync.rs
index cf5ff193..6543f53c 100644
--- a/fendermint/vm/topdown/src/sync.rs
+++ b/fendermint/vm/topdown/src/sync.rs
@@ -8,7 +8,7 @@ use crate::{
     BlockHash, BlockHeight, CachedFinalityProvider, Config, IPCParentFinality,
     ParentFinalityProvider, Toggle,
 };
-use anyhow::{anyhow, Context};
+use anyhow::anyhow;
 use async_stm::{atomically, atomically_or_err};
 use ipc_sdk::cross::CrossMsg;
 use ipc_sdk::staking::StakingChangeRequest;
@@ -63,7 +63,10 @@ async fn query_starting_finality PollingParentSyncer
         let parent_client = self.parent_client;
         let query = self.committed_state_query;
 
-        let mut interval = tokio::time::interval(Duration::from_secs(config.polling_interval_secs));
+        let mut interval = tokio::time::interval(config.polling_interval);
 
         tokio::spawn(async move {
             loop {
@@ -158,10 +162,7 @@ async fn sync_with_parent(
         return Ok(());
     };
 
-    let parent_chain_head_height = parent_proxy
-        .get_chain_head_height()
-        .await
-        .context("cannot fetch parent chain head")?;
+    let parent_chain_head_height = parent_proxy.get_chain_head_height().await?;
     // sanity check
     if parent_chain_head_height < config.chain_head_delay {
         tracing::debug!("latest height not more than the chain head delay");
@@ -188,6 +189,9 @@ async fn sync_with_parent(
     // than our previously fetched head. It could be a chain reorg. We clear all the cache
     // in `provider` and start from scratch
     if last_recorded_height > ending_height {
+        tracing::warn!(
+            "last recorded height {last_recorded_height} is greater than ending height {ending_height}"
+        );
        return reset_cache(parent_proxy, provider, query).await;
    }
 
@@ -279,30 +283,52 @@ async fn get_new_parent_views(
         let block_hash_res = parent_proxy
             .get_block_hash(h)
             .await
-            .context("cannot fetch block hash")
             .map_err(|e| Error::CannotQueryParent(e.to_string()))?;
 
         if block_hash_res.parent_block_hash != previous_hash {
+            tracing::warn!(
+                "parent block hash at {h} is {:02x?}, different from previous hash: {previous_hash:02x?}",
+                block_hash_res.parent_block_hash
+            );
             return Err(Error::ParentChainReorgDetected);
         }
 
         let changes_res = parent_proxy
             .get_validator_changes(h)
             .await
-            .context("cannot fetch validator set")
             .map_err(|e| Error::CannotQueryParent(e.to_string()))?;
 
         if changes_res.block_hash != block_hash_res.block_hash {
+            tracing::warn!(
+                "change set block hash at {h} is {:02x?}, different from hash: {:02x?}",
+                changes_res.block_hash,
+                block_hash_res.block_hash
+            );
             return Err(Error::ParentChainReorgDetected);
         }
 
+        // For `lotus`, the state at height h is only finalized at h + 1, so querying the top down
+        // messages at height h with the block hash of h returns empty results. We therefore need
+        // the block hash at height h + 1 to query the top down messages executed at height h.
+        let next_hash = parent_proxy
+            .get_block_hash(h + 1)
+            .await
+            .map_err(|e| Error::CannotQueryParent(e.to_string()))?;
+        if next_hash.parent_block_hash != block_hash_res.block_hash {
+            tracing::warn!(
+                "next block hash at {} is {:02x?}, different from hash: {:02x?}",
+                h + 1,
+                next_hash.parent_block_hash,
+                block_hash_res.block_hash
+            );
+            return Err(Error::ParentChainReorgDetected);
+        }
+
         let top_down_msgs_res = parent_proxy
-            .get_top_down_msgs_with_hash(h, &block_hash_res.block_hash)
+            .get_top_down_msgs_with_hash(h, &next_hash.block_hash)
             .await
-            .context("cannot fetch top down messages")
             .map_err(|e| Error::CannotQueryParent(e.to_string()))?;
 
         total_top_down_msgs += top_down_msgs_res.len();
 
-        previous_hash = block_hash_res.parent_block_hash;
+        previous_hash = block_hash_res.block_hash.clone();
 
         block_height_to_update.push((
             h,
@@ -314,5 +340,8 @@ async fn get_new_parent_views(
             break;
         }
     }
+
+    tracing::debug!("obtained updates: {block_height_to_update:?}");
+
     Ok(block_height_to_update)
 }
diff --git a/fendermint/vm/topdown/src/toggle.rs b/fendermint/vm/topdown/src/toggle.rs
index e63261f0..1426537e 100644
--- a/fendermint/vm/topdown/src/toggle.rs
+++ b/fendermint/vm/topdown/src/toggle.rs
@@ -42,6 +42,24 @@ impl<P> Toggle<P> {
 
 #[async_trait::async_trait]
 impl<P: ParentViewProvider + Send + Sync + 'static> ParentViewProvider for Toggle<P> {
+    fn genesis_epoch(&self) -> anyhow::Result<BlockHeight> {
+        match self.inner.as_ref() {
+            Some(p) => p.genesis_epoch(),
+            None => Err(anyhow!("provider is toggled off")),
+        }
+    }
+
+    async fn validator_changes_from(
+        &self,
+        from: BlockHeight,
+        to: BlockHeight,
+    ) -> anyhow::Result<Vec<StakingChangeRequest>> {
+        match self.inner.as_ref() {
+            Some(p) => p.validator_changes_from(from, to).await,
+            None => Err(anyhow!("provider is toggled off")),
+        }
+    }
+
     async fn validator_changes(
         &self,
         height: BlockHeight,
@@ -62,6 +80,18 @@ impl<P: ParentViewProvider + Send + Sync + 'static> ParentViewProvider for Toggl
             None => Err(anyhow!("provider is toggled off")),
         }
     }
+
+    async fn top_down_msgs_from(
+        &self,
+        from: BlockHeight,
+        to: BlockHeight,
+        block_hash: &BlockHash,
+    ) -> anyhow::Result<Vec<CrossMsg>> {
+        match self.inner.as_ref() {
+            Some(p) => p.top_down_msgs_from(from, to, block_hash).await,
+            None => Err(anyhow!("provider is toggled off")),
+        }
+    }
 }
 
 impl<P: ParentFinalityProvider + Send + Sync + 'static> ParentFinalityProvider for Toggle<P> {
@@ -73,8 +103,12 @@ impl<P: ParentFinalityProvider + Send + Sync + 'static> ParentFinalityProvider f
         self.perform_or_else(|p| p.check_proposal(proposal), false)
     }
 
-    fn set_new_finality(&self, finality: IPCParentFinality) -> Stm<()> {
-        self.perform_or_else(|p| p.set_new_finality(finality), ())
+    fn set_new_finality(
+        &self,
+        finality: IPCParentFinality,
+        previous_finality: Option<IPCParentFinality>,
+    ) -> Stm<()> {
+        self.perform_or_else(|p| p.set_new_finality(finality, previous_finality), ())
     }
 }
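All of the `Toggle` forwarding added above follows two shapes: fallible queries surface a "provider is toggled off" error, while the STM operations fall back to a default via `perform_or_else`. A condensed, self-contained sketch of that pattern (types simplified; the real wrapper implements the traits shown in the hunks above):

```rust
use anyhow::anyhow;

// Simplified stand-in for the wrapper: `inner` is Some(provider) when
// top-down finality is enabled, None when the feature is toggled off.
struct Toggle<P> {
    inner: Option<P>,
}

impl<P> Toggle<P> {
    // Queries propagate an error when toggled off...
    fn query<T>(&self, f: impl FnOnce(&P) -> anyhow::Result<T>) -> anyhow::Result<T> {
        match self.inner.as_ref() {
            Some(p) => f(p),
            None => Err(anyhow!("provider is toggled off")),
        }
    }

    // ...while operations with a sensible default degrade silently.
    fn perform_or_else<T>(&self, f: impl FnOnce(&P) -> T, default: T) -> T {
        self.inner.as_ref().map(f).unwrap_or(default)
    }
}
```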