diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index aea2903cc..fa20e4007 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -9,6 +9,15 @@ on: - specs/quint/** - .github/workflows/coverage.yml +env: + CARGO_TERM_COLOR: always + CARGO_INCREMENTAL: 0 + CARGO_PROFILE_DEV_DEBUG: 1 + CARGO_PROFILE_RELEASE_DEBUG: 1 + RUST_BACKTRACE: short + CARGO_NET_RETRY: 10 + RUSTUP_MAX_RETRIES: 10 + jobs: integration: name: Integration @@ -16,8 +25,6 @@ jobs: defaults: run: working-directory: code - env: - CARGO_TERM_COLOR: always steps: - name: Checkout uses: actions/checkout@v4 @@ -36,17 +43,16 @@ jobs: with: toolchain: nightly components: llvm-tools-preview - - name: Install cargo-nextest - uses: taiki-e/install-action@cargo-nextest - name: Install cargo-llvm-cov uses: taiki-e/install-action@cargo-llvm-cov - name: Generate code coverage run: | - cargo llvm-cov nextest \ + cargo llvm-cov test \ --workspace \ --exclude malachite-test-mbt \ --ignore-filename-regex crates/cli \ --all-features \ + --jobs 1 \ --ignore-run-fail \ --lcov \ --output-path lcov.info diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index f0853565f..260867fb4 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -15,6 +15,7 @@ on: env: CARGO_INCREMENTAL: 0 + CARGO_TERM_COLOR: always CARGO_PROFILE_DEV_DEBUG: 1 CARGO_PROFILE_RELEASE_DEBUG: 1 RUST_BACKTRACE: short @@ -23,7 +24,7 @@ env: jobs: test: - name: Test + name: Unit Tests runs-on: ubuntu-latest defaults: run: @@ -47,10 +48,38 @@ jobs: cache-workspaces: "code" - name: Install cargo-nextest uses: taiki-e/install-action@cargo-nextest - - name: Build code - run: cargo nextest run --workspace --all-features --no-run - name: Run tests - run: cargo nextest run --workspace --all-features --no-fail-fast --failure-output final + run: cargo nextest run --workspace --all-features --no-fail-fast --failure-output final --test-threads 1 --exclude malachite-starknet-test --exclude malachite-discovery-test + + integration: + name: Integration Tests + runs-on: ubuntu-latest + defaults: + run: + working-directory: code + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: Install Protoc + uses: arduino/setup-protoc@v3 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + - name: Setup Node + uses: actions/setup-node@v3 + with: + node-version: "18" + - name: Install Quint + run: npm install -g @informalsystems/quint + - name: Setup Rust toolchain + uses: actions-rust-lang/setup-rust-toolchain@v1 + with: + cache-workspaces: "code" + - name: Install cargo-nextest + uses: taiki-e/install-action@cargo-nextest + - name: Run Starknet tests + run: cargo nextest run --all-features --no-fail-fast --failure-output final --test-threads 1 -p malachite-starknet-test + - name: Run Discovery tests + run: cargo nextest run --all-features --no-fail-fast --failure-output final --test-threads 1 -p malachite-discovery-test clippy: name: Clippy diff --git a/README.md b/README.md index 9dac4524e..7b22a6b33 100644 --- a/README.md +++ b/README.md @@ -6,7 +6,7 @@ [![Code coverage][coverage-image]][coverage-link] [![Apache 2.0 Licensed][license-image]][license-link] ![Rust Stable][rustc-image] -![Rust 1.74+][rustc-version] +![Rust 1.82+][rustc-version] [![Quint 0.18][quint-version]][quint-repo] Tendermint consensus in Rust @@ -21,7 +21,7 @@ The repository is split in three areas, each covering one of the important areas ## Requirements -- Rust v1.74+ ([rustup.rs](https://rustup.rs)) +- Rust v1.82+ 
([rustup.rs](https://rustup.rs)) - Quint v0.18+ ([github.com](https://github.com/informalsystems/quint)) ## License @@ -46,6 +46,6 @@ Unless required by applicable law or agreed to in writing, software distributed [license-image]: https://img.shields.io/badge/license-Apache_2.0-blue.svg [license-link]: https://github.com/informalsystems/hermes/blob/master/LICENSE [rustc-image]: https://img.shields.io/badge/Rust-stable-orange.svg -[rustc-version]: https://img.shields.io/badge/Rust-1.74+-orange.svg +[rustc-version]: https://img.shields.io/badge/Rust-1.82+-orange.svg [quint-version]: https://img.shields.io/badge/Quint-0.18-purple.svg [quint-repo]: https://github.com/informalsystems/quint diff --git a/code/Cargo.lock b/code/Cargo.lock index f749d05ab..4ffce89d5 100644 --- a/code/Cargo.lock +++ b/code/Cargo.lock @@ -58,18 +58,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "ahash" -version = "0.8.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" -dependencies = [ - "cfg-if", - "once_cell", - "version_check", - "zerocopy", -] - [[package]] name = "aho-corasick" version = "1.1.3" @@ -191,7 +179,7 @@ checksum = "965c2d33e53cb6b267e148a4cb0760bc01f4904c1cd4bb4002a085bb016d1490" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", "synstructure", ] @@ -203,7 +191,7 @@ checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -244,7 +232,7 @@ checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -255,7 +243,7 @@ checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -284,15 +272,15 @@ dependencies = [ [[package]] name = "autocfg" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" +checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] name = "axum" -version = "0.7.6" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f43644eed690f5374f1af436ecd6aea01cd201f6fbdf0178adaf6907afb2cec" +checksum = "504e3947307ac8326a5437504c517c4b56716c9d98fac0028c2acc7ca47d70ae" dependencies = [ "async-trait", "axum-core", @@ -301,7 +289,7 @@ dependencies = [ "http 1.1.0", "http-body 1.0.1", "http-body-util", - "hyper 1.4.1", + "hyper 1.5.0", "hyper-util", "itoa", "matchit", @@ -316,7 +304,7 @@ dependencies = [ "serde_urlencoded", "sync_wrapper 1.0.1", "tokio", - "tower 0.5.1", + "tower", "tower-layer", "tower-service", "tracing", @@ -324,9 +312,9 @@ dependencies = [ [[package]] name = "axum-core" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e6b8ba012a258d63c9adfa28b9ddcf66149da6f986c5b5452e629d5ee64bf00" +checksum = "09f2bd6146b97ae3359fa0cc6d6b376d9539582c7b4220f041a33ec24c226199" dependencies = [ "async-trait", "bytes", @@ -483,9 +471,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.1.21" +version = "1.1.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07b1695e2c7e8fc85310cde85aeaab7e3097f593c91d209d3f9df76c928100f0" +checksum = 
"b16803a61b81d9eabb7eae2588776c4c1e584b738ede45fdbb4c972cec1e9945" dependencies = [ "shlex", ] @@ -546,9 +534,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.17" +version = "4.5.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e5a21b8495e732f1b3c364c9949b201ca7bae518c502c80256c96ad79eaf6ac" +checksum = "b97f376d85a664d5837dbae44bf546e6477a679ff6610010f17276f686d867e8" dependencies = [ "clap_builder", "clap_derive", @@ -556,9 +544,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.17" +version = "4.5.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cf2dd12af7a047ad9d6da2b6b249759a22a7abc0f474c1dae1777afa4b21a73" +checksum = "19bc80abd44e4bed93ca373a0704ccbd1b710dc5749406201bb018272808dc54" dependencies = [ "anstream", "anstyle", @@ -568,14 +556,14 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.13" +version = "4.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "501d359d5f3dcaf6ecdeee48833ae73ec6e42723a1e52419c79abf9507eec0a0" +checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab" dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -764,7 +752,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -801,7 +789,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -812,7 +800,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -898,7 +886,7 @@ checksum = "62d671cc41a825ebabc75757b62d3d168c577f9149b2d49ece1dad1f72119d25" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -957,7 +945,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -1055,7 +1043,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -1135,9 +1123,9 @@ checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" [[package]] name = "flate2" -version = "1.0.33" +version = "1.0.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "324a1be68054ef05ad64b861cc9eaf1d623d2d8cb25b4bf2cb9cdd902b4bf253" +checksum = "a1b589b4dc103969ad3cf85c950899926ec64300a1a46d76c03a6072957036f0" dependencies = [ "crc32fast", "miniz_oxide 0.8.0", @@ -1149,6 +1137,12 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "foldhash" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f81ec6369c545a7d40e4589b5597581fa1c441fe1cce96dd1de43159910a36a2" + [[package]] name = "form_urlencoded" version = "1.2.1" @@ -1235,7 +1229,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -1378,7 +1372,7 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.12", - "indexmap 2.5.0", + "indexmap 2.6.0", "slab", "tokio", "tokio-util", @@ -1396,9 +1390,16 @@ name = "hashbrown" version = "0.14.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" + +[[package]] +name = "hashbrown" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e087f84d4f86bf4b218b927129862374b72199ae7d8657835f1e89000eea4fb" dependencies = [ - "ahash", "allocator-api2", + "equivalent", + "foldhash", ] [[package]] @@ -1567,9 +1568,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.9.4" +version = "1.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fcc0b4a115bf80b728eb8ea024ad5bd707b615bfed49e0665b6e0f86fd082d9" +checksum = "7d71d3574edd2771538b901e6549113b4006ece66150fb69c0fb6d9a2adae946" [[package]] name = "httpdate" @@ -1595,9 +1596,9 @@ dependencies = [ [[package]] name = "hyper" -version = "0.14.30" +version = "0.14.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a152ddd61dfaec7273fe8419ab357f33aee0d914c5f4efbf0d96fa749eea5ec9" +checksum = "8c08302e8fa335b151b788c775ff56e7a03ae64ff85c548ee820fecb70356e85" dependencies = [ "bytes", "futures-channel", @@ -1619,9 +1620,9 @@ dependencies = [ [[package]] name = "hyper" -version = "1.4.1" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50dfd22e0e76d0f662d429a5f80fcaf3855009297eab6a0a9f8543834744ba05" +checksum = "bbbff0a806a4728c99295b254c8838933b5b082d75e3cb70c8dab21fdfbcfa9a" dependencies = [ "bytes", "futures-channel", @@ -1638,18 +1639,17 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da62f120a8a37763efb0cf8fdf264b884c7b8b9ac8660b900c8661030c00e6ba" +checksum = "41296eb09f183ac68eec06e03cdbea2e759633d4067b2f6552fc2e009bcad08b" dependencies = [ "bytes", "futures-util", "http 1.1.0", "http-body 1.0.1", - "hyper 1.4.1", + "hyper 1.5.0", "pin-project-lite", "tokio", - "tower 0.4.13", "tower-service", ] @@ -1742,7 +1742,7 @@ dependencies = [ "bytes", "futures", "http 0.2.12", - "hyper 0.14.30", + "hyper 0.14.31", "log", "rand", "tokio", @@ -1769,12 +1769,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68b900aa2f7301e21c36462b170ee99994de34dff39a4a6a528e80e7376d07e5" +checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da" dependencies = [ "equivalent", - "hashbrown 0.14.5", + "hashbrown 0.15.0", "serde", ] @@ -1810,9 +1810,9 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.10.0" +version = "2.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "187674a687eed5fe42285b40c6291f9a01517d415fad1c3cbc6a9f778af7fcd4" +checksum = "ddc24109865250148c2e0f3d25d4f0f479571723792d3802153c60922a4fb708" [[package]] name = "is_terminal_polyfill" @@ -1839,7 +1839,7 @@ dependencies = [ "num-traits", "serde", "serde_json", - "serde_with 3.9.0", + "serde_with 3.11.0", ] [[package]] @@ -1850,9 +1850,9 @@ checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" [[package]] name = "js-sys" -version = "0.3.70" +version = "0.3.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1868808506b929d7b0cfa8f75951347aa71bb21144b7791bae35d9bccfcfe37a" +checksum = "6a88f1bda2bd75b0452a14784937d796722fdebfe50df998aeb3f0b7603019a9" dependencies = [ "wasm-bindgen", ] @@ -1868,9 +1868,9 @@ 
dependencies = [ [[package]] name = "lambdaworks-crypto" -version = "0.7.0" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fb5d4f22241504f7c7b8d2c3a7d7835d7c07117f10bff2a7d96a9ef6ef217c3" +checksum = "bbc2a4da0d9e52ccfe6306801a112e81a8fc0c76aa3e4449fefeda7fef72bb34" dependencies = [ "lambdaworks-math", "serde", @@ -1880,9 +1880,9 @@ dependencies = [ [[package]] name = "lambdaworks-math" -version = "0.7.0" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "358e172628e713b80a530a59654154bfc45783a6ed70ea284839800cebdf8f97" +checksum = "d1bd2632acbd9957afc5aeec07ad39f078ae38656654043bf16e046fa2730e23" dependencies = [ "serde", "serde_json", @@ -1896,9 +1896,9 @@ checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" [[package]] name = "libc" -version = "0.2.158" +version = "0.2.159" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8adc4bb1803a324070e64a98ae98f38934d91957a99cfb3a43dcbc01bc56439" +checksum = "561d97a539a36e26a9a5fad1ea11a3039a67714694aaa379433e580854bc3dc5" [[package]] name = "libp2p" @@ -2286,7 +2286,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -2396,11 +2396,11 @@ checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" [[package]] name = "lru" -version = "0.12.4" +version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37ee39891760e7d94734f6f63fedc29a2e4a152f836120753a72503f09fcf904" +checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" dependencies = [ - "hashbrown 0.14.5", + "hashbrown 0.15.0", ] [[package]] @@ -2417,9 +2417,11 @@ name = "malachite-actors" version = "0.1.0" dependencies = [ "async-trait", + "bytes", "derive-where", "eyre", "libp2p", + "malachite-blocksync", "malachite-common", "malachite-config", "malachite-consensus", @@ -2428,10 +2430,28 @@ dependencies = [ "malachite-metrics", "malachite-proto", "ractor", + "rand", "tokio", "tracing", ] +[[package]] +name = "malachite-blocksync" +version = "0.1.0" +dependencies = [ + "bytes", + "dashmap", + "derive-where", + "displaydoc", + "genawaiter", + "libp2p", + "malachite-common", + "malachite-metrics", + "rand", + "serde", + "tracing", +] + [[package]] name = "malachite-cli" version = "0.1.0" @@ -2485,6 +2505,7 @@ name = "malachite-consensus" version = "0.1.0" dependencies = [ "async-recursion", + "bytes", "derive-where", "genawaiter", "libp2p-identity", @@ -2539,9 +2560,11 @@ version = "0.1.0" dependencies = [ "bytes", "either", + "eyre", "futures", "libp2p", "libp2p-broadcast", + "malachite-blocksync", "malachite-discovery", "malachite-metrics", "seahash", @@ -2610,6 +2633,7 @@ dependencies = [ "bytesize", "libp2p-identity", "malachite-actors", + "malachite-blocksync", "malachite-common", "malachite-config", "malachite-consensus", @@ -2624,6 +2648,7 @@ dependencies = [ "malachite-test", "prost", "rand", + "serde", "serde_json", "tokio", "tracing", @@ -2639,7 +2664,9 @@ dependencies = [ "bytesize", "derive-where", "eyre", + "itertools", "malachite-actors", + "malachite-blocksync", "malachite-common", "malachite-config", "malachite-gossip-mempool", @@ -2686,6 +2713,7 @@ dependencies = [ "malachite-config", "malachite-starknet-app", "malachite-starknet-host", + "ractor", "rand", "tokio", "tracing", @@ -2738,7 +2766,7 @@ dependencies = [ "rand", "serde", "serde_json", - "serde_with 3.9.0", + "serde_with 3.11.0", "tempfile", 
] @@ -3048,9 +3076,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.19.0" +version = "1.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" +checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" [[package]] name = "opaque-debug" @@ -3125,9 +3153,9 @@ checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" [[package]] name = "pathdiff" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8835116a5c179084a830efb3adc117ab007512b535bc1a21c991d3b32a6b44dd" +checksum = "d61c5ce1153ab5b689d0c074c4e7fc613e942dfb7dd9eea5ab202d2ad91fe361" [[package]] name = "pem" @@ -3161,27 +3189,27 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" dependencies = [ "fixedbitset", - "indexmap 2.5.0", + "indexmap 2.6.0", ] [[package]] name = "pin-project" -version = "1.1.5" +version = "1.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" +checksum = "baf123a161dde1e524adf36f90bc5d8d3462824a9c43553ad07a8183161189ec" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.5" +version = "1.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" +checksum = "a4502d8515ca9f32f1fb543d987f63d95a14934883db45bdb48060b6b69257f8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -3276,7 +3304,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "479cf940fbbb3426c32c5d5176f62ad57549a0bb84773423ba8be9d089f5faba" dependencies = [ "proc-macro2", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -3290,9 +3318,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.86" +version = "1.0.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" +checksum = "b3e4daa0dcf6feba26f985457cdf104d4b4256fc5a09547140f3631bb076b19a" dependencies = [ "unicode-ident", ] @@ -3317,7 +3345,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -3332,9 +3360,9 @@ dependencies = [ [[package]] name = "prost-build" -version = "0.13.2" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8650aabb6c35b860610e9cff5dc1af886c9e25073b7b1712a68972af4281302" +checksum = "0c1318b19085f08681016926435853bbf7858f9c082d0999b80550ff5d9abe15" dependencies = [ "bytes", "heck", @@ -3347,7 +3375,7 @@ dependencies = [ "prost", "prost-types", "regex", - "syn 2.0.77", + "syn 2.0.79", "tempfile", ] @@ -3361,7 +3389,7 @@ dependencies = [ "itertools", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -3518,9 +3546,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.4" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0884ad60e090bf1345b93da0a5de8923c93884cd03f40dfcfddd3b4bee661853" +checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f" dependencies = [ "bitflags 2.6.0", ] @@ -3538,14 
+3566,14 @@ dependencies = [ [[package]] name = "regex" -version = "1.10.6" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619" +checksum = "38200e5ee88914975b69f657f0801b6f6dccafd44fd9326302a4aaeecfacb1d8" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.7", - "regex-syntax 0.8.4", + "regex-automata 0.4.8", + "regex-syntax 0.8.5", ] [[package]] @@ -3559,13 +3587,13 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df" +checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.8.4", + "regex-syntax 0.8.5", ] [[package]] @@ -3576,9 +3604,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.8.4" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "resolv-conf" @@ -3690,9 +3718,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.13" +version = "0.23.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2dabaac7466917e566adb06783a81ca48944c6898a1b08b9374106dd671f4c8" +checksum = "5fbb44d7acc4e873d613422379f69f237a1b141928c02f6bc6ccfddddc2d7993" dependencies = [ "once_cell", "ring 0.17.8", @@ -3704,9 +3732,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.8.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc0a2ce646f8655401bb81e7927b812614bd5d91dbc968696be50603510fcaf0" +checksum = "16f1201b3c9a7ee8039bcadc17b7e605e2945b27eee7631788c1bd2b0643674b" [[package]] name = "rustls-webpki" @@ -3731,9 +3759,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6" +checksum = "0e819f2bc632f285be6d7cd36e25940d45b2391dd6d9b939e79de557f7014248" [[package]] name = "rw-stream-sink" @@ -3801,7 +3829,7 @@ checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -3839,9 +3867,9 @@ dependencies = [ [[package]] name = "serde_spanned" -version = "0.6.7" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb5b1b31579f3811bf615c144393417496f152e12ac8b7663bf664f4a815306d" +checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1" dependencies = [ "serde", ] @@ -3875,19 +3903,19 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.9.0" +version = "3.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69cecfa94848272156ea67b2b1a53f20fc7bc638c4a46d2f8abde08f05f4b857" +checksum = "8e28bdad6db2b8340e449f7108f020b3b092e8583a9e3fb82713e1d4e71fe817" dependencies = [ "base64 0.22.1", "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.5.0", + "indexmap 2.6.0", "serde", "serde_derive", "serde_json", - "serde_with_macros 3.9.0", + "serde_with_macros 
3.11.0", "time", ] @@ -3900,19 +3928,19 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] name = "serde_with_macros" -version = "3.9.0" +version = "3.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8fee4991ef4f274617a51ad4af30519438dacb2f56ac773b08a1922ff743350" +checksum = "9d846214a9854ef724f3da161b426242d8de7c1fc7de2f89bb1efcb154dca79d" dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -4096,9 +4124,9 @@ dependencies = [ [[package]] name = "starknet-types-core" -version = "0.1.5" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce6bacf0ba19bc721e518bc4bf389ff13daa8a7c5db5fd320600473b8aa9fcbd" +checksum = "fa1b9e01ccb217ab6d475c5cda05dbb22c30029f7bb52b192a010a00d77a3d74" dependencies = [ "lambdaworks-crypto", "lambdaworks-math", @@ -4139,7 +4167,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -4167,9 +4195,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.77" +version = "2.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f35bcdf61fd8e7be6caf75f429fdca8beb3ed76584befb503b1569faee373ed" +checksum = "89132cd0bf050864e1d38dc3bbc07a0eb8e7530af26344d3d2bbbef83499f590" dependencies = [ "proc-macro2", "quote", @@ -4196,7 +4224,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -4250,7 +4278,7 @@ checksum = "08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -4356,7 +4384,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -4395,32 +4423,17 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.22.21" +version = "0.22.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b072cee73c449a636ffd6f32bd8de3a9f7119139aff882f44943ce2986dc5cf" +checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ - "indexmap 2.5.0", + "indexmap 2.6.0", "serde", "serde_spanned", "toml_datetime", "winnow", ] -[[package]] -name = "tower" -version = "0.4.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" -dependencies = [ - "futures-core", - "futures-util", - "pin-project", - "pin-project-lite", - "tokio", - "tower-layer", - "tower-service", -] - [[package]] name = "tower" version = "0.5.1" @@ -4481,7 +4494,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -4572,9 +4585,9 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.15" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" +checksum = "5ab17db44d7388991a428b2ee655ce0c212e862eff1768a455c58f9aad6e7893" [[package]] name = "unicode-ident" @@ -4677,9 +4690,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.93" +version = "0.2.95" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a82edfc16a6c469f5f44dc7b571814045d60404b55a0ee849f9bcfa2e63dd9b5" +checksum = "128d1e363af62632b8eb57219c8fd7877144af57558fb2ef0368d0087bddeb2e" dependencies = [ "cfg-if", "once_cell", @@ -4688,24 +4701,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.93" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9de396da306523044d3302746f1208fa71d7532227f15e347e2d93e4145dd77b" +checksum = "cb6dd4d3ca0ddffd1dd1c9c04f94b868c37ff5fac97c30b97cff2d74fce3a358" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-macro" -version = "0.2.93" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "585c4c91a46b072c92e908d99cb1dcdf95c5218eeb6f3bf1efa991ee7a68cccf" +checksum = "e79384be7f8f5a9dd5d7167216f022090cf1f9ec128e6e6a482a2cb5c5422c56" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -4713,28 +4726,28 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.93" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" +checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.93" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c62a0a307cb4a311d3a07867860911ca130c3494e8c2719593806c08bc5d0484" +checksum = "65fc09f10666a9f147042251e0dda9c18f166ff7de300607007e96bdebc1068d" [[package]] name = "web-sys" -version = "0.3.70" +version = "0.3.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26fdeaafd9bd129f65e7c031593c24d62186301e0c72c8978fa1678be7d532c0" +checksum = "f6488b90108c040df0fe62fa815cbdee25124641df01814dd7282749234c6112" dependencies = [ "js-sys", "wasm-bindgen", @@ -4956,9 +4969,9 @@ checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" -version = "0.6.18" +version = "0.6.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68a9bda4691f099d435ad181000724da8e5899daa10713c2d432552b9ccd3a6f" +checksum = "36c1fec1a2bb5866f07c25f68c26e565c4c200aebb96d7e55710c19d3e8ac49b" dependencies = [ "memchr", ] @@ -5081,7 +5094,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -5101,5 +5114,5 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] diff --git a/code/Cargo.toml b/code/Cargo.toml index 791b613b0..1e4f69568 100644 --- a/code/Cargo.toml +++ b/code/Cargo.toml @@ -3,6 +3,7 @@ resolver = "2" members = [ "crates/actors", + "crates/blocksync", "crates/cli", "crates/common", "crates/config", @@ -15,10 +16,15 @@ members = [ "crates/node", "crates/proto", "crates/round", - "crates/starknet/*", + "crates/vote", + + # Test "crates/test", "crates/test/mbt", - "crates/vote", + + # Starknet + "crates/starknet/*", + "crates/starknet/*" ] [workspace.package] @@ -27,7 +33,10 @@ edition = "2021" repository = 
"https://github.com/informalsystems/malachite" license = "Apache-2.0" publish = false -rust-version = "1.77" +rust-version = "1.82" + +[profile.dev] +opt-level = 1 [profile.release] lto = "thin" @@ -42,6 +51,7 @@ unused_crate_dependencies = "warn" [workspace.dependencies] malachite-actors = { version = "0.1.0", path = "crates/actors" } +malachite-blocksync = { version = "0.1.0", path = "crates/blocksync" } malachite-cli = { version = "0.1.0", path = "crates/cli" } malachite-common = { version = "0.1.0", path = "crates/common" } malachite-config = { version = "0.1.0", path = "crates/config" } @@ -72,11 +82,13 @@ async-recursion = "1.1" async-trait = "0.1.83" axum = "0.7" base64 = "0.22.0" +bon = "2.3.0" bytesize = "1.3" bytes = { version = "1", default-features = false } clap = "4.5" color-eyre = "0.6" config = { version = "0.14", features = ["toml"], default-features = false } +dashmap = "6.1.0" derive-where = "1.2.7" displaydoc = { version = "0.2", default-features = false } directories = "5.0.1" diff --git a/code/clippy.toml b/code/clippy.toml index 118bbe28e..c3aa6421b 100644 --- a/code/clippy.toml +++ b/code/clippy.toml @@ -1 +1 @@ -msrv = "1.77.0" +msrv = "1.82.0" diff --git a/code/config.toml b/code/config.toml index 29f20e833..1b11a8634 100644 --- a/code/config.toml +++ b/code/config.toml @@ -150,15 +150,34 @@ transport = "quic" # Broadcast is an experimental protocol with no additional configuration options. type = "gossipsub" + +####################################################### +### BlockSync Configuration Options ### +####################################################### +[blocksync] +# Enable BlockSync +# Override with MALACHITE__BLOCKSYNC__ENABLED env variable +enabled = true + +# Interval at which to update other peers of our status +# Override with MALACHITE__BLOCKSYNC__STATUS_UPDATE_INTERVAL env variable +status_update_interval = "10s" + +# Timeout duration for block sync requests +# Override with MALACHITE__BLOCKSYNC__REQUEST_TIMEOUT env variable +request_timeout = "10s" + ####################################################### ### Metrics Configuration Options ### ####################################################### [metrics] # Enable the metrics server +# Override with MALACHITE__METRICS__ENABLED env variable enabled = true # Metrics are exported at `http://127.0.0.1:9000/metrics` +# Override with MALACHITE__METRICS__LISTEN_ADDR env variable listen_addr = "127.0.0.1:9000" ####################################################### @@ -170,11 +189,13 @@ listen_addr = "127.0.0.1:9000" # Possible values: # - "single_threaded": A single threaded runtime (default) # - "multi_threaded": A multi-threaded runtime +# Override with MALACHITE__RUNTIME__FLAVOR env variable flavor = "single_threaded" # For the multi-threaded runtime only. # Sets the number of worker threads the Runtime will use. # If set to 0, defaults to the number of cores available to the system. +# Override with MALACHITE__RUNTIME__WORKER_THREADS env variable # worker_threads = 4 @@ -191,5 +212,9 @@ txs_per_part = 256 time_allowance_factor = 0.5 # Override with MALACHITE__TEST__EXEC_TIME_PER_TX env variable exec_time_per_tx = "1ms" +# Maximum number of blocks, relative to the current block, that can be kept in the block store. +# A value of 0 indicates that no blocks are pruned. 
+# Override with MALACHITE__TEST__MAX_RETAIN_BLOCKS env variable +max_retain_blocks = 1000 # Override with MALACHITE__TEST__VOTE_EXTENSIONS__ENABLED and MALACHITE__TEST__VOTE_EXTENSIONS__SIZE env variables vote_extensions = { enabled = false, size = "0 KB" } diff --git a/code/crates/actors/Cargo.toml b/code/crates/actors/Cargo.toml index b1a73c8b4..e8536420d 100644 --- a/code/crates/actors/Cargo.toml +++ b/code/crates/actors/Cargo.toml @@ -15,6 +15,8 @@ debug = ["std"] [dependencies] malachite-common.workspace = true +malachite-blocksync.workspace = true +bytes = { workspace = true, features = ["serde"] } malachite-consensus.workspace = true malachite-gossip-consensus.workspace = true malachite-gossip-mempool.workspace = true @@ -27,5 +29,6 @@ derive-where = { workspace = true } eyre = { workspace = true } libp2p = { workspace = true } ractor = { workspace = true, features = ["async-trait"] } +rand = { workspace = true } tokio = { workspace = true, features = ["full"] } tracing = { workspace = true } diff --git a/code/crates/actors/src/block_sync.rs b/code/crates/actors/src/block_sync.rs new file mode 100644 index 000000000..e69a2e199 --- /dev/null +++ b/code/crates/actors/src/block_sync.rs @@ -0,0 +1,388 @@ +use std::collections::HashMap; +use std::time::Duration; + +use async_trait::async_trait; +use bytes::Bytes; +use derive_where::derive_where; +use eyre::eyre; +use libp2p::request_response::InboundRequestId; +use libp2p::PeerId; +use ractor::{Actor, ActorProcessingErr, ActorRef}; +use rand::SeedableRng; +use tokio::task::JoinHandle; + +use malachite_blocksync::{self as blocksync, OutboundRequestId}; +use malachite_blocksync::{Request, SyncedBlock}; +use malachite_common::{Certificate, Context}; +use tracing::{debug, error, warn}; + +use crate::gossip_consensus::{GossipConsensusMsg, GossipConsensusRef, GossipEvent, Status}; +use crate::host::{HostMsg, HostRef}; +use crate::util::forward::forward; +use crate::util::ticker::ticker; +use crate::util::timers::{TimeoutElapsed, TimerScheduler}; + +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] +pub enum Timeout { + Request(OutboundRequestId), +} + +type Timers = TimerScheduler>; + +pub type BlockSyncRef = ActorRef>; + +#[derive_where(Clone, Debug)] +pub struct RawDecidedBlock { + pub height: Ctx::Height, + pub certificate: Certificate, + pub block_bytes: Bytes, +} + +#[derive_where(Clone, Debug)] +pub struct InflightRequest { + pub peer_id: PeerId, + pub request_id: OutboundRequestId, + pub request: Request, +} + +pub type InflightRequests = HashMap>; + +#[derive_where(Debug)] +pub enum Msg { + /// Internal tick + Tick, + + /// Receive an even from gossip layer + GossipEvent(GossipEvent), + + /// Consensus has decided on a value at the given height + Decided(Ctx::Height), + + /// Consensus has started a new height + StartHeight(Ctx::Height), + + /// Host has a response for the blocks request + GotDecidedBlock(Ctx::Height, InboundRequestId, Option>), + + /// A timeout has elapsed + TimeoutElapsed(TimeoutElapsed), +} + +impl From> for Msg { + fn from(elapsed: TimeoutElapsed) -> Self { + Msg::TimeoutElapsed(elapsed) + } +} + +#[derive(Debug)] +pub struct Params { + pub status_update_interval: Duration, + pub request_timeout: Duration, +} + +impl Default for Params { + fn default() -> Self { + Self { + status_update_interval: Duration::from_secs(10), + request_timeout: Duration::from_secs(10), + } + } +} + +pub struct Args { + pub initial_height: Ctx::Height, +} + +pub struct State { + /// The state of the blocksync state machine + 
blocksync: blocksync::State, + + /// Scheduler for timers + timers: Timers, + + /// In-flight requests + inflight: InflightRequests, + + /// Task for sending status updates + ticker: JoinHandle<()>, +} + +#[allow(dead_code)] +pub struct BlockSync { + ctx: Ctx, + gossip: GossipConsensusRef, + host: HostRef, + params: Params, + metrics: blocksync::Metrics, +} + +impl BlockSync +where + Ctx: Context, +{ + pub fn new( + ctx: Ctx, + gossip: GossipConsensusRef, + host: HostRef, + params: Params, + metrics: blocksync::Metrics, + ) -> Self { + Self { + ctx, + gossip, + host, + params, + metrics, + } + } + + pub async fn spawn( + self, + initial_height: Ctx::Height, + ) -> Result<(BlockSyncRef, JoinHandle<()>), ractor::SpawnErr> { + Actor::spawn(None, self, Args { initial_height }).await + } + + async fn process_input( + &self, + myself: &ActorRef>, + state: &mut State, + input: blocksync::Input, + ) -> Result<(), ActorProcessingErr> { + malachite_blocksync::process!( + input: input, + state: &mut state.blocksync, + metrics: &self.metrics, + with: effect => { + self.handle_effect(myself, &mut state.timers, &mut state.inflight, effect).await + } + ) + } + + async fn get_earliest_block_height(&self) -> Result { + ractor::call!(self.host, |reply_to| HostMsg::GetEarliestBlockHeight { + reply_to + }) + .map_err(|e| eyre!("Failed to get earliest block height: {e:?}").into()) + } + + async fn handle_effect( + &self, + myself: &ActorRef>, + timers: &mut Timers, + inflight: &mut InflightRequests, + effect: blocksync::Effect, + ) -> Result, ActorProcessingErr> { + use blocksync::Effect; + + match effect { + Effect::PublishStatus(height) => { + let earliest_block_height = self.get_earliest_block_height().await?; + + self.gossip + .cast(GossipConsensusMsg::PublishStatus(Status::new( + height, + earliest_block_height, + )))?; + } + + Effect::SendRequest(peer_id, request) => { + let result = ractor::call!(self.gossip, |reply_to| { + GossipConsensusMsg::OutgoingBlockSyncRequest(peer_id, request.clone(), reply_to) + }); + + match result { + Ok(request_id) => { + timers + .start_timer(Timeout::Request(request_id), self.params.request_timeout); + + inflight.insert( + request_id, + InflightRequest { + peer_id, + request_id, + request, + }, + ); + } + Err(e) => { + error!("Failed to send request to gossip layer: {e}"); + } + } + } + + Effect::SendResponse(request_id, response) => { + self.gossip + .cast(GossipConsensusMsg::OutgoingBlockSyncResponse( + request_id, response, + ))?; + } + + Effect::GetBlock(request_id, height) => { + self.host.call_and_forward( + |reply_to| HostMsg::GetDecidedBlock { height, reply_to }, + myself, + move |block| Msg::::GotDecidedBlock(height, request_id, block), + None, + )?; + } + } + + Ok(blocksync::Resume::default()) + } + + async fn handle_msg( + &self, + myself: ActorRef>, + msg: Msg, + state: &mut State, + ) -> Result<(), ActorProcessingErr> { + match msg { + Msg::Tick => { + self.process_input(&myself, state, blocksync::Input::Tick) + .await?; + } + + Msg::GossipEvent(GossipEvent::Status(peer_id, status)) => { + let status = blocksync::Status { + peer_id, + height: status.height, + earliest_block_height: status.earliest_block_height, + }; + + self.process_input(&myself, state, blocksync::Input::Status(status)) + .await?; + } + + Msg::GossipEvent(GossipEvent::BlockSyncRequest(request_id, from, request)) => { + self.process_input( + &myself, + state, + blocksync::Input::Request(request_id, from, request), + ) + .await?; + } + + 
Msg::GossipEvent(GossipEvent::BlockSyncResponse(request_id, response)) => { + // Cancel the timer associated with the request for which we just received a response + state.timers.cancel(&Timeout::Request(request_id)); + + self.process_input( + &myself, + state, + blocksync::Input::Response(request_id, response), + ) + .await?; + } + + Msg::GossipEvent(_) => { + // Ignore other gossip events + } + + Msg::Decided(height) => { + self.process_input(&myself, state, blocksync::Input::Decided(height)) + .await?; + } + + Msg::StartHeight(height) => { + self.process_input(&myself, state, blocksync::Input::StartHeight(height)) + .await?; + } + + Msg::GotDecidedBlock(height, request_id, block) => { + self.process_input( + &myself, + state, + blocksync::Input::GotBlock(request_id, height, block), + ) + .await?; + } + + Msg::TimeoutElapsed(elapsed) => { + let Some(timeout) = state.timers.intercept_timer_msg(elapsed) else { + // Timer was cancelled or already processed, ignore + return Ok(()); + }; + + warn!(?timeout, "Timeout elapsed"); + + match timeout { + Timeout::Request(request_id) => { + if let Some(inflight) = state.inflight.remove(&request_id) { + self.process_input( + &myself, + state, + blocksync::Input::RequestTimedOut( + inflight.peer_id, + inflight.request, + ), + ) + .await?; + } else { + debug!(%request_id, "Timeout for unknown request"); + } + } + } + } + } + + Ok(()) + } +} + +#[async_trait] +impl Actor for BlockSync +where + Ctx: Context, +{ + type Msg = Msg; + type State = State; + type Arguments = Args; + + async fn pre_start( + &self, + myself: ActorRef, + args: Args, + ) -> Result { + let forward = forward(myself.clone(), Some(myself.get_cell()), Msg::GossipEvent).await?; + self.gossip.cast(GossipConsensusMsg::Subscribe(forward))?; + + let ticker = tokio::spawn(ticker( + self.params.status_update_interval, + myself.clone(), + || Msg::Tick, + )); + + let rng = Box::new(rand::rngs::StdRng::from_entropy()); + + Ok(State { + blocksync: blocksync::State::new(rng, args.initial_height), + timers: Timers::new(myself.clone()), + inflight: HashMap::new(), + ticker, + }) + } + + #[tracing::instrument(name = "blocksync", skip_all)] + async fn handle( + &self, + myself: ActorRef, + msg: Self::Msg, + state: &mut Self::State, + ) -> Result<(), ActorProcessingErr> { + if let Err(e) = self.handle_msg(myself, msg, state).await { + error!("Error handling message: {e:?}"); + } + + Ok(()) + } + + async fn post_stop( + &self, + _myself: ActorRef, + state: &mut Self::State, + ) -> Result<(), ActorProcessingErr> { + state.ticker.abort(); + Ok(()) + } +} diff --git a/code/crates/actors/src/consensus.rs b/code/crates/actors/src/consensus.rs index 597822807..b82426eb1 100644 --- a/code/crates/actors/src/consensus.rs +++ b/code/crates/actors/src/consensus.rs @@ -4,27 +4,32 @@ use std::time::Duration; use async_trait::async_trait; use eyre::eyre; use libp2p::PeerId; -use ractor::{Actor, ActorProcessingErr, ActorRef}; -use tokio::sync::mpsc; +use ractor::{Actor, ActorProcessingErr, ActorRef, RpcReplyPort}; +use tokio::sync::broadcast; use tokio::time::Instant; use tracing::{debug, error, info, warn}; -use malachite_common::{Context, Extension, Round, Timeout, TimeoutStep, ValidatorSet}; +use malachite_blocksync as blocksync; +use malachite_common::{ + Context, Extension, Proposal, Round, SignedProposal, Timeout, TimeoutStep, ValidatorSet, +}; use malachite_config::TimeoutConfig; use malachite_consensus::{Effect, Resume}; use malachite_metrics::Metrics; -use crate::gossip_consensus::{GossipConsensusRef, 
GossipEvent, Msg as GossipConsensusMsg}; +use crate::block_sync::Msg as BlockSyncMsg; +use crate::gossip_consensus::{GossipConsensusRef, GossipEvent, Msg as GossipConsensusMsg, Status}; use crate::host::{HostMsg, HostRef, LocallyProposedValue, ProposedValue}; use crate::util::forward::forward; use crate::util::timers::{TimeoutElapsed, TimerScheduler}; +use crate::block_sync::BlockSyncRef; pub use malachite_consensus::Params as ConsensusParams; pub use malachite_consensus::State as ConsensusState; pub type ConsensusRef = ActorRef>; -pub type TxDecision = mpsc::Sender<(::Height, Round, ::Value)>; +pub type TxDecision = broadcast::Sender>; pub struct Consensus where @@ -35,6 +40,7 @@ where timeout_config: TimeoutConfig, gossip_consensus: GossipConsensusRef, host: HostRef, + block_sync: Option>, metrics: Metrics, tx_decision: Option>, } @@ -56,6 +62,9 @@ pub enum Msg { /// Received and assembled the full value proposed by a validator ReceivedProposedValue(ProposedValue), + + /// Get the status of the consensus state machine + GetStatus(RpcReplyPort>), } type ConsensusInput = malachite_consensus::Input; @@ -119,12 +128,14 @@ impl Consensus where Ctx: Context, { + #[allow(clippy::too_many_arguments)] pub fn new( ctx: Ctx, params: ConsensusParams, timeout_config: TimeoutConfig, gossip_consensus: GossipConsensusRef, host: HostRef, + block_sync: Option>, metrics: Metrics, tx_decision: Option>, ) -> Self { @@ -134,6 +145,7 @@ where timeout_config, gossip_consensus, host, + block_sync, metrics, tx_decision, } @@ -146,6 +158,7 @@ where timeout_config: TimeoutConfig, gossip_consensus: GossipConsensusRef, host: HostRef, + block_sync: Option>, metrics: Metrics, tx_decision: Option>, ) -> Result>, ractor::SpawnErr> { @@ -155,6 +168,7 @@ where timeout_config, gossip_consensus, host, + block_sync, metrics, tx_decision, ); @@ -169,6 +183,14 @@ where state: &mut State, input: ConsensusInput, ) -> Result<(), ActorProcessingErr> { + if let ConsensusInput::StartHeight(height, _) = input { + if let Some(block_sync) = &self.block_sync { + block_sync + .cast(BlockSyncMsg::StartHeight(height)) + .map_err(|e| eyre!("Error when sending start height to BlockSync: {e:?}"))?; + } + } + malachite_consensus::process!( input: input, state: &mut state.consensus, @@ -224,7 +246,6 @@ where match event { GossipEvent::Listening(address) => { info!(%address, "Listening"); - Ok(()) } GossipEvent::PeerConnected(peer_id) => { @@ -261,8 +282,6 @@ where error!("Error when starting height {height}: {e:?}"); } } - - Ok(()) } GossipEvent::PeerDisconnected(peer_id) => { @@ -273,8 +292,33 @@ where // TODO: pause/stop consensus, if necessary } + } + + GossipEvent::BlockSyncResponse( + request_id, + blocksync::Response { height, block }, + ) => { + debug!(%height, %request_id, "Received BlockSync response"); + + let Some(block) = block else { + error!(%height, %request_id, "Received empty block sync response"); + return Ok(()); + }; - Ok(()) + if let Err(e) = self + .process_input( + &myself, + state, + ConsensusInput::ReceivedSyncedBlock( + block.proposal, + block.certificate, + block.block_bytes, + ), + ) + .await + { + error!(%height, %request_id, "Error when processing received synced block: {e:?}"); + } } GossipEvent::Vote(from, vote) => { @@ -284,8 +328,6 @@ where { error!(%from, "Error when processing vote: {e:?}"); } - - Ok(()) } GossipEvent::Proposal(from, proposal) => { @@ -295,8 +337,6 @@ where { error!(%from, "Error when processing proposal: {e:?}"); } - - Ok(()) } GossipEvent::ProposalPart(from, part) => { @@ -314,10 +354,12 @@ 
where .map_err(|e| { eyre!("Error when forwarding proposal parts to host: {e:?}") })?; - - Ok(()) } + + _ => {} } + + Ok(()) } Msg::TimeoutElapsed(elapsed) => { @@ -356,10 +398,20 @@ where Ok(()) } + + Msg::GetStatus(reply_to) => { + let earliest_block_height = self.get_earliest_block_height().await?; + let status = Status::new(state.consensus.driver.height(), earliest_block_height); + + if let Err(e) = reply_to.send(status) { + error!("Error when replying to GetStatus message: {e:?}"); + } + + Ok(()) + } } } - #[tracing::instrument(skip(self, myself))] fn get_value( &self, myself: &ActorRef>, @@ -392,7 +444,6 @@ where Ok(()) } - #[tracing::instrument(skip(self))] async fn get_validator_set( &self, height: Ctx::Height, @@ -406,6 +457,13 @@ where Ok(validator_set) } + async fn get_earliest_block_height(&self) -> Result { + ractor::call!(self.host, |reply_to| HostMsg::GetEarliestBlockHeight { + reply_to + }) + .map_err(|e| eyre!("Failed to get earliest block height: {e:?}").into()) + } + async fn handle_effect( &self, myself: &ActorRef>, @@ -465,7 +523,7 @@ where Effect::Broadcast(gossip_msg) => { self.gossip_consensus - .cast(GossipConsensusMsg::BroadcastMsg(gossip_msg)) + .cast(GossipConsensusMsg::Publish(gossip_msg)) .map_err(|e| eyre!("Error when broadcasting gossip message: {e:?}"))?; Ok(Resume::Continue) @@ -490,26 +548,53 @@ where Ok(Resume::ValidatorSet(height, validator_set)) } - Effect::Decide { - height, - round, - value, - commits, - } => { + Effect::Decide { proposal, commits } => { if let Some(tx_decision) = &self.tx_decision { - let _ = tx_decision.send((height, round, value.clone())).await; + let _ = tx_decision.send(proposal.clone()); } + let proposal_height = proposal.height(); + self.host .cast(HostMsg::Decide { - height, - round, - value, + proposal, commits, consensus: myself.clone(), }) .map_err(|e| eyre!("Error when sending decided value to host: {e:?}"))?; + if let Some(block_sync) = &self.block_sync { + block_sync + .cast(BlockSyncMsg::Decided(proposal_height)) + .map_err(|e| { + eyre!("Error when sending decided height to blocksync: {e:?}") + })?; + } + + Ok(Resume::Continue) + } + + Effect::SyncedBlock { + proposal, + block_bytes, + } => { + // TODO - add timeout? 
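+                // Hand the raw block bytes to the host for decoding and validation; the host's
+                // reply is forwarded back to this actor as a ReceivedProposedValue message, so the
+                // synced value re-enters the normal consensus flow.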
+ debug!( + "Consensus received synced block for {}, sending to host", + proposal.height() + ); + + self.host.call_and_forward( + |reply_to| HostMsg::ProcessSyncedBlockBytes { + proposal, + block_bytes, + reply_to, + }, + myself, + |proposed| Msg::::ReceivedProposedValue(proposed), + None, + )?; + Ok(Resume::Continue) } } @@ -566,7 +651,11 @@ where msg: Msg, state: &mut State, ) -> Result<(), ActorProcessingErr> { - self.handle_msg(myself, state, msg).await + if let Err(e) = self.handle_msg(myself, state, msg).await { + error!("Error when handling message: {e:?}"); + } + + Ok(()) } async fn post_stop( diff --git a/code/crates/actors/src/gossip_consensus.rs b/code/crates/actors/src/gossip_consensus.rs index 397635dc3..f4f72d29d 100644 --- a/code/crates/actors/src/gossip_consensus.rs +++ b/code/crates/actors/src/gossip_consensus.rs @@ -4,10 +4,13 @@ use std::marker::PhantomData; use async_trait::async_trait; use derive_where::derive_where; use libp2p::identity::Keypair; +use libp2p::request_response::{InboundRequestId, OutboundRequestId}; use ractor::{Actor, ActorProcessingErr, ActorRef, RpcReplyPort}; use tokio::task::JoinHandle; -use tracing::{debug, error}; +use tracing::{error, trace}; +use malachite_blocksync::{self as blocksync, Response}; +use malachite_blocksync::{RawMessage, Request}; use malachite_common::{Context, SignedProposal, SignedVote}; use malachite_consensus::SignedConsensusMsg; use malachite_gossip_consensus::handle::CtrlHandle; @@ -70,11 +73,19 @@ pub struct Args { #[derive_where(Clone, Debug, PartialEq, Eq)] pub enum GossipEvent { Listening(Multiaddr), + PeerConnected(PeerId), PeerDisconnected(PeerId), + Vote(PeerId, SignedVote), + Proposal(PeerId, SignedProposal), ProposalPart(PeerId, StreamMessage), + + Status(PeerId, Status), + + BlockSyncRequest(InboundRequestId, PeerId, Request), + BlockSyncResponse(OutboundRequestId, Response), } pub enum State { @@ -88,20 +99,44 @@ pub enum State { }, } +#[derive_where(Clone, Debug, PartialEq, Eq)] +pub struct Status { + pub height: Ctx::Height, + pub earliest_block_height: Ctx::Height, +} + +impl Status { + pub fn new(height: Ctx::Height, earliest_block_height: Ctx::Height) -> Self { + Self { + height, + earliest_block_height, + } + } +} + pub enum Msg { /// Subscribe this actor to receive gossip events Subscribe(ActorRef>), - /// Broadcast a signed consensus message - BroadcastMsg(SignedConsensusMsg), + /// Publish a signed consensus message + Publish(SignedConsensusMsg), + + /// Publish a proposal part + PublishProposalPart(StreamMessage), - /// Broadcast a proposal part - BroadcastProposalPart(StreamMessage), + /// Publish status + PublishStatus(Status), + + /// Send a request to a peer, returning the outbound request ID + OutgoingBlockSyncRequest(PeerId, Request, RpcReplyPort), + + /// Send a response for a blocks request to a peer + OutgoingBlockSyncResponse(InboundRequestId, Response), /// Request for number of peers from gossip GetState { reply: RpcReplyPort }, - // Internal message + // Event emitted by the gossip layer #[doc(hidden)] NewEvent(Event), } @@ -172,13 +207,13 @@ where match msg { Msg::Subscribe(subscriber) => subscribers.push(subscriber), - Msg::BroadcastMsg(msg) => match Codec::encode_msg(msg) { - Ok(data) => ctrl_handle.broadcast(Channel::Consensus, data).await?, + Msg::Publish(msg) => match Codec::encode_msg(msg) { + Ok(data) => ctrl_handle.publish(Channel::Consensus, data).await?, Err(e) => error!("Failed to encode gossip message: {e:?}"), }, - Msg::BroadcastProposalPart(msg) => { - debug!( + 
Msg::PublishProposalPart(msg) => { + trace!( stream_id = %msg.stream_id, sequence = %msg.sequence, "Broadcasting proposal part" @@ -186,11 +221,48 @@ where let data = Codec::encode_stream_msg(msg); match data { - Ok(data) => ctrl_handle.broadcast(Channel::ProposalParts, data).await?, + Ok(data) => ctrl_handle.publish(Channel::ProposalParts, data).await?, Err(e) => error!("Failed to encode proposal part: {e:?}"), } } + Msg::PublishStatus(status) => { + let status = blocksync::Status { + peer_id: ctrl_handle.peer_id(), + height: status.height, + earliest_block_height: status.earliest_block_height, + }; + + let data = Codec::encode_status(status); + match data { + Ok(data) => ctrl_handle.publish(Channel::BlockSync, data).await?, + Err(e) => error!("Failed to encode status message: {e:?}"), + } + } + + Msg::OutgoingBlockSyncRequest(peer_id, request, reply_to) => { + let request = Codec::encode_request(request); + match request { + Ok(data) => { + let request_id = ctrl_handle.blocksync_request(peer_id, data).await?; + reply_to.send(request_id)?; + } + Err(e) => error!("Failed to encode request message: {e:?}"), + } + } + + Msg::OutgoingBlockSyncResponse(request_id, response) => { + let msg = match Codec::encode_response(response) { + Ok(msg) => msg, + Err(e) => { + error!(%request_id, "Failed to encode block response message: {e:?}"); + return Ok(()); + } + }; + + ctrl_handle.blocksync_reply(request_id, msg).await? + } + Msg::NewEvent(Event::Listening(addr)) => { self.publish(GossipEvent::Listening(addr), subscribers); } @@ -231,7 +303,7 @@ where } }; - debug!( + trace!( %from, stream_id = %msg.stream_id, sequence = %msg.sequence, @@ -241,6 +313,67 @@ where self.publish(GossipEvent::ProposalPart(from, msg), subscribers); } + Msg::NewEvent(Event::Message(Channel::BlockSync, from, data)) => { + let status = match Codec::decode_status(data) { + Ok(status) => status, + Err(e) => { + error!(%from, "Failed to decode status message: {e:?}"); + return Ok(()); + } + }; + + if from != status.peer_id { + error!(%from, %status.peer_id, "Mismatched peer ID in status message"); + return Ok(()); + } + + trace!(%from, height = %status.height, "Received status"); + + self.publish( + GossipEvent::Status( + status.peer_id, + Status::new(status.height, status.earliest_block_height), + ), + subscribers, + ); + } + + Msg::NewEvent(Event::BlockSync(raw_msg)) => match raw_msg { + RawMessage::Request { + request_id, + peer, + body, + } => { + let request = match Codec::decode_request(body) { + Ok(request) => request, + Err(e) => { + error!(%peer, "Failed to decode BlockSync request: {e:?}"); + return Ok(()); + } + }; + + self.publish( + GossipEvent::BlockSyncRequest(request_id, peer, request), + subscribers, + ); + } + + RawMessage::Response { request_id, body } => { + let response = match Codec::decode_response(body) { + Ok(response) => response, + Err(e) => { + error!("Failed to decode BlockSync response: {e:?}"); + return Ok(()); + } + }; + + self.publish( + GossipEvent::BlockSyncResponse(request_id, response), + subscribers, + ); + } + }, + Msg::GetState { reply } => { let number_peers = match state { State::Stopped => 0, diff --git a/code/crates/actors/src/host.rs b/code/crates/actors/src/host.rs index 647e3f7bf..2211e0783 100644 --- a/code/crates/actors/src/host.rs +++ b/code/crates/actors/src/host.rs @@ -1,10 +1,18 @@ +use bytes::Bytes; use std::time::Duration; use derive_where::derive_where; use libp2p::PeerId; use ractor::{ActorRef, RpcReplyPort}; -use malachite_common::{Context, Extension, Round, SignedVote}; 
+use malachite_blocksync::SyncedBlock; +use malachite_common::{Context, Extension, Round, SignedProposal, SignedVote}; + +use crate::consensus::ConsensusRef; +use crate::util::streaming::StreamMessage; + +/// A value to propose that has just been received. +pub use malachite_consensus::ProposedValue; /// This is the value that the application constructed /// and has finished streaming on gossip. @@ -34,12 +42,6 @@ impl LocallyProposedValue { } } -/// A value to propose that has just been received. -pub use malachite_consensus::ProposedValue; - -use crate::consensus::ConsensusRef; -use crate::util::streaming::StreamMessage; - /// A reference to the host actor. pub type HostRef = ActorRef>; @@ -61,6 +63,9 @@ pub enum HostMsg { reply_to: RpcReplyPort>, }, + /// Request the earliest block height in the block store + GetEarliestBlockHeight { reply_to: RpcReplyPort }, + /// ProposalPart received <-- consensus <-- gossip ReceivedProposalPart { from: PeerId, @@ -76,10 +81,21 @@ pub enum HostMsg { // Consensus has decided on a value Decide { - height: Ctx::Height, - round: Round, - value: Ctx::Value, + proposal: SignedProposal, commits: Vec>, consensus: ConsensusRef, }, + + // Retrieve decided block from the block store + GetDecidedBlock { + height: Ctx::Height, + reply_to: RpcReplyPort>>, + }, + + // Synced block + ProcessSyncedBlockBytes { + proposal: SignedProposal, + block_bytes: Bytes, + reply_to: RpcReplyPort>, + }, } diff --git a/code/crates/actors/src/lib.rs b/code/crates/actors/src/lib.rs index c79d0a346..601f7a0f1 100644 --- a/code/crates/actors/src/lib.rs +++ b/code/crates/actors/src/lib.rs @@ -2,6 +2,7 @@ #![allow(unexpected_cfgs)] #![cfg_attr(coverage_nightly, feature(coverage_attribute))] +pub mod block_sync; pub mod consensus; pub mod gossip_consensus; pub mod gossip_mempool; diff --git a/code/crates/actors/src/node.rs b/code/crates/actors/src/node.rs index af8a2d953..4540be3a6 100644 --- a/code/crates/actors/src/node.rs +++ b/code/crates/actors/src/node.rs @@ -5,6 +5,7 @@ use tracing::{error, info, warn}; use malachite_common::Context; +use crate::block_sync::BlockSyncRef; use crate::consensus::ConsensusRef; use crate::gossip_consensus::GossipConsensusRef; use crate::gossip_mempool::GossipMempoolRef; @@ -18,6 +19,7 @@ pub struct Node { gossip_consensus: GossipConsensusRef, consensus: ConsensusRef, gossip_mempool: GossipMempoolRef, + block_sync: Option>, mempool: ActorCell, host: HostRef, start_height: Ctx::Height, @@ -33,6 +35,7 @@ where gossip_consensus: GossipConsensusRef, consensus: ConsensusRef, gossip_mempool: GossipMempoolRef, + block_sync: Option>, mempool: ActorCell, host: HostRef, start_height: Ctx::Height, @@ -42,6 +45,7 @@ where gossip_consensus, consensus, gossip_mempool, + block_sync, mempool, host, start_height, @@ -70,9 +74,13 @@ where // Set ourselves as the supervisor of the other actors self.gossip_consensus.link(myself.get_cell()); self.consensus.link(myself.get_cell()); - self.gossip_mempool.link(myself.get_cell()); self.mempool.link(myself.get_cell()); self.host.link(myself.get_cell()); + self.gossip_mempool.link(myself.get_cell()); + + if let Some(actor) = &self.block_sync { + actor.link(myself.get_cell()); + } Ok(()) } diff --git a/code/crates/actors/src/util/codec.rs b/code/crates/actors/src/util/codec.rs index 78e0a31e6..094d72115 100644 --- a/code/crates/actors/src/util/codec.rs +++ b/code/crates/actors/src/util/codec.rs @@ -5,9 +5,10 @@ use malachite_proto::Protobuf; use super::streaming::StreamMessage; -pub trait NetworkCodec: Sync + Send + 'static { - 
type Error: std::error::Error + Send + Sync + 'static; - +pub trait NetworkCodec: Sync + Send + 'static +where + Self: malachite_blocksync::NetworkCodec, +{ fn decode_msg(bytes: Bytes) -> Result, Self::Error>; fn encode_msg(msg: SignedConsensusMsg) -> Result; diff --git a/code/crates/actors/src/util/mod.rs b/code/crates/actors/src/util/mod.rs index 7fbd6e0b2..d6146d1ab 100644 --- a/code/crates/actors/src/util/mod.rs +++ b/code/crates/actors/src/util/mod.rs @@ -1,4 +1,5 @@ pub mod codec; pub mod forward; pub mod streaming; +pub mod ticker; pub mod timers; diff --git a/code/crates/actors/src/util/ticker.rs b/code/crates/actors/src/util/ticker.rs new file mode 100644 index 000000000..07dfe8427 --- /dev/null +++ b/code/crates/actors/src/util/ticker.rs @@ -0,0 +1,18 @@ +use std::time::Duration; + +use ractor::message::Message; +use ractor::ActorRef; + +pub async fn ticker(interval: Duration, target: ActorRef, msg: impl Fn() -> Msg) +where + Msg: Message, +{ + loop { + tokio::time::sleep(interval).await; + + if let Err(e) = target.cast(msg()) { + tracing::error!(?e, ?target, "Failed to send tick message"); + break; + } + } +} diff --git a/code/crates/actors/src/util/timers.rs b/code/crates/actors/src/util/timers.rs index 16eb13977..9ec220b43 100644 --- a/code/crates/actors/src/util/timers.rs +++ b/code/crates/actors/src/util/timers.rs @@ -12,6 +12,7 @@ use tracing::trace; type TimerTask = JoinHandle>>; +#[derive(Debug)] struct Timer { /// Message to give to the actor when the timer expires key: Key, @@ -30,6 +31,7 @@ pub struct TimeoutElapsed { generation: u64, } +#[derive(Debug)] pub struct TimerScheduler where Key: Eq + Hash, diff --git a/code/crates/blocksync/Cargo.toml b/code/crates/blocksync/Cargo.toml new file mode 100644 index 000000000..1a5204bc2 --- /dev/null +++ b/code/crates/blocksync/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "malachite-blocksync" +version.workspace = true +edition.workspace = true +repository.workspace = true +license.workspace = true +publish.workspace = true +rust-version.workspace = true + +[dependencies] +malachite-common = { workspace = true } +malachite-metrics = { workspace = true } + +bytes = { workspace = true, features = ["serde"] } +dashmap = { workspace = true } +derive-where = { workspace = true } +displaydoc = { workspace = true } +genawaiter = { workspace = true } +libp2p = { workspace = true, features = ["request-response", "cbor"] } +rand = { workspace = true } +serde = { workspace = true } +tracing = { workspace = true } + +[lints] +workspace = true diff --git a/code/crates/blocksync/src/behaviour.rs b/code/crates/blocksync/src/behaviour.rs new file mode 100644 index 000000000..636c0da6c --- /dev/null +++ b/code/crates/blocksync/src/behaviour.rs @@ -0,0 +1,68 @@ +use bytes::Bytes; +use displaydoc::Display; +use libp2p::metrics::Registry; +use libp2p::request_response::{self as rpc, OutboundRequestId, ProtocolSupport}; +use libp2p::swarm::NetworkBehaviour; +use libp2p::{PeerId, StreamProtocol}; + +use crate::types::{RawRequest, RawResponse, ResponseChannel}; + +// use crate::Metrics; + +#[derive(NetworkBehaviour)] +#[behaviour(to_swarm = "Event")] +pub struct Behaviour { + rpc: rpc::cbor::Behaviour, +} + +pub type Event = rpc::Event; + +impl Behaviour { + pub const PROTOCOL: [(StreamProtocol, ProtocolSupport); 1] = [( + StreamProtocol::new("/malachite-blocksync/v1beta1"), + ProtocolSupport::Full, + )]; + + pub fn new() -> Self { + let config = rpc::Config::default(); + Self { + rpc: rpc::cbor::Behaviour::new(Self::PROTOCOL, config), + // 
metrics: None, + } + } + + pub fn new_with_metrics(_registry: &mut Registry) -> Self { + let config = rpc::Config::default(); + Self { + rpc: rpc::cbor::Behaviour::new(Self::PROTOCOL, config), + // metrics: Some(Metrics::new(registry)), + } + } + + pub fn send_response(&mut self, channel: ResponseChannel, data: Bytes) -> Result<(), Error> { + self.rpc + .send_response(channel, RawResponse(data)) + .map_err(|_| Error::SendResponse) + } + + pub fn send_request(&mut self, peer: PeerId, data: Bytes) -> OutboundRequestId { + self.rpc.send_request(&peer, RawRequest(data)) + } +} + +#[derive(Clone, Debug, Display)] +pub enum Error { + #[displaydoc("Failed to send response")] + SendResponse, + + #[displaydoc("Failed to send request")] + SendRequest, +} + +impl core::error::Error for Error {} + +impl Default for Behaviour { + fn default() -> Self { + Self::new() + } +} diff --git a/code/crates/blocksync/src/co.rs b/code/crates/blocksync/src/co.rs new file mode 100644 index 000000000..e9ede746a --- /dev/null +++ b/code/crates/blocksync/src/co.rs @@ -0,0 +1,8 @@ +use genawaiter::sync as gen; +use genawaiter::GeneratorState; + +use crate::{Effect, Error, Resume}; + +pub type Gen = gen::Gen, Resume, F>; +pub type Co = gen::Co, Resume>; +pub type CoState = GeneratorState, Result<(), Error>>; diff --git a/code/crates/blocksync/src/codec.rs b/code/crates/blocksync/src/codec.rs new file mode 100644 index 000000000..24f92611e --- /dev/null +++ b/code/crates/blocksync/src/codec.rs @@ -0,0 +1,17 @@ +use bytes::Bytes; +use malachite_common::Context; + +use crate::{Request, Response, Status}; + +pub trait NetworkCodec: Sync + Send + 'static { + type Error: std::error::Error + Send + Sync + 'static; + + fn decode_status(bytes: Bytes) -> Result, Self::Error>; + fn encode_status(status: Status) -> Result; + + fn decode_request(bytes: Bytes) -> Result, Self::Error>; + fn encode_request(request: Request) -> Result; + + fn decode_response(bytes: Bytes) -> Result, Self::Error>; + fn encode_response(response: Response) -> Result; +} diff --git a/code/crates/blocksync/src/handle.rs b/code/crates/blocksync/src/handle.rs new file mode 100644 index 000000000..de3512050 --- /dev/null +++ b/code/crates/blocksync/src/handle.rs @@ -0,0 +1,324 @@ +use core::marker::PhantomData; + +use derive_where::derive_where; +use displaydoc::Display; +use libp2p::request_response::OutboundRequestId; +use tracing::{debug, error, info, warn}; + +use malachite_common::{Context, Height, Proposal}; + +use crate::co::Co; +use crate::perform; +use crate::{InboundRequestId, Metrics, PeerId, Request, Response, State, Status, SyncedBlock}; + +#[derive_where(Debug)] +#[derive(Display)] +pub enum Error { + /// The coroutine was resumed with a value which + /// does not match the expected type of resume value. 
+ #[displaydoc("Unexpected resume: {0:?}, expected one of: {1}")] + UnexpectedResume(Resume, &'static str), +} + +impl core::error::Error for Error {} + +#[derive_where(Debug)] +pub enum Resume { + Continue(PhantomData), +} + +impl Default for Resume { + fn default() -> Self { + Self::Continue(PhantomData) + } +} + +#[derive_where(Debug)] +pub enum Effect { + /// Publish our status to the network + PublishStatus(Ctx::Height), + + /// Send a BlockSync request to a peer + SendRequest(PeerId, Request), + + /// Send a response to a BlockSync request + SendResponse(InboundRequestId, Response), + + /// Retrieve a block from the application + GetBlock(InboundRequestId, Ctx::Height), +} + +#[derive_where(Debug)] +pub enum Input { + /// A tick has occurred + Tick, + + /// A status update has been received from a peer + Status(Status), + + /// Consensus just started a new height + StartHeight(Ctx::Height), + + /// Consensus just decided on a new block + Decided(Ctx::Height), + + /// A BlockSync request has been received from a peer + Request(InboundRequestId, PeerId, Request), + + /// A BlockSync response has been received + Response(OutboundRequestId, Response), + + /// Got a response from the application to our `GetBlock` request + GotBlock(InboundRequestId, Ctx::Height, Option>), + + /// A request timed out + RequestTimedOut(PeerId, Request), +} + +pub async fn handle( + co: Co, + state: &mut State, + metrics: &Metrics, + input: Input, +) -> Result<(), Error> +where + Ctx: Context, +{ + match input { + Input::Tick => on_tick(co, state, metrics).await, + Input::Status(status) => on_status(co, state, metrics, status).await, + Input::StartHeight(height) => on_start_height(co, state, metrics, height).await, + Input::Decided(height) => on_decided(co, state, metrics, height).await, + Input::Request(request_id, peer_id, request) => { + on_request(co, state, metrics, request_id, peer_id, request).await + } + Input::Response(request_id, response) => { + on_response(co, state, metrics, request_id, response).await + } + Input::GotBlock(request_id, height, block) => { + on_block(co, state, metrics, request_id, height, block).await + } + Input::RequestTimedOut(peer_id, request) => { + on_request_timed_out(co, state, metrics, peer_id, request).await + } + } +} + +#[tracing::instrument(skip_all)] +pub async fn on_tick( + co: Co, + state: &mut State, + _metrics: &Metrics, +) -> Result<(), Error> +where + Ctx: Context, +{ + debug!(height = %state.tip_height, "Publishing status"); + + perform!(co, Effect::PublishStatus(state.tip_height)); + + Ok(()) +} + +#[tracing::instrument( + skip_all, + fields( + sync_height = %state.sync_height, + tip_height = %state.tip_height + ) +)] +pub async fn on_status( + co: Co, + state: &mut State, + metrics: &Metrics, + status: Status, +) -> Result<(), Error> +where + Ctx: Context, +{ + debug!(%status.peer_id, %status.height, "Received peer status"); + + let peer_height = status.height; + + state.update_status(status); + + if peer_height > state.tip_height { + info!( + tip.height = %state.tip_height, + sync.height = %state.sync_height, + peer.height = %peer_height, + "SYNC REQUIRED: Falling behind" + ); + + // We are lagging behind one of our peer at least, + // request sync from any peer already at or above that peer's height. 
+ request_sync(co, state, metrics).await?; + } + + Ok(()) +} + +#[tracing::instrument(skip_all)] +pub async fn on_request( + co: Co, + _state: &mut State, + metrics: &Metrics, + request_id: InboundRequestId, + peer: PeerId, + request: Request, +) -> Result<(), Error> +where + Ctx: Context, +{ + debug!(height = %request.height, %peer, "Received request for block"); + + metrics.request_received(request.height.as_u64()); + + perform!(co, Effect::GetBlock(request_id, request.height)); + + Ok(()) +} + +#[tracing::instrument(skip_all)] +pub async fn on_response( + _co: Co, + _state: &mut State, + metrics: &Metrics, + request_id: OutboundRequestId, + response: Response, +) -> Result<(), Error> +where + Ctx: Context, +{ + debug!(height = %response.height, %request_id, "Received response"); + + metrics.response_received(response.height.as_u64()); + + Ok(()) +} + +pub async fn on_start_height( + co: Co, + state: &mut State, + metrics: &Metrics, + height: Ctx::Height, +) -> Result<(), Error> +where + Ctx: Context, +{ + debug!(%height, "Starting new height"); + + state.sync_height = height; + + // Check if there is any peer already at or above the height we just started, + // and request sync from that peer in order to catch up. + request_sync(co, state, metrics).await?; + + Ok(()) +} + +pub async fn on_decided( + _co: Co, + state: &mut State, + _metrics: &Metrics, + height: Ctx::Height, +) -> Result<(), Error> +where + Ctx: Context, +{ + debug!(%height, "Decided on a block"); + + state.tip_height = height; + state.remove_pending_request(height); + + Ok(()) +} + +pub async fn on_block( + co: Co, + _state: &mut State, + metrics: &Metrics, + request_id: InboundRequestId, + height: Ctx::Height, + block: Option>, +) -> Result<(), Error> +where + Ctx: Context, +{ + let response = match block { + None => { + error!(%height, "Received empty response"); + None + } + Some(block) if block.proposal.height() != height => { + error!( + %height, block.height = %block.proposal.height(), + "Received block for wrong height" + ); + None + } + Some(block) => { + debug!(%height, "Received decided block"); + Some(block) + } + }; + + perform!( + co, + Effect::SendResponse(request_id, Response::new(height, response)) + ); + + metrics.response_sent(height.as_u64()); + + Ok(()) +} + +pub async fn on_request_timed_out( + _co: Co, + state: &mut State, + metrics: &Metrics, + peer_id: PeerId, + request: Request, +) -> Result<(), Error> +where + Ctx: Context, +{ + warn!(%peer_id, %request.height, "Request timed out"); + + metrics.request_timed_out(request.height.as_u64()); + + state.remove_pending_request(request.height); + + Ok(()) +} + +/// If there are no pending requests for the sync height, +/// and there is peer at a higher height than our sync height, +/// then sync from that peer. 
+async fn request_sync<Ctx>(
+    co: Co<Ctx>,
+    state: &mut State<Ctx>,
+    metrics: &Metrics,
+) -> Result<(), Error<Ctx>>
+where
+    Ctx: Context,
+{
+    let sync_height = state.sync_height;
+
+    if state.has_pending_request(&sync_height) {
+        debug!(sync.height = %sync_height, "Already have a pending request for this height");
+        return Ok(());
+    }
+
+    if let Some(peer) = state.random_peer_with_block(sync_height) {
+        debug!(sync.height = %sync_height, %peer, "Requesting block from peer");
+
+        metrics.request_sent(sync_height.as_u64());
+
+        perform!(co, Effect::SendRequest(peer, Request::new(sync_height)));
+
+        state.store_pending_request(sync_height, peer);
+    }
+
+    Ok(())
+}
diff --git a/code/crates/blocksync/src/lib.rs b/code/crates/blocksync/src/lib.rs
new file mode 100644
index 000000000..0827f2a5c
--- /dev/null
+++ b/code/crates/blocksync/src/lib.rs
@@ -0,0 +1,29 @@
+mod behaviour;
+pub use behaviour::{Behaviour, Event};
+
+mod codec;
+pub use codec::NetworkCodec;
+
+mod metrics;
+pub use metrics::Metrics;
+
+mod state;
+pub use state::State;
+
+mod types;
+pub use types::{
+    InboundRequestId, OutboundRequestId, PeerId, RawMessage, Request, Response, ResponseChannel,
+    Status, SyncedBlock,
+};
+
+mod macros;
+
+#[doc(hidden)]
+pub mod handle;
+pub use handle::{Effect, Error, Input, Resume};
+
+#[doc(hidden)]
+pub mod co;
+
+#[doc(hidden)]
+pub use tracing;
diff --git a/code/crates/blocksync/src/macros.rs b/code/crates/blocksync/src/macros.rs
new file mode 100644
index 000000000..f6939976f
--- /dev/null
+++ b/code/crates/blocksync/src/macros.rs
@@ -0,0 +1,92 @@
+/// Process an [`Input`][input] and handle the emitted [`Effects`][effect].
+///
+/// [input]: crate::handle::Input
+/// [effect]: crate::handle::Effect
+///
+/// # Example
+///
+/// ```rust,ignore
+/// malachite_blocksync::process!(
+///     // Input to process
+///     input: input,
+///     // BlockSync state
+///     state: &mut state,
+///     // Metrics
+///     metrics: &metrics,
+///     // Effect handler
+///     with: effect => handle_effect(effect).await
+/// )
+/// ```
+#[macro_export]
+macro_rules! process {
+    (input: $input:expr, state: $state:expr, metrics: $metrics:expr, with: $effect:ident => $handle:expr) => {{
+        let mut gen =
+            $crate::co::Gen::new(|co| $crate::handle::handle(co, $state, $metrics, $input));
+
+        let mut co_result = gen.resume_with($crate::Resume::default());
+
+        loop {
+            match co_result {
+                $crate::co::CoState::Yielded($effect) => {
+                    let resume = match $handle {
+                        Ok(resume) => resume,
+                        Err(error) => {
+                            $crate::tracing::error!("Error when processing effect: {error:?}");
+                            $crate::Resume::default()
+                        }
+                    };
+                    co_result = gen.resume_with(resume)
+                }
+                $crate::co::CoState::Complete(result) => {
+                    return result.map_err(Into::into);
+                }
+            }
+        }
+    }};
+}
+
+/// Yield an effect, expecting a specific type of resume value.
+///
+/// Effects yielded by this macro must resume with a value that matches the provided pattern.
+/// If no pattern is given, then the yielded effect must resume with [`Resume::Continue`][continue].
+///
+/// # Errors
+/// This macro will abort the current function with an [`Error::UnexpectedResume`][error] error
+/// if the effect does not resume with a value that matches the provided pattern.
+/// +/// # Example +/// ```rust,ignore +/// // If we do not need to extract the resume value +/// let () = perform!(co, effect, Resume::ProposeValue(_, _)); +/// +/// /// If we need to extract the resume value +/// let value: Ctx::Value = perform!(co, effect, Resume::ProposeValue(_, value) => value); +/// ``` +/// +/// [continue]: crate::handle::Resume::Continue +/// [error]: crate::handle::Error::UnexpectedResume +#[macro_export] +macro_rules! perform { + ($co:expr, $effect:expr) => { + perform!($co, $effect, $crate::handle::Resume::Continue(_)) + }; + + ($co:expr, $effect:expr, $pat:pat) => { + perform!($co, $effect, $pat => ()) + }; + + // TODO: Add support for multiple patterns + if guards + ($co:expr, $effect:expr, $pat:pat => $expr:expr $(,)?) => { + #[allow(unreachable_patterns)] + match $co.yield_($effect).await { + $pat => $expr, + resume => { + return ::core::result::Result::Err($crate::handle::Error::UnexpectedResume( + resume, + stringify!($pat) + ) + .into()) + } + } + }; +} diff --git a/code/crates/blocksync/src/metrics.rs b/code/crates/blocksync/src/metrics.rs new file mode 100644 index 000000000..f205a216a --- /dev/null +++ b/code/crates/blocksync/src/metrics.rs @@ -0,0 +1,148 @@ +use std::ops::Deref; +use std::sync::Arc; +use std::time::Instant; + +use dashmap::DashMap; +use malachite_metrics::prometheus::metrics::counter::Counter; +use malachite_metrics::prometheus::metrics::histogram::{exponential_buckets, Histogram}; +use malachite_metrics::SharedRegistry; + +#[derive(Clone, Debug)] +pub struct Metrics(Arc); + +impl Deref for Metrics { + type Target = Inner; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +#[derive(Debug)] +pub struct Inner { + requests_sent: Counter, + requests_received: Counter, + responses_sent: Counter, + responses_received: Counter, + client_latency: Histogram, + server_latency: Histogram, + request_timeouts: Counter, + + instant_request_sent: Arc>, + instant_request_received: Arc>, +} + +impl Inner { + pub fn new() -> Self { + Self { + requests_sent: Counter::default(), + requests_received: Counter::default(), + responses_sent: Counter::default(), + responses_received: Counter::default(), + client_latency: Histogram::new(exponential_buckets(0.1, 2.0, 20)), + server_latency: Histogram::new(exponential_buckets(0.1, 2.0, 20)), + request_timeouts: Counter::default(), + instant_request_sent: Arc::new(DashMap::new()), + instant_request_received: Arc::new(DashMap::new()), + } + } +} + +impl Default for Inner { + fn default() -> Self { + Self::new() + } +} + +impl Metrics { + pub fn new() -> Self { + Self(Arc::new(Inner::new())) + } + + pub fn register(registry: &SharedRegistry) -> Self { + let metrics = Self::new(); + + registry.with_prefix("malachite_blocksync", |registry| { + registry.register( + "requests_sent", + "Number of BlockSync requests sent", + metrics.requests_sent.clone(), + ); + + registry.register( + "requests_received", + "Number of BlockSync requests received", + metrics.requests_received.clone(), + ); + + registry.register( + "responses_sent", + "Number of BlockSync responses sent", + metrics.responses_sent.clone(), + ); + + registry.register( + "responses_received", + "Number of BlockSync responses received", + metrics.responses_received.clone(), + ); + + registry.register( + "client_latency", + "Interval of time between when request was sent and response was received", + metrics.client_latency.clone(), + ); + + registry.register( + "server_latency", + "Interval of time between when request was received and response was 
sent",
+                metrics.server_latency.clone(),
+            );
+
+            registry.register(
+                "timeouts",
+                "Number of BlockSync request timeouts",
+                metrics.request_timeouts.clone(),
+            );
+        });
+
+        metrics
+    }
+
+    pub fn request_sent(&self, height: u64) {
+        self.requests_sent.inc();
+        self.instant_request_sent.insert(height, Instant::now());
+    }
+
+    pub fn response_received(&self, height: u64) {
+        self.responses_received.inc();
+
+        if let Some((_, instant)) = self.instant_request_sent.remove(&height) {
+            self.client_latency.observe(instant.elapsed().as_secs_f64());
+        }
+    }
+
+    pub fn request_received(&self, height: u64) {
+        self.requests_received.inc();
+        self.instant_request_received.insert(height, Instant::now());
+    }
+
+    pub fn response_sent(&self, height: u64) {
+        self.responses_sent.inc();
+
+        if let Some((_, instant)) = self.instant_request_received.remove(&height) {
+            self.server_latency.observe(instant.elapsed().as_secs_f64());
+        }
+    }
+
+    pub fn request_timed_out(&self, height: u64) {
+        self.request_timeouts.inc();
+        self.instant_request_sent.remove(&height);
+    }
+}
+
+impl Default for Metrics {
+    fn default() -> Self {
+        Self::new()
+    }
+}
diff --git a/code/crates/blocksync/src/state.rs b/code/crates/blocksync/src/state.rs
new file mode 100644
index 000000000..2545837e6
--- /dev/null
+++ b/code/crates/blocksync/src/state.rs
@@ -0,0 +1,66 @@
+use std::collections::BTreeMap;
+
+use libp2p::PeerId;
+
+use malachite_common::Context;
+use rand::seq::IteratorRandom;
+
+use crate::Status;
+
+pub struct State<Ctx>
+where
+    Ctx: Context,
+{
+    rng: Box<dyn rand::RngCore + Send>,
+
+    /// Height of last decided block
+    pub tip_height: Ctx::Height,
+
+    /// Height currently syncing.
+    pub sync_height: Ctx::Height,
+
+    /// Requests for these heights have been sent out to peers.
+    pub pending_requests: BTreeMap<Ctx::Height, PeerId>,
+
+    /// The set of peers we are connected to in order to get blocks and certificates.
+    pub peers: BTreeMap<PeerId, Status<Ctx>>,
+}
+
+impl<Ctx> State<Ctx>
+where
+    Ctx: Context,
+{
+    pub fn new(rng: Box<dyn rand::RngCore + Send>, tip_height: Ctx::Height) -> Self {
+        Self {
+            rng,
+            tip_height,
+            sync_height: tip_height,
+            pending_requests: BTreeMap::new(),
+            peers: BTreeMap::new(),
+        }
+    }
+
+    pub fn update_status(&mut self, status: Status<Ctx>) {
+        self.peers.insert(status.peer_id, status);
+    }
+
+    /// Select at random a peer that we know is at or above the given height.
+ pub fn random_peer_with_block(&mut self, height: Ctx::Height) -> Option { + self.peers + .iter() + .filter_map(move |(&peer, status)| (status.height >= height).then_some(peer)) + .choose_stable(&mut self.rng) + } + + pub fn store_pending_request(&mut self, height: Ctx::Height, peer: PeerId) { + self.pending_requests.insert(height, peer); + } + + pub fn remove_pending_request(&mut self, height: Ctx::Height) { + self.pending_requests.remove(&height); + } + + pub fn has_pending_request(&self, height: &Ctx::Height) -> bool { + self.pending_requests.contains_key(height) + } +} diff --git a/code/crates/blocksync/src/types.rs b/code/crates/blocksync/src/types.rs new file mode 100644 index 000000000..cabdd96bf --- /dev/null +++ b/code/crates/blocksync/src/types.rs @@ -0,0 +1,69 @@ +use bytes::Bytes; +use derive_where::derive_where; +use displaydoc::Display; +use serde::{Deserialize, Serialize}; + +use malachite_common::{Certificate, Context, SignedProposal}; + +pub use libp2p::identity::PeerId; +pub use libp2p::request_response::{InboundRequestId, OutboundRequestId}; + +pub type ResponseChannel = libp2p::request_response::ResponseChannel; + +#[derive(Display)] +#[displaydoc("Status {{ peer_id: {peer_id}, height: {height} }}")] +#[derive_where(Clone, Debug, PartialEq, Eq)] +pub struct Status { + pub peer_id: PeerId, + pub height: Ctx::Height, + pub earliest_block_height: Ctx::Height, +} + +#[derive_where(Clone, Debug, PartialEq, Eq)] +pub struct Request { + pub height: Ctx::Height, +} + +impl Request { + pub fn new(height: Ctx::Height) -> Self { + Self { height } + } +} + +#[derive_where(Clone, Debug, PartialEq, Eq)] +pub struct Response { + pub height: Ctx::Height, + pub block: Option>, +} + +impl Response { + pub fn new(height: Ctx::Height, block: Option>) -> Self { + Self { height, block } + } +} + +#[derive_where(Clone, Debug, PartialEq, Eq)] +pub struct SyncedBlock { + pub proposal: SignedProposal, + pub certificate: Certificate, + pub block_bytes: Bytes, +} + +#[derive(Clone, Debug)] +pub enum RawMessage { + Request { + request_id: InboundRequestId, + peer: PeerId, + body: Bytes, + }, + Response { + request_id: OutboundRequestId, + body: Bytes, + }, +} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct RawRequest(pub Bytes); + +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct RawResponse(pub Bytes); diff --git a/code/crates/cli/src/args.rs b/code/crates/cli/src/args.rs index e7921e2ae..1f0e6b25b 100644 --- a/code/crates/cli/src/args.rs +++ b/code/crates/cli/src/args.rs @@ -61,7 +61,7 @@ pub enum Commands { impl Default for Commands { fn default() -> Self { - Commands::Start(StartCmd) + Commands::Start(StartCmd::default()) } } diff --git a/code/crates/cli/src/cmd/start.rs b/code/crates/cli/src/cmd/start.rs index 02862b595..d4868226b 100644 --- a/code/crates/cli/src/cmd/start.rs +++ b/code/crates/cli/src/cmd/start.rs @@ -11,7 +11,10 @@ use malachite_starknet_app::node::StarknetNode; use crate::metrics; #[derive(Parser, Debug, Clone, Default, PartialEq)] -pub struct StartCmd; +pub struct StartCmd { + #[clap(long)] + start_height: Option, +} impl StartCmd { pub async fn run( @@ -42,7 +45,10 @@ impl StartCmd { let (actor, handle) = match cfg.app { App::Starknet => { use malachite_starknet_app::spawn::spawn_node_actor; - spawn_node_actor(cfg, genesis, private_key, None).await + let start_height = self + .start_height + .map(|height| malachite_starknet_app::types::Height::new(height, 1)); + spawn_node_actor(cfg, genesis, private_key, 
start_height, None).await } }; diff --git a/code/crates/cli/src/cmd/testnet.rs b/code/crates/cli/src/cmd/testnet.rs index 1e7e4dd9d..039cec714 100644 --- a/code/crates/cli/src/cmd/testnet.rs +++ b/code/crates/cli/src/cmd/testnet.rs @@ -20,8 +20,8 @@ use malachite_starknet_app::node::StarknetNode; use crate::args::Args; use crate::cmd::init::{save_config, save_genesis, save_priv_validator_key}; -const MIN_VOTING_POWER: u64 = 8; -const MAX_VOTING_POWER: u64 = 15; +const MIN_VOTING_POWER: u64 = 1; +const MAX_VOTING_POWER: u64 = 1; #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub enum RuntimeFlavour { @@ -75,7 +75,7 @@ pub struct TestnetCmd { /// Enable peer discovery. /// If enabled, the node will attempt to discover other nodes in the network - #[clap(long, default_value = "true")] + #[clap(long, default_value = "false")] pub enable_discovery: bool, /// The transport protocol to use for P2P communication @@ -252,12 +252,13 @@ pub fn generate_config( .filter(|j| *j != index) .map(|j| transport.multiaddr("127.0.0.1", MEMPOOL_BASE_PORT + j)) .collect(), - discovery: DiscoveryConfig { enabled: true }, + discovery: DiscoveryConfig { enabled: false }, transport, }, max_tx_count: 10000, gossip_batch_size: 0, }, + blocksync: Default::default(), metrics: MetricsConfig { enabled: true, listen_addr: format!("127.0.0.1:{metrics_port}").parse().unwrap(), diff --git a/code/crates/common/src/certificate.rs b/code/crates/common/src/certificate.rs new file mode 100644 index 000000000..708079975 --- /dev/null +++ b/code/crates/common/src/certificate.rs @@ -0,0 +1,19 @@ +use alloc::vec::Vec; +use derive_where::derive_where; + +use crate::{Context, SignedVote}; + +/// A certificate is a collection of commits +/// TODO - will optimize later +#[derive_where(Clone, Debug, PartialEq, Eq)] +pub struct Certificate { + /// The commits + pub commits: Vec>, +} + +impl Certificate { + /// Creates a certificate + pub fn new(commits: Vec>) -> Self { + Self { commits } + } +} diff --git a/code/crates/common/src/height.rs b/code/crates/common/src/height.rs index c4722604e..f87554e1d 100644 --- a/code/crates/common/src/height.rs +++ b/code/crates/common/src/height.rs @@ -11,7 +11,12 @@ where Default + Copy + Clone + Debug + Display + PartialEq + Eq + PartialOrd + Ord + Send + Sync, { /// Increment the height by one. - fn increment(&self) -> Self; + fn increment(&self) -> Self { + self.increment_by(1) + } + + /// Increment this height by the given amount. + fn increment_by(&self, n: u64) -> Self; /// Convert the height to a `u64`. 
fn as_u64(&self) -> u64; diff --git a/code/crates/common/src/lib.rs b/code/crates/common/src/lib.rs index f97194271..03fe19b81 100644 --- a/code/crates/common/src/lib.rs +++ b/code/crates/common/src/lib.rs @@ -16,6 +16,7 @@ extern crate alloc; +mod certificate; mod context; mod height; mod proposal; @@ -49,6 +50,7 @@ pub type SignedProposal = SignedMessage::Proposal>; /// A signed proposal part pub type SignedProposalPart = SignedMessage::ProposalPart>; +pub use certificate::Certificate; pub use context::Context; pub use height::Height; pub use proposal::{Proposal, Validity}; diff --git a/code/crates/config/src/lib.rs b/code/crates/config/src/lib.rs index 9c4db7269..193497c14 100644 --- a/code/crates/config/src/lib.rs +++ b/code/crates/config/src/lib.rs @@ -56,6 +56,9 @@ pub struct Config { /// Mempool configuration options pub mempool: MempoolConfig, + /// BlockSync configuration options + pub blocksync: BlockSyncConfig, + /// Metrics configuration options pub metrics: MetricsConfig, @@ -266,6 +269,30 @@ pub struct MempoolConfig { pub gossip_batch_size: usize, } +#[derive(Copy, Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct BlockSyncConfig { + /// Enable BlockSync + pub enabled: bool, + + /// Interval at which to update other peers of our status + #[serde(with = "humantime_serde")] + pub status_update_interval: Duration, + + /// Timeout duration for block sync requests + #[serde(with = "humantime_serde")] + pub request_timeout: Duration, +} + +impl Default for BlockSyncConfig { + fn default() -> Self { + Self { + enabled: true, + status_update_interval: Duration::from_secs(10), + request_timeout: Duration::from_secs(10), + } + } +} + /// Consensus configuration options #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ConsensusConfig { @@ -394,6 +421,7 @@ pub struct TestConfig { pub time_allowance_factor: f32, #[serde(with = "humantime_serde")] pub exec_time_per_tx: Duration, + pub max_retain_blocks: usize, #[serde(default)] pub vote_extensions: VoteExtensionsConfig, } @@ -405,6 +433,7 @@ impl Default for TestConfig { txs_per_part: 256, time_allowance_factor: 0.5, exec_time_per_tx: Duration::from_millis(1), + max_retain_blocks: 1000, vote_extensions: VoteExtensionsConfig::default(), } } diff --git a/code/crates/consensus/Cargo.toml b/code/crates/consensus/Cargo.toml index 61dfd18d4..9263b9ca0 100644 --- a/code/crates/consensus/Cargo.toml +++ b/code/crates/consensus/Cargo.toml @@ -17,6 +17,7 @@ malachite-driver.workspace = true malachite-metrics.workspace = true async-recursion = { workspace = true } +bytes = { workspace = true, features = ["serde"] } genawaiter = { workspace = true } derive-where = { workspace = true } libp2p-identity = { workspace = true, features = ["peerid"] } diff --git a/code/crates/consensus/src/effect.rs b/code/crates/consensus/src/effect.rs index 8a5f8ebcc..a95d60329 100644 --- a/code/crates/consensus/src/effect.rs +++ b/code/crates/consensus/src/effect.rs @@ -1,3 +1,4 @@ +use bytes::Bytes; use derive_where::derive_where; use malachite_common::*; @@ -57,11 +58,16 @@ where /// Consensus has decided on a value /// Resume with: [`Resume::Continue`] Decide { - height: Ctx::Height, - round: Round, - value: Ctx::Value, + proposal: SignedProposal, commits: Vec>, }, + + /// Consensus has received a synced decided block + /// Resume with: [`Resume::Continue`] + SyncedBlock { + proposal: SignedProposal, + block_bytes: Bytes, + }, } /// A value with which the consensus process can be resumed after yielding an [`Effect`]. 
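The two effect variants above change what a downstream effect handler receives: `Decide` now hands over the full signed proposal together with its commit votes, and `SyncedBlock` delivers a proposal plus the opaque block bytes fetched from a peer. A minimal sketch of a handler matching on them (illustrative only, not part of this patch; a real handler also produces a `Resume` value for the coroutine, which is omitted here):

```rust
use malachite_common::{Context, Proposal};
use malachite_consensus::Effect;

// Sketch of an application-side effect handler for the reshaped variants.
fn inspect_effect<Ctx: Context>(effect: &Effect<Ctx>) {
    match effect {
        // `Decide` now carries the signed proposal plus its commits,
        // e.g. so both can be persisted for serving future BlockSync requests.
        Effect::Decide { proposal, commits } => {
            tracing::info!(
                height = %proposal.height(),
                commits = commits.len(),
                "Consensus decided"
            );
        }
        // A block obtained through BlockSync: signed proposal + raw block bytes
        // to be handed to the host for verification and execution.
        Effect::SyncedBlock { proposal, block_bytes } => {
            tracing::info!(
                height = %proposal.height(),
                size = block_bytes.len(),
                "Received synced block"
            );
        }
        // Other effects (broadcasts, timeouts, ...) are unchanged and elided.
        _ => {}
    }
}
```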
diff --git a/code/crates/consensus/src/handle.rs b/code/crates/consensus/src/handle.rs index a3633efe8..e1173699c 100644 --- a/code/crates/consensus/src/handle.rs +++ b/code/crates/consensus/src/handle.rs @@ -7,6 +7,7 @@ mod propose_value; mod received_proposed_value; mod signature; mod start_height; +mod synced_block; mod timeout; mod validator_set; mod vote; @@ -15,6 +16,7 @@ use proposal::on_proposal; use propose_value::propose_value; use received_proposed_value::on_received_proposed_value; use start_height::reset_and_start_height; +use synced_block::on_received_synced_block; use timeout::on_timeout_elapsed; use vote::on_vote; @@ -53,5 +55,8 @@ where Input::ReceivedProposedValue(value) => { on_received_proposed_value(co, state, metrics, value).await } + Input::ReceivedSyncedBlock(proposal, commits, block_bytes) => { + on_received_synced_block(co, state, metrics, proposal, commits, block_bytes).await + } } } diff --git a/code/crates/consensus/src/handle/decide.rs b/code/crates/consensus/src/handle/decide.rs index c6b562911..2cad97faa 100644 --- a/code/crates/consensus/src/handle/decide.rs +++ b/code/crates/consensus/src/handle/decide.rs @@ -5,7 +5,7 @@ pub async fn decide( state: &mut State, metrics: &Metrics, consensus_round: Round, - proposal: Ctx::Proposal, + proposal: SignedProposal, ) -> Result<(), Error> where Ctx: Context, @@ -22,7 +22,7 @@ where // Update metrics { - // We are only interesting in consensus time for round 0, ie. in the happy path. + // We are only interested in consensus time for round 0, ie. in the happy path. if consensus_round == Round::new(0) { metrics.consensus_end(); } @@ -46,15 +46,7 @@ where } } - perform!( - co, - Effect::Decide { - height, - round: proposal_round, - value: value.clone(), - commits - } - ); + perform!(co, Effect::Decide { proposal, commits }); // Reinitialize to remove any previous round or equivocating precommits. // TODO: Revise when evidence module is added. 
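Because `Effect::Decide` now carries the commits, a host can wrap them in the new `Certificate` and bundle them with the proposal and the serialized block into the `SyncedBlock` payload that BlockSync responses carry. A rough sketch, assuming the host already has the encoded block bytes at hand (the helper below is illustrative, not part of this patch):

```rust
use bytes::Bytes;
use malachite_blocksync::SyncedBlock;
use malachite_common::{Certificate, Context, SignedProposal, SignedVote};

// Assemble the payload served to peers that request this height via BlockSync,
// using the types introduced in this patch.
fn make_synced_block<Ctx: Context>(
    proposal: SignedProposal<Ctx>,
    commits: Vec<SignedVote<Ctx>>,
    block_bytes: Bytes,
) -> SyncedBlock<Ctx> {
    SyncedBlock {
        proposal,
        certificate: Certificate::new(commits),
        block_bytes,
    }
}
```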
diff --git a/code/crates/consensus/src/handle/driver.rs b/code/crates/consensus/src/handle/driver.rs index daff563c4..3a682b0bf 100644 --- a/code/crates/consensus/src/handle/driver.rs +++ b/code/crates/consensus/src/handle/driver.rs @@ -3,6 +3,7 @@ use malachite_driver::Input as DriverInput; use malachite_driver::Output as DriverOutput; use crate::handle::on_proposal; +use crate::handle::vote::on_vote; use crate::types::SignedConsensusMsg; use crate::util::pretty::PrettyVal; @@ -163,21 +164,19 @@ where Effect::Broadcast(SignedConsensusMsg::Vote(signed_vote.clone())) ); - apply_driver_input(co, state, metrics, DriverInput::Vote(signed_vote)).await + on_vote(co, state, metrics, signed_vote).await } DriverOutput::Decide(consensus_round, proposal) => { - // TODO: Remove proposal, votes, block for the round info!( round = %consensus_round, - ?proposal, + height = %proposal.height(), + value = %proposal.value().id(), "Decided", ); // Store value decided on for retrieval when timeout commit elapses - state - .decision - .insert((state.driver.height(), consensus_round), proposal.clone()); + state.store_decision(state.driver.height(), consensus_round, proposal.clone()); perform!( co, diff --git a/code/crates/consensus/src/handle/proposal.rs b/code/crates/consensus/src/handle/proposal.rs index a5bed171f..f8447f01e 100644 --- a/code/crates/consensus/src/handle/proposal.rs +++ b/code/crates/consensus/src/handle/proposal.rs @@ -57,12 +57,13 @@ where } if proposal_height > consensus_height { - debug!("Received proposal for higher height, queuing for later"); - - state - .input_queue - .push_back(Input::Proposal(signed_proposal)); + if consensus_height.increment() == proposal_height { + debug!("Received proposal for next height, queuing for later"); + state + .input_queue + .push_back(Input::Proposal(signed_proposal)); + } return Ok(()); } @@ -139,6 +140,7 @@ where "Received proposal from a non-proposer" ); + // TODO - why when we replay proposals the proposer is wrong return Ok(false); }; diff --git a/code/crates/consensus/src/handle/received_proposed_value.rs b/code/crates/consensus/src/handle/received_proposed_value.rs index 8895fd0db..8e80d4cee 100644 --- a/code/crates/consensus/src/handle/received_proposed_value.rs +++ b/code/crates/consensus/src/handle/received_proposed_value.rs @@ -27,10 +27,12 @@ where } if state.driver.height() < proposed_value.height { - debug!("Received value for higher height, queuing for later"); - state - .input_queue - .push_back(Input::ReceivedProposedValue(proposed_value)); + if state.driver.height().increment() == proposed_value.height { + debug!("Received value for next height, queuing for later"); + state + .input_queue + .push_back(Input::ReceivedProposedValue(proposed_value)); + } return Ok(()); } diff --git a/code/crates/consensus/src/handle/start_height.rs b/code/crates/consensus/src/handle/start_height.rs index aad102564..228815613 100644 --- a/code/crates/consensus/src/handle/start_height.rs +++ b/code/crates/consensus/src/handle/start_height.rs @@ -38,7 +38,11 @@ where let round = Round::new(0); info!(%height, "Starting new height"); - let proposer = state.get_proposer(height, round).clone(); + metrics.block_start(); + metrics.height.set(height.as_u64() as i64); + metrics.round.set(round.as_i64()); + + let proposer = state.get_proposer(height, round); apply_driver_input( co, @@ -48,12 +52,6 @@ where ) .await?; - metrics.block_start(); - metrics.height.set(height.as_u64() as i64); - metrics.round.set(round.as_i64()); - - perform!(co, Effect::StartRound(height, 
round, proposer)); - replay_pending_msgs(co, state, metrics).await?; Ok(()) diff --git a/code/crates/consensus/src/handle/synced_block.rs b/code/crates/consensus/src/handle/synced_block.rs new file mode 100644 index 000000000..1433f6a91 --- /dev/null +++ b/code/crates/consensus/src/handle/synced_block.rs @@ -0,0 +1,38 @@ +use crate::handle::proposal::on_proposal; +use crate::handle::vote::on_vote; +use crate::prelude::*; +use bytes::Bytes; + +pub async fn on_received_synced_block( + co: &Co, + state: &mut State, + metrics: &Metrics, + proposal: SignedProposal, + certificate: Certificate, + block_bytes: Bytes, +) -> Result<(), Error> +where + Ctx: Context, +{ + debug!( + proposal.height = %proposal.height(), + commits = certificate.commits.len(), + "Processing certificate" + ); + + on_proposal(co, state, metrics, proposal.clone()).await?; + + for commit in certificate.commits { + on_vote(co, state, metrics, commit).await?; + } + + perform!( + co, + Effect::SyncedBlock { + proposal, + block_bytes, + } + ); + + Ok(()) +} diff --git a/code/crates/consensus/src/handle/vote.rs b/code/crates/consensus/src/handle/vote.rs index e9b919f39..27efded2f 100644 --- a/code/crates/consensus/src/handle/vote.rs +++ b/code/crates/consensus/src/handle/vote.rs @@ -60,14 +60,16 @@ where } if consensus_height < vote_height { - debug!( - consensus.height = %consensus_height, - vote.height = %vote_height, - validator = %validator_address, - "Received vote for higher height, queuing for later" - ); - - state.input_queue.push_back(Input::Vote(signed_vote)); + if consensus_height.increment() == vote_height { + debug!( + consensus.height = %consensus_height, + vote.height = %vote_height, + validator = %validator_address, + "Received vote for next height, queuing for later" + ); + + state.input_queue.push_back(Input::Vote(signed_vote)); + } return Ok(()); } diff --git a/code/crates/consensus/src/input.rs b/code/crates/consensus/src/input.rs index 4b1e25835..bbb16e901 100644 --- a/code/crates/consensus/src/input.rs +++ b/code/crates/consensus/src/input.rs @@ -1,5 +1,9 @@ +use bytes::Bytes; use derive_where::derive_where; -use malachite_common::{Context, Extension, Round, SignedProposal, SignedVote, Timeout}; + +use malachite_common::{ + Certificate, Context, Extension, Round, SignedProposal, SignedVote, Timeout, +}; use crate::types::ProposedValue; @@ -26,4 +30,7 @@ where /// The value corresponding to a proposal has been received ReceivedProposedValue(ProposedValue), + + /// A block received via BlockSync + ReceivedSyncedBlock(SignedProposal, Certificate, Bytes), } diff --git a/code/crates/consensus/src/state.rs b/code/crates/consensus/src/state.rs index 0eafea288..3a9b865ce 100644 --- a/code/crates/consensus/src/state.rs +++ b/code/crates/consensus/src/state.rs @@ -1,4 +1,5 @@ -use std::collections::{BTreeMap, VecDeque}; +use std::collections::{BTreeMap, BTreeSet, VecDeque}; +use tracing::debug; use malachite_common::*; use malachite_driver::Driver; @@ -21,17 +22,17 @@ where pub driver: Driver, /// A queue of inputs that were received before the - /// driver started the new height and was still at round Nil. + /// driver started the new height. pub input_queue: VecDeque>, /// The proposals to decide on. 
pub full_proposal_keeper: FullProposalKeeper, /// Store Precommit votes to be sent along the decision to the host - pub signed_precommits: BTreeMap<(Ctx::Height, Round), Vec>>, + pub signed_precommits: BTreeMap<(Ctx::Height, Round), BTreeSet>>, /// Decision per height - pub decision: BTreeMap<(Ctx::Height, Round), Ctx::Proposal>, + pub decision: BTreeMap<(Ctx::Height, Round), SignedProposal>, } impl State @@ -72,7 +73,20 @@ where self.signed_precommits .entry((height, round)) .or_default() - .push(precommit); + .insert(precommit); + } + + pub fn store_decision(&mut self, height: Ctx::Height, round: Round, proposal: Ctx::Proposal) { + if let Some(full_proposal) = self.full_proposal_keeper.full_proposal_at_round_and_value( + &height, + proposal.round(), + &proposal.value().id(), + ) { + self.decision.insert( + (self.driver.height(), round), + full_proposal.proposal.clone(), + ); + } } pub fn restore_precommits( @@ -82,16 +96,17 @@ where value: &Ctx::Value, ) -> Vec> { // Get the commits for the height and round. - let mut commits_for_height_and_round = self + let commits_for_height_and_round = self .signed_precommits .remove(&(height, round)) .unwrap_or_default(); // Keep the commits for the specified value. - // For now we ignore equivocating votes if present. - commits_for_height_and_round.retain(|c| c.value() == &NilOrVal::Val(value.id())); - + // For now, we ignore equivocating votes if present. commits_for_height_and_round + .into_iter() + .filter(|c| c.value() == &NilOrVal::Val(value.id())) + .collect() } pub fn full_proposal_at_round_and_value( @@ -123,6 +138,7 @@ where } pub fn remove_full_proposals(&mut self, height: Ctx::Height) { + debug!("Removing proposals for {height}"); self.full_proposal_keeper.remove_full_proposals(height) } diff --git a/code/crates/gossip-consensus/Cargo.toml b/code/crates/gossip-consensus/Cargo.toml index ebf5d0f55..0772052cb 100644 --- a/code/crates/gossip-consensus/Cargo.toml +++ b/code/crates/gossip-consensus/Cargo.toml @@ -11,10 +11,12 @@ workspace = true [dependencies] malachite-metrics = { workspace = true } +malachite-blocksync = { workspace = true } malachite-discovery = { workspace = true } bytes = { workspace = true } either = { workspace = true } +eyre = { workspace = true } futures = { workspace = true } libp2p = { workspace = true } libp2p-broadcast = { workspace = true } diff --git a/code/crates/gossip-consensus/src/behaviour.rs b/code/crates/gossip-consensus/src/behaviour.rs index 18c9261a2..6ccafe786 100644 --- a/code/crates/gossip-consensus/src/behaviour.rs +++ b/code/crates/gossip-consensus/src/behaviour.rs @@ -9,10 +9,12 @@ use libp2p_broadcast as broadcast; pub use libp2p::identity::Keypair; pub use libp2p::{Multiaddr, PeerId}; + +use malachite_blocksync as blocksync; use malachite_discovery as discovery; use malachite_metrics::Registry; -use crate::{GossipSubConfig, PubSubProtocol, PROTOCOL_VERSION}; +use crate::{GossipSubConfig, PubSubProtocol, PROTOCOL}; const MAX_TRANSMIT_SIZE: usize = 4 * 1024 * 1024; // 4 MiB @@ -22,6 +24,7 @@ pub enum NetworkEvent { Ping(ping::Event), GossipSub(gossipsub::Event), Broadcast(broadcast::Event), + BlockSync(blocksync::Event), RequestResponse(discovery::Event), } @@ -49,6 +52,12 @@ impl From for NetworkEvent { } } +impl From for NetworkEvent { + fn from(event: blocksync::Event) -> Self { + Self::BlockSync(event) + } +} + impl From for NetworkEvent { fn from(event: discovery::Event) -> Self { Self::RequestResponse(event) @@ -74,6 +83,7 @@ pub struct Behaviour { pub identify: identify::Behaviour, pub 
ping: ping::Behaviour, pub pubsub: Either, + pub blocksync: blocksync::Behaviour, pub request_response: Toggle, } @@ -131,7 +141,7 @@ impl Behaviour { registry: &mut Registry, ) -> Self { let identify = identify::Behaviour::new(identify::Config::new( - PROTOCOL_VERSION.to_string(), + PROTOCOL.to_string(), keypair.public(), )); @@ -155,12 +165,16 @@ impl Behaviour { )), }; + let blocksync = + blocksync::Behaviour::new_with_metrics(registry.sub_registry_with_prefix("blocksync")); + let request_response = Toggle::from(discovery.enabled.then(discovery::new_behaviour)); Self { identify, ping, pubsub, + blocksync, request_response, } } diff --git a/code/crates/gossip-consensus/src/channel.rs b/code/crates/gossip-consensus/src/channel.rs index 901663f99..ab020a961 100644 --- a/code/crates/gossip-consensus/src/channel.rs +++ b/code/crates/gossip-consensus/src/channel.rs @@ -8,11 +8,16 @@ use serde::{Deserialize, Serialize}; pub enum Channel { Consensus, ProposalParts, + BlockSync, } impl Channel { pub fn all() -> &'static [Channel] { - &[Channel::Consensus, Channel::ProposalParts] + &[ + Channel::Consensus, + Channel::ProposalParts, + Channel::BlockSync, + ] } pub fn to_gossipsub_topic(self) -> gossipsub::IdentTopic { @@ -27,6 +32,7 @@ impl Channel { match self { Channel::Consensus => "/consensus", Channel::ProposalParts => "/proposal_parts", + Channel::BlockSync => "/block_sync", } } @@ -46,6 +52,7 @@ impl Channel { match topic.as_str() { "/consensus" => Some(Channel::Consensus), "/proposal_parts" => Some(Channel::ProposalParts), + "/block_sync" => Some(Channel::BlockSync), _ => None, } } @@ -54,6 +61,7 @@ impl Channel { match topic.as_ref() { b"/consensus" => Some(Channel::Consensus), b"/proposal_parts" => Some(Channel::ProposalParts), + b"/block_sync" => Some(Channel::BlockSync), _ => None, } } diff --git a/code/crates/gossip-consensus/src/handle.rs b/code/crates/gossip-consensus/src/handle.rs index 1f79cdcde..522cfd299 100644 --- a/code/crates/gossip-consensus/src/handle.rs +++ b/code/crates/gossip-consensus/src/handle.rs @@ -1,69 +1,113 @@ use bytes::Bytes; -use tokio::sync::mpsc; +use libp2p::request_response::InboundRequestId; +use libp2p::PeerId; +use malachite_blocksync::OutboundRequestId; +use tokio::sync::{mpsc, oneshot}; use tokio::task; -use crate::{BoxError, Channel, CtrlMsg, Event}; +use crate::{Channel, CtrlMsg, Event}; pub struct RecvHandle { + peer_id: PeerId, rx_event: mpsc::Receiver, } impl RecvHandle { + pub fn peer_id(&self) -> PeerId { + self.peer_id + } + pub async fn recv(&mut self) -> Option { self.rx_event.recv().await } } pub struct CtrlHandle { + peer_id: PeerId, tx_ctrl: mpsc::Sender, task_handle: task::JoinHandle<()>, } impl CtrlHandle { - pub async fn broadcast(&self, channel: Channel, data: Bytes) -> Result<(), BoxError> { + pub fn peer_id(&self) -> PeerId { + self.peer_id + } + + pub async fn publish(&self, channel: Channel, data: Bytes) -> Result<(), eyre::Report> { + self.tx_ctrl.send(CtrlMsg::Publish(channel, data)).await?; + Ok(()) + } + + pub async fn blocksync_request( + &self, + peer_id: PeerId, + data: Bytes, + ) -> Result { + let (tx, rx) = oneshot::channel(); + self.tx_ctrl - .send(CtrlMsg::BroadcastMsg(channel, data)) + .send(CtrlMsg::BlockSyncRequest(peer_id, data, tx)) + .await?; + + Ok(rx.await?) 
+ } + + pub async fn blocksync_reply( + &self, + request_id: InboundRequestId, + data: Bytes, + ) -> Result<(), eyre::Report> { + self.tx_ctrl + .send(CtrlMsg::BlockSyncReply(request_id, data)) .await?; Ok(()) } - pub async fn wait_shutdown(self) -> Result<(), BoxError> { + pub async fn wait_shutdown(self) -> Result<(), eyre::Report> { self.shutdown().await?; self.join().await?; Ok(()) } - pub async fn shutdown(&self) -> Result<(), BoxError> { + pub async fn shutdown(&self) -> Result<(), eyre::Report> { self.tx_ctrl.send(CtrlMsg::Shutdown).await?; Ok(()) } - pub async fn join(self) -> Result<(), BoxError> { + pub async fn join(self) -> Result<(), eyre::Report> { self.task_handle.await?; Ok(()) } } pub struct Handle { + peer_id: PeerId, recv: RecvHandle, ctrl: CtrlHandle, } impl Handle { pub fn new( + peer_id: PeerId, tx_ctrl: mpsc::Sender, rx_event: mpsc::Receiver, task_handle: task::JoinHandle<()>, ) -> Self { Self { - recv: RecvHandle { rx_event }, + peer_id, + recv: RecvHandle { peer_id, rx_event }, ctrl: CtrlHandle { + peer_id, tx_ctrl, task_handle, }, } } + pub fn peer_id(&self) -> PeerId { + self.peer_id + } + pub fn split(self) -> (RecvHandle, CtrlHandle) { (self.recv, self.ctrl) } @@ -72,19 +116,19 @@ impl Handle { self.recv.recv().await } - pub async fn broadcast(&self, channel: Channel, data: Bytes) -> Result<(), BoxError> { - self.ctrl.broadcast(channel, data).await + pub async fn broadcast(&self, channel: Channel, data: Bytes) -> Result<(), eyre::Report> { + self.ctrl.publish(channel, data).await } - pub async fn wait_shutdown(self) -> Result<(), BoxError> { + pub async fn wait_shutdown(self) -> Result<(), eyre::Report> { self.ctrl.wait_shutdown().await } - pub async fn shutdown(&self) -> Result<(), BoxError> { + pub async fn shutdown(&self) -> Result<(), eyre::Report> { self.ctrl.shutdown().await } - pub async fn join(self) -> Result<(), BoxError> { + pub async fn join(self) -> Result<(), eyre::Report> { self.ctrl.join().await } } diff --git a/code/crates/gossip-consensus/src/lib.rs b/code/crates/gossip-consensus/src/lib.rs index 4568d3b5d..7fa07aa55 100644 --- a/code/crates/gossip-consensus/src/lib.rs +++ b/code/crates/gossip-consensus/src/lib.rs @@ -2,18 +2,21 @@ #![allow(unexpected_cfgs)] #![cfg_attr(coverage_nightly, feature(coverage_attribute))] +use std::collections::HashMap; use std::error::Error; use std::ops::ControlFlow; use std::time::Duration; use futures::StreamExt; use libp2p::metrics::{Metrics, Recorder}; +use libp2p::request_response::InboundRequestId; use libp2p::swarm::{self, SwarmEvent}; -use libp2p::{gossipsub, identify, SwarmBuilder}; +use libp2p::{gossipsub, identify, quic, SwarmBuilder}; use libp2p_broadcast as broadcast; -use tokio::sync::mpsc; +use tokio::sync::{mpsc, oneshot}; use tracing::{debug, error, error_span, trace, Instrument}; +use malachite_blocksync::{self as blocksync, OutboundRequestId}; use malachite_discovery::{self as discovery, ConnectionData}; use malachite_metrics::SharedRegistry; @@ -32,13 +35,16 @@ pub use channel::Channel; use behaviour::{Behaviour, NetworkEvent}; use handle::Handle; -const PROTOCOL_VERSION: &str = "/malachite-gossip-consensus/v1beta1"; +const PROTOCOL: &str = "/malachite-consensus/v1beta1"; const METRICS_PREFIX: &str = "malachite_gossip_consensus"; const DISCOVERY_METRICS_PREFIX: &str = "malachite_discovery"; #[derive(Copy, Clone, Debug)] pub enum PubSubProtocol { + /// GossipSub: a pubsub protocol based on epidemic broadcast trees GossipSub(GossipSubConfig), + + /// Broadcast: a simple broadcast protocol 
Broadcast, } @@ -93,9 +99,18 @@ pub struct Config { } impl Config { - fn apply(&self, cfg: swarm::Config) -> swarm::Config { + fn apply_to_swarm(&self, cfg: swarm::Config) -> swarm::Config { cfg.with_idle_connection_timeout(self.idle_connection_timeout) } + + fn apply_to_quic(&self, mut cfg: quic::Config) -> quic::Config { + // NOTE: This is set low due to quic transport not properly resetting + // connection state when reconnecting before connection timeout. + // See https://github.com/libp2p/rust-libp2p/issues/5097 + cfg.max_idle_timeout = 300; + cfg.keep_alive_interval = Duration::from_millis(100); + cfg + } } #[derive(Copy, Clone, Debug, PartialEq, Eq)] @@ -104,32 +119,52 @@ pub enum TransportProtocol { Quic, } +/// Blocksync event details: +/// +/// peer1: blocksync peer2: gossip_consensus peer2: blocksync peer1: gossip_consensus +/// CtrlMsg::BlockSyncRequest --> Event::BlockSync -----------> CtrlMsg::BlockSyncReply ------> Event::BlockSync +/// (peer_id, height) (RawMessage::Request (request_id, height) RawMessage::Response +/// {request_id, peer_id, height} {request_id, block} +/// /// An event that can be emitted by the gossip layer -#[derive(Clone, Debug, PartialEq, Eq)] +#[derive(Clone, Debug)] pub enum Event { Listening(Multiaddr), Message(Channel, PeerId, Bytes), + BlockSync(blocksync::RawMessage), PeerConnected(PeerId), PeerDisconnected(PeerId), } #[derive(Debug)] pub enum CtrlMsg { - BroadcastMsg(Channel, Bytes), + Publish(Channel, Bytes), + BlockSyncRequest(PeerId, Bytes, oneshot::Sender), + BlockSyncReply(InboundRequestId, Bytes), Shutdown, } #[derive(Debug)] pub struct State { + pub blocksync_channels: HashMap, pub discovery: discovery::Discovery, } +impl State { + fn new(discovery: discovery::Discovery) -> Self { + Self { + blocksync_channels: Default::default(), + discovery, + } + } +} + pub async fn spawn( keypair: Keypair, config: Config, registry: SharedRegistry, -) -> Result { - let swarm = registry.with_prefix(METRICS_PREFIX, |registry| -> Result<_, BoxError> { +) -> Result { + let swarm = registry.with_prefix(METRICS_PREFIX, |registry| -> Result<_, eyre::Report> { let builder = SwarmBuilder::with_existing_identity(keypair).with_tokio(); match config.transport { TransportProtocol::Tcp => Ok(builder @@ -143,16 +178,16 @@ pub async fn spawn( .with_behaviour(|kp| { Behaviour::new_with_metrics(config.protocol, kp, config.discovery, registry) })? - .with_swarm_config(|cfg| config.apply(cfg)) + .with_swarm_config(|cfg| config.apply_to_swarm(cfg)) .build()), TransportProtocol::Quic => Ok(builder - .with_quic() + .with_quic_config(|cfg| config.apply_to_quic(cfg)) .with_dns()? .with_bandwidth_metrics(registry) .with_behaviour(|kp| { Behaviour::new_with_metrics(config.protocol, kp, config.discovery, registry) })? 
- .with_swarm_config(|cfg| config.apply(cfg)) + .with_swarm_config(|cfg| config.apply_to_swarm(cfg)) .build()), } })?; @@ -162,20 +197,18 @@ pub async fn spawn( let (tx_event, rx_event) = mpsc::channel(32); let (tx_ctrl, rx_ctrl) = mpsc::channel(32); - let state = registry.with_prefix(DISCOVERY_METRICS_PREFIX, |reg| State { - discovery: discovery::Discovery::new( - config.discovery, - config.persistent_peers.clone(), - reg, - ), + let discovery = registry.with_prefix(DISCOVERY_METRICS_PREFIX, |reg| { + discovery::Discovery::new(config.discovery, config.persistent_peers.clone(), reg) }); - let peer_id = swarm.local_peer_id(); + let state = State::new(discovery); + + let peer_id = *swarm.local_peer_id(); let span = error_span!("gossip.consensus", peer = %peer_id); let task_handle = tokio::task::spawn(run(config, metrics, state, swarm, rx_ctrl, tx_event).instrument(span)); - Ok(Handle::new(tx_ctrl, rx_event, task_handle)) + Ok(Handle::new(peer_id, tx_ctrl, rx_event, task_handle)) } async fn run( @@ -197,7 +230,10 @@ async fn run( .add_to_dial_queue(&swarm, ConnectionData::new(None, persistent_peer)); } - pubsub::subscribe(&mut swarm, Channel::all()).unwrap(); // FIXME: unwrap + if let Err(e) = pubsub::subscribe(&mut swarm, Channel::all()) { + error!("Error subscribing to channels: {e}"); + return; + }; loop { let result = tokio::select! { @@ -216,7 +252,7 @@ async fn run( } Some(ctrl) = rx_ctrl.recv() => { - handle_ctrl_msg(ctrl, &mut swarm).await + handle_ctrl_msg(ctrl, &mut swarm, &mut state).await } }; @@ -227,20 +263,53 @@ async fn run( } } -async fn handle_ctrl_msg(msg: CtrlMsg, swarm: &mut swarm::Swarm) -> ControlFlow<()> { +async fn handle_ctrl_msg( + msg: CtrlMsg, + swarm: &mut swarm::Swarm, + state: &mut State, +) -> ControlFlow<()> { match msg { - CtrlMsg::BroadcastMsg(channel, data) => { + CtrlMsg::Publish(channel, data) => { let msg_size = data.len(); let result = pubsub::publish(swarm, channel, data); match result { - Ok(()) => debug!(%channel, size = %msg_size, "Broadcasted message"), + Ok(()) => debug!(%channel, size = %msg_size, "Published message"), Err(e) => error!(%channel, "Error broadcasting message: {e}"), } ControlFlow::Continue(()) } + CtrlMsg::BlockSyncRequest(peer_id, request, reply_to) => { + let request_id = swarm + .behaviour_mut() + .blocksync + .send_request(peer_id, request); + + if let Err(e) = reply_to.send(request_id) { + error!(%peer_id, "Error sending BlockSync request: {e}"); + } + + ControlFlow::Continue(()) + } + + CtrlMsg::BlockSyncReply(request_id, data) => { + let Some(channel) = state.blocksync_channels.remove(&request_id) else { + error!(%request_id, "Received BlockSync reply for unknown request ID"); + return ControlFlow::Continue(()); + }; + + let result = swarm.behaviour_mut().blocksync.send_response(channel, data); + + match result { + Ok(()) => trace!("Replied to BlockSync request"), + Err(e) => error!("Error replying to BlockSync request: {e}"), + } + + ControlFlow::Continue(()) + } + CtrlMsg::Shutdown => ControlFlow::Break(()), } } @@ -314,7 +383,7 @@ async fn handle_swarm_event( info.protocol_version ); - if info.protocol_version == PROTOCOL_VERSION { + if info.protocol_version == PROTOCOL { trace!( "Peer {peer_id} is using compatible protocol version: {:?}", info.protocol_version @@ -353,6 +422,10 @@ async fn handle_swarm_event( return handle_broadcast_event(event, metrics, swarm, state, tx_event).await; } + SwarmEvent::Behaviour(NetworkEvent::BlockSync(event)) => { + return handle_blocksync_event(event, metrics, swarm, state, 
tx_event).await; + } + SwarmEvent::Behaviour(NetworkEvent::RequestResponse(event)) => { state.discovery.on_event(event, swarm); } @@ -497,3 +570,76 @@ async fn handle_broadcast_event( ControlFlow::Continue(()) } + +async fn handle_blocksync_event( + event: blocksync::Event, + _metrics: &Metrics, + _swarm: &mut swarm::Swarm, + state: &mut State, + tx_event: &mpsc::Sender, +) -> ControlFlow<()> { + match event { + blocksync::Event::Message { peer, message } => { + match message { + libp2p::request_response::Message::Request { + request_id, + request, + channel, + } => { + state.blocksync_channels.insert(request_id, channel); + + let _ = tx_event + .send(Event::BlockSync(blocksync::RawMessage::Request { + request_id, + peer, + body: request.0, + })) + .await + .map_err(|e| { + error!("Error sending BlockSync request to handle: {e}"); + }); + } + + libp2p::request_response::Message::Response { + request_id, + response, + } => { + let _ = tx_event + .send(Event::BlockSync(blocksync::RawMessage::Response { + request_id, + body: response.0, + })) + .await + .map_err(|e| { + error!("Error sending BlockSync response to handle: {e}"); + }); + } + } + ControlFlow::Continue(()) + } + + blocksync::Event::ResponseSent { peer, request_id } => { + // TODO + let _ = (peer, request_id); + ControlFlow::Continue(()) + } + + blocksync::Event::OutboundFailure { + peer, + request_id, + error, + } => { + let _ = (peer, request_id, error); + ControlFlow::Continue(()) + } + + blocksync::Event::InboundFailure { + peer, + request_id, + error, + } => { + let _ = (peer, request_id, error); + ControlFlow::Continue(()) + } + } +} diff --git a/code/crates/gossip-consensus/src/pubsub.rs b/code/crates/gossip-consensus/src/pubsub.rs index cdfc23bc0..31bb5ba79 100644 --- a/code/crates/gossip-consensus/src/pubsub.rs +++ b/code/crates/gossip-consensus/src/pubsub.rs @@ -3,12 +3,12 @@ use either::Either; use libp2p::swarm; use crate::behaviour::Behaviour; -use crate::{BoxError, Channel}; +use crate::Channel; pub fn subscribe( swarm: &mut swarm::Swarm, channels: &[Channel], -) -> Result<(), BoxError> { +) -> Result<(), eyre::Report> { match &mut swarm.behaviour_mut().pubsub { Either::Left(gossipsub) => { for channel in channels { @@ -29,7 +29,7 @@ pub fn publish( swarm: &mut swarm::Swarm, channel: Channel, data: Bytes, -) -> Result<(), BoxError> { +) -> Result<(), eyre::Report> { match &mut swarm.behaviour_mut().pubsub { Either::Left(gossipsub) => { gossipsub.publish(channel.to_gossipsub_topic(), data)?; diff --git a/code/crates/gossip-consensus/test/src/lib.rs b/code/crates/gossip-consensus/test/src/lib.rs index f2788963e..f4ca38602 100644 --- a/code/crates/gossip-consensus/test/src/lib.rs +++ b/code/crates/gossip-consensus/test/src/lib.rs @@ -324,5 +324,8 @@ fn init_logging() { .with_thread_ids(false); let subscriber = builder.finish(); - subscriber.init(); + + if let Err(e) = subscriber.try_init() { + eprintln!("Failed to initialize logging: {e}"); + } } diff --git a/code/crates/gossip-mempool/build.rs b/code/crates/gossip-mempool/build.rs index 2481d6c43..9a3b7ef38 100644 --- a/code/crates/gossip-mempool/build.rs +++ b/code/crates/gossip-mempool/build.rs @@ -3,11 +3,7 @@ use std::io::Result; fn main() -> Result<()> { let mut config = prost_build::Config::new(); config.enable_type_names(); - config.extern_path(".malachite.common", "::malachite_common::proto"); - config.compile_protos( - &["proto/malachite.mempool.proto"], - &["proto", "../common/proto"], - )?; + config.compile_protos(&["proto/malachite.mempool.proto"], 
&["proto"])?; Ok(()) } diff --git a/code/crates/gossip-mempool/src/behaviour.rs b/code/crates/gossip-mempool/src/behaviour.rs index 7bdcbb0a0..80271fa58 100644 --- a/code/crates/gossip-mempool/src/behaviour.rs +++ b/code/crates/gossip-mempool/src/behaviour.rs @@ -8,7 +8,7 @@ pub use libp2p::{Multiaddr, PeerId}; use malachite_metrics::Registry; -use crate::PROTOCOL_VERSION; +use crate::PROTOCOL; const MAX_TRANSMIT_SIZE: usize = 4 * 1024 * 1024; // 4 MiB @@ -50,7 +50,7 @@ impl Behaviour { pub fn new(keypair: &Keypair) -> Self { Self { identify: identify::Behaviour::new(identify::Config::new( - PROTOCOL_VERSION.to_string(), + PROTOCOL.to_string(), keypair.public(), )), ping: ping::Behaviour::new(ping::Config::new().with_interval(Duration::from_secs(5))), @@ -65,7 +65,7 @@ impl Behaviour { pub fn new_with_metrics(keypair: &Keypair, registry: &mut Registry) -> Self { Self { identify: identify::Behaviour::new(identify::Config::new( - PROTOCOL_VERSION.to_string(), + PROTOCOL.to_string(), keypair.public(), )), ping: ping::Behaviour::new(ping::Config::new().with_interval(Duration::from_secs(5))), diff --git a/code/crates/gossip-mempool/src/lib.rs b/code/crates/gossip-mempool/src/lib.rs index cadb612a6..90f50a1e9 100644 --- a/code/crates/gossip-mempool/src/lib.rs +++ b/code/crates/gossip-mempool/src/lib.rs @@ -79,7 +79,7 @@ impl fmt::Display for Channel { } } -const PROTOCOL_VERSION: &str = "malachite-gossip-mempool/v1beta1"; +const PROTOCOL: &str = "/malachite-gossip-mempool/v1beta1"; pub type BoxError = Box; @@ -283,7 +283,7 @@ async fn handle_swarm_event( info.protocol_version ); - if info.protocol_version == PROTOCOL_VERSION { + if info.protocol_version == PROTOCOL { trace!( "Peer {peer_id} is using compatible protocol version: {:?}", info.protocol_version diff --git a/code/crates/starknet/app/Cargo.toml b/code/crates/starknet/app/Cargo.toml index 2e2b15829..6f3fafcb4 100644 --- a/code/crates/starknet/app/Cargo.toml +++ b/code/crates/starknet/app/Cargo.toml @@ -9,6 +9,7 @@ rust-version.workspace = true [dependencies] malachite-actors.workspace = true +malachite-blocksync.workspace = true malachite-common.workspace = true malachite-consensus.workspace = true malachite-gossip-consensus.workspace = true @@ -27,6 +28,7 @@ bytesize.workspace = true libp2p-identity.workspace = true prost.workspace = true rand.workspace = true +serde.workspace = true serde_json.workspace = true tokio.workspace = true tracing-subscriber.workspace = true diff --git a/code/crates/starknet/app/src/codec.rs b/code/crates/starknet/app/src/codec.rs index 70d8ca0d4..5fc286669 100644 --- a/code/crates/starknet/app/src/codec.rs +++ b/code/crates/starknet/app/src/codec.rs @@ -1,22 +1,173 @@ -use malachite_gossip_consensus::Bytes; use prost::Message; +use blocksync::Status; use malachite_actors::util::codec::NetworkCodec; use malachite_actors::util::streaming::{StreamContent, StreamMessage}; -use malachite_common::{SignedProposal, SignedVote}; +use malachite_common::{Certificate, SignedProposal, SignedVote}; use malachite_consensus::SignedConsensusMsg; +use malachite_gossip_consensus::Bytes; use malachite_proto::{Error as ProtoError, Protobuf}; use malachite_starknet_host::mock::context::MockContext; -use malachite_starknet_host::types::Vote; +use malachite_starknet_host::types::{Proposal, Vote}; use malachite_starknet_p2p_proto::consensus_message::Messages; use malachite_starknet_p2p_proto::ConsensusMessage; -use malachite_starknet_p2p_types as p2p; +use malachite_starknet_p2p_proto::{self as proto}; +use 
malachite_starknet_p2p_types::{self as p2p, Height}; + +use malachite_blocksync as blocksync; pub struct ProtobufCodec; -impl NetworkCodec for ProtobufCodec { +impl blocksync::NetworkCodec for ProtobufCodec { type Error = ProtoError; + fn decode_status(bytes: Bytes) -> Result, Self::Error> { + let status = + proto::blocksync::Status::decode(bytes.as_ref()).map_err(ProtoError::Decode)?; + + let peer_id = status + .peer_id + .ok_or_else(|| ProtoError::missing_field::("peer_id"))?; + + Ok(Status { + peer_id: libp2p_identity::PeerId::from_bytes(&peer_id.id) + .map_err(|e| ProtoError::Other(e.to_string()))?, + height: Height::new(status.block_number, status.fork_id), + earliest_block_height: Height::new( + status.earliest_block_number, + status.earliest_fork_id, + ), + }) + } + + fn encode_status(status: Status) -> Result { + let proto = proto::blocksync::Status { + peer_id: Some(proto::PeerId { + id: Bytes::from(status.peer_id.to_bytes()), + }), + block_number: status.height.block_number, + fork_id: status.height.fork_id, + earliest_block_number: status.earliest_block_height.block_number, + earliest_fork_id: status.earliest_block_height.fork_id, + }; + + Ok(Bytes::from(proto.encode_to_vec())) + } + + fn decode_request(bytes: Bytes) -> Result, Self::Error> { + let request = proto::blocksync::Request::decode(bytes).map_err(ProtoError::Decode)?; + + Ok(blocksync::Request { + height: Height::new(request.block_number, request.fork_id), + }) + } + + fn encode_request(request: blocksync::Request) -> Result { + let proto = proto::blocksync::Request { + block_number: request.height.block_number, + fork_id: request.height.fork_id, + }; + + Ok(Bytes::from(proto.encode_to_vec())) + } + + fn decode_response(bytes: Bytes) -> Result, Self::Error> { + fn decode_proposal(msg: ConsensusMessage) -> Option> { + let signature = msg.signature?; + let proposal = match msg.messages { + Some(Messages::Proposal(p)) => Some(p), + _ => None, + }?; + + let signature = p2p::Signature::from_proto(signature).ok()?; + let proposal = Proposal::from_proto(proposal).ok()?; + Some(SignedProposal::new(proposal, signature)) + } + + fn decode_vote(msg: ConsensusMessage) -> Option> { + let signature = msg.signature?; + let vote = match msg.messages { + Some(Messages::Vote(v)) => Some(v), + _ => None, + }?; + + let signature = p2p::Signature::from_proto(signature).ok()?; + let vote = Vote::from_proto(vote).ok()?; + Some(SignedVote::new(vote, signature)) + } + + fn decode_sync_block( + synced_block: proto::blocksync::SyncedBlock, + ) -> Result, ProtoError> { + let commits = synced_block + .commits + .into_iter() + .filter_map(decode_vote) + .collect(); + + let certificate = Certificate::new(commits); + + Ok(blocksync::SyncedBlock { + proposal: decode_proposal(synced_block.proposal.unwrap()) + .ok_or_else(|| ProtoError::missing_field::("proposal"))?, + certificate, + block_bytes: synced_block.block_bytes, + }) + } + + let response = proto::blocksync::Response::decode(bytes).map_err(ProtoError::Decode)?; + + Ok(blocksync::Response { + height: Height::new(response.block_number, response.fork_id), + block: response.block.map(decode_sync_block).transpose()?, + }) + } + + fn encode_response(response: blocksync::Response) -> Result { + fn encode_proposal( + proposal: SignedProposal, + ) -> Result { + Ok(ConsensusMessage { + messages: Some(Messages::Proposal(proposal.message.to_proto()?)), + signature: Some(proposal.signature.to_proto()?), + }) + } + + fn encode_vote(vote: SignedVote) -> Result { + Ok(ConsensusMessage { + messages: 
Some(Messages::Vote(vote.message.to_proto()?)), + signature: Some(vote.signature.to_proto()?), + }) + } + + fn encode_synced_block( + synced_block: blocksync::SyncedBlock, + ) -> Result { + let commits = synced_block + .certificate + .commits + .into_iter() + .map(encode_vote) + .collect::, _>>()?; + + Ok(proto::blocksync::SyncedBlock { + proposal: Some(encode_proposal(synced_block.proposal)?), + commits, + block_bytes: synced_block.block_bytes, + }) + } + + let proto = proto::blocksync::Response { + block_number: response.height.block_number, + fork_id: response.height.fork_id, + block: response.block.map(encode_synced_block).transpose()?, + }; + + Ok(Bytes::from(proto.encode_to_vec())) + } +} + +impl NetworkCodec for ProtobufCodec { fn decode_msg(bytes: Bytes) -> Result, Self::Error> { let proto = ConsensusMessage::decode(bytes)?; diff --git a/code/crates/starknet/app/src/lib.rs b/code/crates/starknet/app/src/lib.rs index bd31466d2..41eefb173 100644 --- a/code/crates/starknet/app/src/lib.rs +++ b/code/crates/starknet/app/src/lib.rs @@ -5,3 +5,6 @@ pub mod codec; pub mod node; pub mod spawn; + +pub use malachite_starknet_host as host; +pub use malachite_starknet_p2p_types as types; diff --git a/code/crates/starknet/app/src/spawn.rs b/code/crates/starknet/app/src/spawn.rs index 21b6c7649..05ff620b6 100644 --- a/code/crates/starknet/app/src/spawn.rs +++ b/code/crates/starknet/app/src/spawn.rs @@ -1,17 +1,20 @@ use std::time::Duration; use libp2p_identity::ecdsa; -use tokio::sync::mpsc; +use tokio::sync::broadcast; use tokio::task::JoinHandle; +use malachite_actors::block_sync::{BlockSync, BlockSyncRef, Params as BlockSyncParams}; use malachite_actors::consensus::{Consensus, ConsensusParams, ConsensusRef}; use malachite_actors::gossip_consensus::{GossipConsensus, GossipConsensusRef}; use malachite_actors::gossip_mempool::{GossipMempool, GossipMempoolRef}; use malachite_actors::host::HostRef; use malachite_actors::node::{Node, NodeRef}; -use malachite_common::Round; +use malachite_blocksync as blocksync; +use malachite_common::SignedProposal; use malachite_config::{ - Config as NodeConfig, MempoolConfig, PubSubProtocol, TestConfig, TransportProtocol, + BlockSyncConfig, Config as NodeConfig, MempoolConfig, PubSubProtocol, TestConfig, + TransportProtocol, }; use malachite_gossip_consensus::{ Config as GossipConsensusConfig, DiscoveryConfig, GossipSubConfig, Keypair, @@ -23,7 +26,7 @@ use malachite_starknet_host::actor::StarknetHost; use malachite_starknet_host::mempool::{Mempool, MempoolRef}; use malachite_starknet_host::mock::context::MockContext; use malachite_starknet_host::mock::host::{MockHost, MockParams}; -use malachite_starknet_host::types::{Address, BlockHash, Height, PrivateKey, ValidatorSet}; +use malachite_starknet_host::types::{Address, Height, PrivateKey, ValidatorSet}; use crate::codec::ProtobufCodec; @@ -31,10 +34,13 @@ pub async fn spawn_node_actor( cfg: NodeConfig, initial_validator_set: ValidatorSet, private_key: PrivateKey, - tx_decision: Option>, + start_height: Option, + tx_decision: Option>>, ) -> (NodeRef, JoinHandle<()>) { let ctx = MockContext::new(private_key); + let start_height = start_height.unwrap_or(Height::new(1, 1)); + let registry = SharedRegistry::global(); let metrics = Metrics::register(registry); let address = Address::from_public_key(private_key.public_key()); @@ -57,7 +63,15 @@ pub async fn spawn_node_actor( ) .await; - let start_height = Height::new(1, 1); + let block_sync = spawn_block_sync_actor( + ctx.clone(), + gossip_consensus.clone(), + 
host.clone(), + &cfg.blocksync, + start_height, + registry, + ) + .await; // Spawn consensus let consensus = spawn_consensus_actor( @@ -68,6 +82,7 @@ pub async fn spawn_node_actor( cfg, gossip_consensus.clone(), host.clone(), + block_sync.clone(), metrics, tx_decision, ) @@ -79,6 +94,7 @@ pub async fn spawn_node_actor( gossip_consensus, consensus, gossip_mempool, + block_sync, mempool.get_cell(), host, start_height, @@ -89,6 +105,30 @@ pub async fn spawn_node_actor( (actor_ref, handle) } +async fn spawn_block_sync_actor( + ctx: MockContext, + gossip_consensus: GossipConsensusRef, + host: HostRef, + config: &BlockSyncConfig, + initial_height: Height, + registry: &SharedRegistry, +) -> Option> { + if !config.enabled { + return None; + } + + let params = BlockSyncParams { + status_update_interval: config.status_update_interval, + request_timeout: config.request_timeout, + }; + + let metrics = blocksync::Metrics::register(registry); + let block_sync = BlockSync::new(ctx, gossip_consensus, host, params, metrics); + let (actor_ref, _) = block_sync.spawn(initial_height).await.unwrap(); + + Some(actor_ref) +} + #[allow(clippy::too_many_arguments)] async fn spawn_consensus_actor( start_height: Height, @@ -98,8 +138,9 @@ async fn spawn_consensus_actor( cfg: NodeConfig, gossip_consensus: GossipConsensusRef, host: HostRef, + block_sync: Option>, metrics: Metrics, - tx_decision: Option>, + tx_decision: Option>>, ) -> ConsensusRef { let consensus_params = ConsensusParams { start_height, @@ -114,6 +155,7 @@ async fn spawn_consensus_actor( cfg.consensus.timeouts, gossip_consensus, host, + block_sync, metrics, tx_decision, ) @@ -211,6 +253,7 @@ async fn spawn_host_actor( txs_per_part: cfg.test.txs_per_part, time_allowance_factor: cfg.test.time_allowance_factor, exec_time_per_tx: cfg.test.exec_time_per_tx, + max_retain_blocks: cfg.test.max_retain_blocks, vote_extensions: cfg.test.vote_extensions, }; diff --git a/code/crates/starknet/host/Cargo.toml b/code/crates/starknet/host/Cargo.toml index d8dbd40c5..863a7b554 100644 --- a/code/crates/starknet/host/Cargo.toml +++ b/code/crates/starknet/host/Cargo.toml @@ -9,6 +9,7 @@ rust-version.workspace = true [dependencies] malachite-actors.workspace = true +malachite-blocksync.workspace = true malachite-common.workspace = true malachite-metrics.workspace = true malachite-config.workspace = true @@ -16,11 +17,13 @@ malachite-gossip-mempool.workspace = true malachite-proto.workspace = true malachite-starknet-p2p-types.workspace = true +bytes = { workspace = true, features = ["serde"] } + starknet-core.workspace = true async-trait.workspace = true bytesize.workspace = true -bytes.workspace = true derive-where.workspace = true +itertools.workspace = true eyre.workspace = true ractor.workspace = true rand.workspace = true diff --git a/code/crates/starknet/host/src/actor.rs b/code/crates/starknet/host/src/actor.rs index a61fc0ecb..3c3c42909 100644 --- a/code/crates/starknet/host/src/actor.rs +++ b/code/crates/starknet/host/src/actor.rs @@ -1,31 +1,31 @@ -#![allow(unused_variables, unused_imports)] - -use std::ops::Deref; use std::sync::Arc; -use bytes::Bytes; use eyre::eyre; + +use itertools::Itertools; use ractor::{async_trait, Actor, ActorProcessingErr, SpawnErr}; use rand::RngCore; use sha3::Digest; use tokio::time::Instant; -use tracing::{debug, error, trace}; +use tracing::{debug, error, trace, warn}; -use malachite_actors::consensus::ConsensusMsg; use malachite_actors::gossip_consensus::{GossipConsensusMsg, GossipConsensusRef}; -use 
malachite_actors::host::{LocallyProposedValue, ProposedValue}; use malachite_actors::util::streaming::{StreamContent, StreamId, StreamMessage}; -use malachite_common::{Extension, Round, Validity}; +use malachite_blocksync::SyncedBlock; + +use malachite_actors::consensus::ConsensusMsg; +use malachite_actors::host::{LocallyProposedValue, ProposedValue}; +use malachite_common::{Extension, Proposal, Round, Validity, Value}; use malachite_metrics::Metrics; use malachite_proto::Protobuf; -use malachite_starknet_p2p_types::Transactions; +use crate::block_store::BlockStore; use crate::mempool::{MempoolMsg, MempoolRef}; use crate::mock::context::MockContext; use crate::mock::host::MockHost; use crate::part_store::PartStore; use crate::streaming::PartStreamsMap; -use crate::types::{Address, BlockHash, Height, Proposal, ProposalPart, ValidatorSet}; +use crate::types::{Address, BlockHash, Height, ProposalPart, ValidatorSet}; use crate::Host; pub struct StarknetHost { @@ -39,6 +39,7 @@ pub struct HostState { height: Height, round: Round, proposer: Option
, + block_store: BlockStore, part_store: PartStore, part_streams_map: PartStreamsMap, next_stream_id: StreamId, @@ -50,6 +51,7 @@ impl Default for HostState { height: Height::new(0, 0), round: Round::Nil, proposer: None, + block_store: BlockStore::default(), part_store: PartStore::default(), part_streams_map: PartStreamsMap::default(), next_stream_id: StreamId::default(), @@ -127,7 +129,7 @@ impl StarknetHost { return None; }; - let Some(fin) = parts.iter().find_map(|part| part.as_fin()) else { + let Some(_fin) = parts.iter().find_map(|part| part.as_fin()) else { error!("No Fin part found in the proposal parts"); return None; }; @@ -177,7 +179,7 @@ impl StarknetHost { ) -> Option> { state.part_store.store(height, round, part.clone()); - if let ProposalPart::Transactions(txes) = &part { + if let ProposalPart::Transactions(_txes) = &part { debug!("Simulating tx execution and proof verification"); // Simulate Tx execution and proof verification (assumes success) @@ -191,14 +193,14 @@ impl StarknetHost { let all_parts = state.part_store.all_parts(height, round); - debug!( + trace!( count = state.part_store.blocks_stored(), "The store has blocks" ); // TODO: Do more validations, e.g. there is no higher tx proposal part, // check that we have received the proof, etc. - let Some(fin) = all_parts.iter().find_map(|part| part.as_fin()) else { + let Some(_fin) = all_parts.iter().find_map(|part| part.as_fin()) else { debug!("Final proposal part has not been received yet"); return None; }; @@ -213,6 +215,20 @@ impl StarknetHost { self.build_value_from_parts(&all_parts, height, round) } + + fn store_block(&self, state: &mut HostState) { + let max_height = state.block_store.store_keys().last().unwrap_or_default(); + + let min_number_blocks: u64 = std::cmp::min( + self.host.params().max_retain_blocks as u64, + max_height.as_u64(), + ); + + let retain_height = + Height::new(max_height.as_u64() - min_number_blocks, max_height.fork_id); + + state.block_store.prune(retain_height); + } } #[async_trait] @@ -248,11 +264,18 @@ impl Actor for StarknetHost { Ok(()) } + HostMsg::GetEarliestBlockHeight { reply_to } => { + let earliest_block_height = + state.block_store.store_keys().next().unwrap_or_default(); + reply_to.send(earliest_block_height)?; + Ok(()) + } + HostMsg::GetValue { height, round, timeout_duration, - address, + address: _, reply_to, } => { let deadline = Instant::now() + timeout_duration; @@ -288,13 +311,13 @@ impl Actor for StarknetHost { sequence += 1; self.gossip_consensus - .cast(GossipConsensusMsg::BroadcastProposalPart(msg))?; + .cast(GossipConsensusMsg::PublishProposalPart(msg))?; } let msg = StreamMessage::new(stream_id, sequence, StreamContent::Fin(true)); self.gossip_consensus - .cast(GossipConsensusMsg::BroadcastProposalPart(msg))?; + .cast(GossipConsensusMsg::PublishProposalPart(msg))?; let block_hash = rx_hash.await?; debug!(%block_hash, "Got block"); @@ -372,15 +395,25 @@ impl Actor for StarknetHost { } HostMsg::Decide { - height, - round, - value: block_hash, + proposal, commits, consensus, } => { - let all_parts = state.part_store.all_parts(height, round); + let height = proposal.height; + let round = proposal.round; + let mut all_parts = state.part_store.all_parts(height, round); + + let mut all_txes = vec![]; + for arc in all_parts.iter_mut() { + let part = Arc::unwrap_or_clone((*arc).clone()); + if let ProposalPart::Transactions(transactions) = part { + let mut txes = transactions.into_vec(); + all_txes.append(&mut txes); + } + } - // TODO: Build the block from proposal parts and 
commits and store it + // Build the block from proposal parts and commits and store it + state.block_store.store(&proposal, &all_txes, &commits); // Update metrics let block_size: usize = all_parts.iter().map(|p| p.size_bytes()).sum(); @@ -398,7 +431,8 @@ impl Actor for StarknetHost { .observe(block_and_commits_size as f64); self.metrics.finalized_txes.inc_by(tx_count as u64); - // Send Update to mempool to remove all the tx-es included in the block. + // Gather hashes of all the tx-es included in the block, + // so that we can notify the mempool to remove them. let mut tx_hashes = vec![]; for part in all_parts { if let ProposalPart::Transactions(txes) = &part.as_ref() { @@ -409,17 +443,75 @@ impl Actor for StarknetHost { // Prune the PartStore of all parts for heights lower than `state.height` state.part_store.prune(state.height); + // Store the block + self.store_block(state); + // Notify the mempool to remove corresponding txs self.mempool.cast(MempoolMsg::Update { tx_hashes })?; // Notify Starknet Host of the decision - self.host.decision(block_hash, commits, height).await; + self.host + .decision(proposal.block_hash, commits, height) + .await; // Start the next height consensus.cast(ConsensusMsg::StartHeight(state.height.increment()))?; Ok(()) } + + HostMsg::GetDecidedBlock { height, reply_to } => { + debug!(%height, "Received request for block"); + + match state.block_store.store.get(&height).cloned() { + None => { + warn!( + "No block for {height}, available blocks: {}", + state.block_store.store_keys().format(", ") + ); + + reply_to.send(None)?; + } + Some(block) => { + let block = SyncedBlock { + proposal: block.proposal, + block_bytes: block.block.to_bytes().unwrap(), + certificate: block.certificate, + }; + + debug!("Got block at {height}"); + reply_to.send(Some(block))?; + } + } + + Ok(()) + } + + HostMsg::ProcessSyncedBlockBytes { + proposal, + block_bytes, + reply_to, + } => { + // TODO - process and check that block_bytes match the proposal + let _block_hash = { + let mut block_hasher = sha3::Keccak256::new(); + block_hasher.update(block_bytes); + BlockHash::new(block_hasher.finalize().into()) + }; + + let proposal = ProposedValue { + height: proposal.height(), + round: proposal.round(), + validator_address: proposal.validator_address().clone(), + value: proposal.value().id(), + validity: Validity::Valid, + extension: None, + }; + + reply_to.send(proposal)?; + + Ok(()) + } } } } diff --git a/code/crates/starknet/host/src/block_store.rs b/code/crates/starknet/host/src/block_store.rs new file mode 100644 index 000000000..83076998e --- /dev/null +++ b/code/crates/starknet/host/src/block_store.rs @@ -0,0 +1,69 @@ +use std::collections::BTreeMap; + +use malachite_common::Value; +use malachite_common::{Certificate, Proposal, SignedProposal, SignedVote}; +use malachite_starknet_p2p_types::{Block, Height, Transaction, Transactions}; + +use crate::mock::context::MockContext; + +#[derive(Clone, Debug)] +pub struct DecidedBlock { + pub block: Block, + pub proposal: SignedProposal, + pub certificate: Certificate, +} + +// This is a temporary store implementation for blocks +type Store = BTreeMap; + +#[derive(Clone, Debug)] +pub struct BlockStore { + pub(crate) store: Store, +} + +impl Default for BlockStore { + fn default() -> Self { + Self::new() + } +} + +impl BlockStore { + pub fn new() -> Self { + Self { + store: Default::default(), + } + } + + pub fn store_keys(&self) -> impl Iterator + use<'_> { + self.store.keys().copied() + } + + pub fn store( + &mut self, + proposal: 
&SignedProposal, + txes: &[Transaction], + commits: &[SignedVote], + ) { + let block_id = proposal.value().id(); + + let certificate = Certificate { + commits: commits.to_vec(), + }; + + let decided_block = DecidedBlock { + block: Block { + height: proposal.height(), + block_hash: block_id, + transactions: Transactions::new(txes.to_vec()), + }, + proposal: proposal.clone(), + certificate, + }; + + self.store.insert(proposal.height(), decided_block); + } + + pub fn prune(&mut self, retain_height: Height) { + self.store.retain(|height, _| *height >= retain_height); + } +} diff --git a/code/crates/starknet/host/src/lib.rs b/code/crates/starknet/host/src/lib.rs index ba8c01eb9..165d6fce4 100644 --- a/code/crates/starknet/host/src/lib.rs +++ b/code/crates/starknet/host/src/lib.rs @@ -6,6 +6,8 @@ mod host; pub use host::Host; pub mod actor; + +pub mod block_store; pub mod mempool; pub mod mock; pub mod part_store; diff --git a/code/crates/starknet/host/src/mock/host.rs b/code/crates/starknet/host/src/mock/host.rs index c1bfbf1bd..eb015ee6f 100644 --- a/code/crates/starknet/host/src/mock/host.rs +++ b/code/crates/starknet/host/src/mock/host.rs @@ -27,6 +27,7 @@ pub struct MockParams { pub txs_per_part: usize, pub time_allowance_factor: f32, pub exec_time_per_tx: Duration, + pub max_retain_blocks: usize, pub vote_extensions: VoteExtensionsConfig, } diff --git a/code/crates/starknet/p2p-proto/build.rs b/code/crates/starknet/p2p-proto/build.rs index f76765d1b..4a5fac9a4 100644 --- a/code/crates/starknet/p2p-proto/build.rs +++ b/code/crates/starknet/p2p-proto/build.rs @@ -1,5 +1,6 @@ fn main() -> Result<(), Box> { let protos = &[ + "./proto/blocksync.proto", "./proto/p2p/proto/common.proto", "./proto/p2p/proto/header.proto", "./proto/p2p/proto/transaction.proto", @@ -14,7 +15,7 @@ fn main() -> Result<(), Box> { let mut config = prost_build::Config::new(); config.bytes(["."]); config.enable_type_names(); - config.default_package_filename("p2p_specs"); + config.default_package_filename("p2p"); config.compile_protos(protos, &["./proto"])?; Ok(()) diff --git a/code/crates/starknet/p2p-proto/proto/blocksync.proto b/code/crates/starknet/p2p-proto/proto/blocksync.proto new file mode 100644 index 000000000..1f4a04755 --- /dev/null +++ b/code/crates/starknet/p2p-proto/proto/blocksync.proto @@ -0,0 +1,39 @@ +syntax = "proto3"; + +package blocksync; + +import "p2p/proto/common.proto"; +import "p2p/proto/consensus.proto"; +import "p2p/proto/transaction.proto"; + +message Status { + PeerID peer_id = 1; + uint64 block_number = 2; + uint64 fork_id = 3; + uint64 earliest_block_number = 4; + uint64 earliest_fork_id = 5; +} + +message Request { + uint64 block_number = 1; + uint64 fork_id = 2; +} + +message Response { + uint64 block_number = 1; + uint64 fork_id = 2; + SyncedBlock block = 3; +} + +message SyncedBlock { + ConsensusMessage proposal = 1; + repeated ConsensusMessage commits = 2; + bytes block_bytes = 3; +} + +message Block { + uint64 block_number = 1; + uint64 fork_id = 2; + Transactions transactions = 3; + Hash block_hash = 4; +} diff --git a/code/crates/starknet/p2p-proto/src/lib.rs b/code/crates/starknet/p2p-proto/src/lib.rs index 64de3848f..276bf5a6e 100644 --- a/code/crates/starknet/p2p-proto/src/lib.rs +++ b/code/crates/starknet/p2p-proto/src/lib.rs @@ -1,3 +1,7 @@ #![allow(clippy::large_enum_variant)] -include!(concat!(env!("OUT_DIR"), "/p2p_specs.rs")); +include!(concat!(env!("OUT_DIR"), "/p2p.rs")); + +pub mod blocksync { + include!(concat!(env!("OUT_DIR"), "/blocksync.rs")); +} diff --git 
a/code/crates/starknet/p2p-types/src/block.rs b/code/crates/starknet/p2p-types/src/block.rs new file mode 100644 index 000000000..cfc87663d --- /dev/null +++ b/code/crates/starknet/p2p-types/src/block.rs @@ -0,0 +1,40 @@ +use crate::{BlockHash, Height, Transactions}; + +use malachite_proto::{Error as ProtoError, Protobuf}; +use malachite_starknet_p2p_proto as proto; + +#[derive(Clone, Debug)] +pub struct Block { + pub height: Height, + pub transactions: Transactions, + pub block_hash: BlockHash, +} + +impl Protobuf for Block { + type Proto = proto::blocksync::Block; + + fn from_proto(proto: Self::Proto) -> Result { + let transactions = proto + .transactions + .ok_or_else(|| ProtoError::missing_field::("transactions"))?; + + let block_hash = proto + .block_hash + .ok_or_else(|| ProtoError::missing_field::("block_hash"))?; + + Ok(Self { + height: Height::new(proto.block_number, proto.fork_id), + transactions: Transactions::from_proto(transactions)?, + block_hash: BlockHash::from_proto(block_hash)?, + }) + } + + fn to_proto(&self) -> Result { + Ok(Self::Proto { + block_number: self.height.block_number, + fork_id: self.height.fork_id, + transactions: Some(self.transactions.to_proto()?), + block_hash: Some(self.block_hash.to_proto()?), + }) + } +} diff --git a/code/crates/starknet/p2p-types/src/block_proof.rs b/code/crates/starknet/p2p-types/src/block_proof.rs index 304cfd93f..21580764e 100644 --- a/code/crates/starknet/p2p-types/src/block_proof.rs +++ b/code/crates/starknet/p2p-types/src/block_proof.rs @@ -2,7 +2,7 @@ use bytes::Bytes; use malachite_proto as proto; use malachite_starknet_p2p_proto as p2p_proto; -#[derive(Clone, Debug, PartialEq, Eq)] +#[derive(Clone, Debug, Default, PartialEq, Eq)] pub struct BlockProof { pub bytes: Vec, } diff --git a/code/crates/starknet/p2p-types/src/height.rs b/code/crates/starknet/p2p-types/src/height.rs index 0b462aca2..156d8c976 100644 --- a/code/crates/starknet/p2p-types/src/height.rs +++ b/code/crates/starknet/p2p-types/src/height.rs @@ -45,8 +45,11 @@ impl fmt::Display for Height { } impl malachite_common::Height for Height { - fn increment(&self) -> Self { - self.increment() + fn increment_by(&self, n: u64) -> Self { + Self { + block_number: self.block_number + n, + fork_id: self.fork_id, + } } fn as_u64(&self) -> u64 { diff --git a/code/crates/starknet/p2p-types/src/lib.rs b/code/crates/starknet/p2p-types/src/lib.rs index 8d7790c21..993233b23 100644 --- a/code/crates/starknet/p2p-types/src/lib.rs +++ b/code/crates/starknet/p2p-types/src/lib.rs @@ -29,6 +29,9 @@ pub use validator_set::ValidatorSet; mod proposal_part; pub use proposal_part::{PartType, ProposalFin, ProposalInit, ProposalPart}; +mod block; +pub use block::Block; + mod block_proof; pub use block_proof::BlockProof; diff --git a/code/crates/starknet/p2p-types/src/transaction.rs b/code/crates/starknet/p2p-types/src/transaction.rs index c641ec5c7..95a89ea5c 100644 --- a/code/crates/starknet/p2p-types/src/transaction.rs +++ b/code/crates/starknet/p2p-types/src/transaction.rs @@ -110,6 +110,12 @@ impl Transactions { self.0.push(tx); } + /// Add a set of transaction to the batch + pub fn append(&mut self, txes: Transactions) { + let mut txes1 = txes.clone(); + self.0.append(&mut txes1.0); + } + /// Get the number of transactions in the batch pub fn len(&self) -> usize { self.0.len() diff --git a/code/crates/starknet/p2p-types/src/validator_set.rs b/code/crates/starknet/p2p-types/src/validator_set.rs index 77056855f..8cacae0f2 100644 --- a/code/crates/starknet/p2p-types/src/validator_set.rs +++ 
b/code/crates/starknet/p2p-types/src/validator_set.rs @@ -4,7 +4,7 @@ use serde::{Deserialize, Serialize}; use crate::{Address, PublicKey, Validator}; /// A validator set contains a list of validators sorted by address. -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[derive(Clone, Debug, PartialEq, Eq)] pub struct ValidatorSet { pub validators: Vec, } @@ -63,15 +63,14 @@ impl ValidatorSet { /// In place sort and deduplication of a list of validators fn sort_validators(vals: &mut Vec) { // Sort the validators according to the current Tendermint requirements - // - // use core::cmp::Reverse; - // - // (v. 0.34 -> first by validator power, descending, then by address, ascending) - // vals.sort_unstable_by(|v1, v2| { - // let a = (Reverse(v1.voting_power), &v1.address); - // let b = (Reverse(v2.voting_power), &v2.address); - // a.cmp(&b) - // }); + use core::cmp::Reverse; + + // first by validator power descending, then by address ascending + vals.sort_unstable_by(|v1, v2| { + let a = (Reverse(v1.voting_power), &v1.address); + let b = (Reverse(v2.voting_power), &v2.address); + a.cmp(&b) + }); vals.dedup(); } @@ -80,3 +79,32 @@ impl ValidatorSet { self.validators.iter().map(|v| v.public_key).collect() } } + +impl Serialize for ValidatorSet { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + #[derive(Serialize)] + struct ValidatorSet<'a> { + validators: &'a [Validator], + } + + let vs = ValidatorSet { + validators: &self.validators, + }; + + vs.serialize(serializer) + } +} + +impl<'de> Deserialize<'de> for ValidatorSet { + fn deserialize>(deserializer: D) -> Result { + #[derive(Deserialize)] + struct ValidatorSet { + validators: Vec, + } + + ValidatorSet::deserialize(deserializer).map(|vs| Self::new(vs.validators)) + } +} diff --git a/code/crates/starknet/test/Cargo.toml b/code/crates/starknet/test/Cargo.toml index 776235be3..80c5066e8 100644 --- a/code/crates/starknet/test/Cargo.toml +++ b/code/crates/starknet/test/Cargo.toml @@ -10,12 +10,13 @@ rust-version.workspace = true [dependencies] malachite-common.workspace = true -malachite-config.workspace = true +malachite-config.workspace = true malachite-starknet-host.workspace = true malachite-starknet-app.workspace = true bytesize.workspace = true rand.workspace = true +ractor.workspace = true tokio.workspace = true tracing.workspace = true tracing-subscriber.workspace = true diff --git a/code/crates/starknet/test/src/lib.rs b/code/crates/starknet/test/src/lib.rs index 050b5297d..1d2587e3f 100644 --- a/code/crates/starknet/test/src/lib.rs +++ b/code/crates/starknet/test/src/lib.rs @@ -1,22 +1,29 @@ +#![allow(unused_crate_dependencies)] + use core::fmt; +use std::str::FromStr; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; +use malachite_starknet_host::mock::context::MockContext; use rand::rngs::StdRng; -use rand::{Rng, SeedableRng}; -use tokio::sync::mpsc; +use rand::SeedableRng; +use tokio::sync::broadcast; +use tokio::task::JoinSet; use tokio::time::{sleep, Duration}; -use tracing::{error, info, Instrument}; +use tracing::{error, error_span, info, Instrument}; -use malachite_common::VotingPower; +use malachite_common::{SignedProposal, VotingPower}; use malachite_config::{ - Config as NodeConfig, Config, DiscoveryConfig, LoggingConfig, PubSubProtocol, TransportProtocol, + BlockSyncConfig, Config as NodeConfig, Config, DiscoveryConfig, LoggingConfig, PubSubProtocol, + TestConfig, TransportProtocol, }; use malachite_starknet_app::spawn::spawn_node_actor; use 
malachite_starknet_host::types::{Height, PrivateKey, Validator, ValidatorSet}; pub use malachite_config::App; +#[derive(Copy, Clone, Debug, PartialEq, Eq)] pub enum Expected { Exactly(usize), AtLeast(usize), @@ -50,237 +57,387 @@ impl fmt::Display for Expected { } pub struct TestParams { - protocol: PubSubProtocol, - block_size: ByteSize, - tx_size: ByteSize, + pub enable_blocksync: bool, + pub protocol: PubSubProtocol, + pub block_size: ByteSize, + pub tx_size: ByteSize, + pub txs_per_part: usize, + pub vote_extensions: Option, +} + +impl Default for TestParams { + fn default() -> Self { + Self { + enable_blocksync: false, + protocol: PubSubProtocol::default(), + block_size: ByteSize::mib(1), + tx_size: ByteSize::kib(1), + txs_per_part: 256, + vote_extensions: None, + } + } } impl TestParams { - pub fn new(protocol: PubSubProtocol, block_size: ByteSize, tx_size: ByteSize) -> Self { + fn apply_to_config(&self, config: &mut Config) { + config.blocksync.enabled = self.enable_blocksync; + config.consensus.p2p.protocol = self.protocol; + config.consensus.max_block_size = self.block_size; + config.test.tx_size = self.tx_size; + config.test.txs_per_part = self.txs_per_part; + config.test.vote_extensions.enabled = self.vote_extensions.is_some(); + config.test.vote_extensions.size = self.vote_extensions.unwrap_or_default(); + } +} + +#[derive(Clone, Debug, PartialEq, Eq)] +pub enum Step { + Crash, + Restart(Duration), + WaitUntil(u64), + Expect(Expected), + Success, + Pause, +} + +pub type NodeId = usize; + +#[derive(Clone)] +pub struct TestNode { + pub id: NodeId, + pub voting_power: VotingPower, + pub start_height: Height, + pub start_delay: Duration, + pub steps: Vec, +} + +impl TestNode { + pub fn new(id: usize) -> Self { Self { - protocol, - block_size, - tx_size, + id, + voting_power: 1, + start_height: Height::new(1, 1), + start_delay: Duration::from_secs(0), + steps: vec![], } } + + pub fn vp(mut self, power: VotingPower) -> Self { + self.voting_power = power; + self + } + + pub fn start(self) -> Self { + self.start_at(1) + } + + pub fn start_at(self, height: u64) -> Self { + self.start_after(height, Duration::from_secs(0)) + } + + pub fn start_after(mut self, height: u64, delay: Duration) -> Self { + self.start_height.block_number = height; + self.start_delay = delay; + self + } + + pub fn crash(mut self) -> Self { + self.steps.push(Step::Crash); + self + } + + pub fn restart_after(mut self, delay: Duration) -> Self { + self.steps.push(Step::Restart(delay)); + self + } + + pub fn wait_until(mut self, height: u64) -> Self { + self.steps.push(Step::WaitUntil(height)); + self + } + + pub fn expect(mut self, expected: Expected) -> Self { + self.steps.push(Step::Expect(expected)); + self + } + + pub fn success(mut self) -> Self { + self.steps.push(Step::Success); + self + } + + pub fn pause(mut self) -> Self { + self.steps.push(Step::Pause); + self + } +} + +fn unique_id() -> usize { + use std::sync::atomic::{AtomicUsize, Ordering}; + static ID: AtomicUsize = AtomicUsize::new(1); + ID.fetch_add(1, Ordering::SeqCst) } pub struct Test { + pub id: usize, pub nodes: [TestNode; N], + pub private_keys: [PrivateKey; N], pub validator_set: ValidatorSet, - pub vals_and_keys: [(Validator, PrivateKey); N], - pub expected_decisions: Expected, pub consensus_base_port: usize, pub mempool_base_port: usize, pub metrics_base_port: usize, } impl Test { - pub fn new(nodes: [TestNode; N], expected_decisions: Expected) -> Self { - let vals_and_keys = make_validators(Self::voting_powers(&nodes)); - let validators = 
vals_and_keys.iter().map(|(v, _)| v).cloned(); + pub fn new(nodes: [TestNode; N]) -> Self { + let vals_and_keys = make_validators(voting_powers(&nodes)); + let (validators, private_keys): (Vec<_>, Vec<_>) = vals_and_keys.into_iter().unzip(); + let private_keys = private_keys.try_into().expect("N private keys"); let validator_set = ValidatorSet::new(validators); + let id = unique_id(); + let base_port = 20_000 + id * 1000; Self { + id, nodes, + private_keys, validator_set, - vals_and_keys, - expected_decisions, - consensus_base_port: rand::thread_rng().gen_range(21000..30000), - mempool_base_port: rand::thread_rng().gen_range(31000..40000), - metrics_base_port: rand::thread_rng().gen_range(41000..50000), + consensus_base_port: base_port, + mempool_base_port: base_port + 100, + metrics_base_port: base_port + 200, } } - pub fn voting_powers(nodes: &[TestNode; N]) -> [VotingPower; N] { - let mut voting_powers = [0; N]; - for (i, node) in nodes.iter().enumerate() { - voting_powers[i] = node.voting_power; - } - voting_powers - } - pub fn generate_default_configs(&self, app: App) -> [Config; N] { - let mut configs = vec![]; + let configs: Vec<_> = (0..N).map(|i| make_node_config(self, i, app)).collect(); + configs.try_into().expect("N configs") + } - for i in 0..N { - let config = make_node_config(self, i, app); - configs.push(config) + pub fn generate_custom_configs(&self, app: App, params: TestParams) -> [Config; N] { + let mut configs = self.generate_default_configs(app); + for config in &mut configs { + params.apply_to_config(config); } + configs + } - configs.try_into().expect("N configs") + pub async fn run(self, app: App, timeout: Duration) { + let configs = self.generate_default_configs(app); + self.run_with_config(configs, timeout).await + } + + pub async fn run_with_custom_config(self, app: App, timeout: Duration, params: TestParams) { + let configs = self.generate_custom_configs(app, params); + self.run_with_config(configs, timeout).await } - pub fn generate_custom_configs(&self, app: App, test_params: TestParams) -> [Config; N] { - let mut configs = vec![]; + pub async fn run_with_config(self, configs: [Config; N], timeout: Duration) { + init_logging(); - for i in 0..N { - let mut config = make_node_config(self, i, app); + let _span = error_span!("test", id = %self.id).entered(); - config.consensus.max_block_size = test_params.block_size; - config.consensus.p2p.protocol = test_params.protocol; - config.test.tx_size = test_params.tx_size; - config.test.txs_per_part = 1; + let mut set = JoinSet::new(); - configs.push(config); + for ((node, config), private_key) in self + .nodes + .into_iter() + .zip(configs.into_iter()) + .zip(self.private_keys.into_iter()) + { + let validator_set = self.validator_set.clone(); + set.spawn( + async move { + let id = node.id; + let result = run_node(node, config, validator_set, private_key).await; + (id, result) + } + .instrument(tracing::Span::current()), + ); } - configs.try_into().expect("N configs") + let results = tokio::time::timeout(timeout, set.join_all()).await; + match results { + Ok(results) => { + check_results(results); + } + Err(_) => { + error!("Test timed out after {timeout:?}"); + std::process::exit(1); + } + } } +} - pub async fn run(self, app: App) { - let node_configs = self.generate_default_configs(app); - self.run_with_config(&node_configs).await - } +fn check_results(results: Vec<(NodeId, TestResult)>) { + let mut errors = 0; - pub async fn run_with_custom_config(self, app: App, test_params: TestParams) { - let node_configs = 
self.generate_custom_configs(app, test_params); - self.run_with_config(&node_configs).await + for (id, result) in results { + let _span = tracing::error_span!("node", %id).entered(); + match result { + TestResult::Success(actual, expected) => { + info!("Correct number of decisions: got {actual}, expected: {expected}",); + } + TestResult::Failure(actual, expected) => { + errors += 1; + error!("Incorrect number of decisions: got {actual}, expected {expected}",); + } + TestResult::Unknown => { + errors += 1; + error!("Unknown test result"); + } + } } - pub async fn run_with_config(self, configs: &[Config; N]) { - init_logging(); + if errors > 0 { + error!("Test failed with {errors} errors"); + std::process::exit(1); + } +} - let mut handles = Vec::with_capacity(N); +pub enum TestResult { + Success(usize, Expected), + Failure(usize, Expected), + Unknown, +} - for (i, config) in configs.iter().enumerate().take(N) { - if self.nodes[i].faults.contains(&Fault::NoStart) { - continue; +type RxDecision = broadcast::Receiver>; + +#[tracing::instrument("node", skip_all, fields(id = %node.id))] +async fn run_node( + node: TestNode, + config: Config, + validator_set: ValidatorSet, + private_key: PrivateKey, +) -> TestResult { + sleep(node.start_delay).await; + + info!("Spawning node with voting power {}", node.voting_power); + + let (tx, mut rx) = broadcast::channel(100); + let (mut actor_ref, mut handle) = spawn_node_actor( + config.clone(), + validator_set.clone(), + private_key, + Some(node.start_height), + Some(tx.clone()), + ) + .await; + + let decisions = Arc::new(AtomicUsize::new(0)); + + let spawn_bg = |mut rx: RxDecision| { + tokio::spawn({ + let decisions = Arc::clone(&decisions); + + async move { + while let Ok(_decision) = rx.recv().await { + decisions.fetch_add(1, Ordering::SeqCst); + } } + }) + }; - let (_, private_key) = &self.vals_and_keys[i]; - let (tx_decision, rx_decision) = mpsc::channel(HEIGHTS as usize); + let mut bg = spawn_bg(tx.subscribe()); - let node = tokio::spawn(spawn_node_actor( - config.clone(), - self.validator_set.clone(), - *private_key, - Some(tx_decision), - )); + for step in node.steps { + match step { + Step::WaitUntil(target_height) => { + info!("Waiting until node reaches height {target_height}"); - handles.push((node, rx_decision)); - } + 'inner: while let Ok(decision) = rx.recv().await { + let height = decision.height.as_u64(); + info!("Node reached height {height}"); - sleep(Duration::from_secs(5)).await; + if height == target_height { + sleep(Duration::from_millis(100)).await; + break 'inner; + } + } + } - let mut nodes = Vec::with_capacity(handles.len()); - for (i, (handle, rx)) in handles.into_iter().enumerate() { - let (actor_ref, _) = handle.await.expect("Error: node failed to start"); - let test = self.nodes[i].clone(); - nodes.push((actor_ref, test, rx)); - } + Step::Crash => { + let height = decisions.load(Ordering::SeqCst); + info!("Node crashes at height {height}"); - let mut actors = Vec::with_capacity(nodes.len()); - let mut rxs = Vec::with_capacity(nodes.len()); + actor_ref + .stop_and_wait(Some("Node must crash".to_string()), None) + .await + .expect("Node must stop"); + } - for (actor, _, rx) in nodes { - actors.push(actor); - rxs.push(rx); - } + Step::Restart(after) => { + info!("Node will restart in {after:?}"); - let correct_decisions = Arc::new(AtomicUsize::new(0)); + sleep(after).await; - for (i, mut rx_decision) in rxs.into_iter().enumerate() { - let correct_decisions = Arc::clone(&correct_decisions); + bg.abort(); + handle.abort(); - let 
node_test = self.nodes[i].clone(); - let actor_ref = actors[i].clone(); + let (new_tx, new_rx) = broadcast::channel(100); + let (new_actor_ref, new_handle) = spawn_node_actor( + config.clone(), + validator_set.clone(), + private_key, + Some(node.start_height), + Some(new_tx.clone()), + ) + .await; - tokio::spawn( - async move { - for height in START_HEIGHT.as_u64()..=END_HEIGHT.as_u64() { - if node_test.crashes_at(height) { - info!("Faulty node has crashed"); - actor_ref.kill(); - break; - } - - let decision = rx_decision.recv().await; - - // TODO: Heights can go to higher rounds, therefore removing the round and value check for now. - match decision { - Some((h, _r, _)) if h.as_u64() == height /* && r == Round::new(0) */ => { - info!(%height, heights = HEIGHTS, "Correct decision"); - correct_decisions.fetch_add(1, Ordering::Relaxed); - } - _ => { - error!("{height}/{HEIGHTS} no decision") - } - } - } - } - .instrument(tracing::error_span!("node", i)), - ); - } + bg = spawn_bg(new_tx.subscribe()); - tokio::time::sleep(TEST_TIMEOUT).await; + actor_ref = new_actor_ref; + handle = new_handle; + rx = new_rx; + } - let correct_decisions = correct_decisions.load(Ordering::Relaxed); + Step::Pause => { + info!("Pausing"); + while rx.recv().await.is_ok() {} + } - if !self.expected_decisions.check(correct_decisions) { - panic!( - "Incorrect number of decisions: got {}, expected {}", - correct_decisions, self.expected_decisions - ); - } + Step::Expect(expected) => { + let actual = decisions.load(Ordering::SeqCst); - for actor in actors { - let _ = actor.stop_and_wait(None, None).await; - } - } -} + actor_ref.stop(Some("Test is over".to_string())); + handle.abort(); + bg.abort(); -#[derive(Clone, Debug, PartialEq, Eq)] -pub enum Fault { - NoStart, - Crash(u64), -} + if expected.check(actual) { + return TestResult::Success(actual, expected); + } else { + return TestResult::Failure(actual, expected); + } + } -#[derive(Clone)] -pub struct TestNode { - pub voting_power: VotingPower, - pub faults: Vec, -} + Step::Success => { + actor_ref.stop(Some("Test is over".to_string())); + handle.abort(); + bg.abort(); -impl TestNode { - pub fn correct(voting_power: VotingPower) -> Self { - Self { - voting_power, - faults: vec![], - } - } - - pub fn faulty(voting_power: VotingPower, faults: Vec) -> Self { - Self { - voting_power, - faults, + let actual = decisions.load(Ordering::SeqCst); + return TestResult::Success(actual, Expected::Exactly(actual)); + } } } - pub fn start_node(&self) -> bool { - !self.faults.contains(&Fault::NoStart) - } + actor_ref.stop(Some("Test is over".to_string())); + handle.abort(); + bg.abort(); - pub fn crashes_at(&self, height: u64) -> bool { - self.faults.iter().any(|f| match f { - Fault::NoStart => false, - Fault::Crash(h) => *h == height, - }) - } + return TestResult::Unknown; } -pub const HEIGHTS: u64 = 3; -pub const START_HEIGHT: Height = Height::new(1, 1); -pub const END_HEIGHT: Height = START_HEIGHT.increment_by(HEIGHTS - 1); -pub const TEST_TIMEOUT: Duration = Duration::from_secs(20); - fn init_logging() { use tracing_subscriber::util::SubscriberInitExt; use tracing_subscriber::{EnvFilter, FmtSubscriber}; - let filter = EnvFilter::builder() - .parse("info,malachite=debug,ractor=error") - .unwrap(); + let directive = if matches!(std::env::var("TEST_DEBUG").as_deref(), Ok("1")) { + "malachite=debug,malachite_starknet_test=debug,ractor=error" + } else { + "malachite=error,malachite_starknet_test=debug,ractor=error" + }; + + let filter = EnvFilter::builder().parse(directive).unwrap(); 
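As a hypothetical companion to the crash/restart scenario further below, a minimal test written with the step DSL defined above might look as follows (not part of the patch; it assumes the same imports as the test files under tests/, i.e. std::time::Duration and malachite_starknet_test::{App, Expected, Test, TestNode}).

#[tokio::test]
pub async fn three_nodes_small_heights() {
    // Three equal-weight nodes; the third asserts on the number of decisions
    // via Expected instead of recording a plain success.
    let n1 = TestNode::new(1).vp(10).start().wait_until(5).success();
    let n2 = TestNode::new(2).vp(10).start().wait_until(5).success();
    let n3 = TestNode::new(3)
        .vp(10)
        .start()
        .wait_until(3)
        .expect(Expected::AtLeast(3));

    Test::new([n1, n2, n3])
        .run(App::Starknet, Duration::from_secs(30))
        .await
}
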
pub fn enable_ansi() -> bool { use std::io::IsTerminal; @@ -296,7 +453,10 @@ fn init_logging() { .with_thread_ids(false); let subscriber = builder.finish(); - subscriber.init(); + + if let Err(e) = subscriber.try_init() { + eprintln!("Failed to initialize logging: {e}"); + } } use bytesize::ByteSize; @@ -305,8 +465,16 @@ use malachite_config::{ ConsensusConfig, MempoolConfig, MetricsConfig, P2pConfig, RuntimeConfig, TimeoutConfig, }; +fn transport_from_env(default: TransportProtocol) -> TransportProtocol { + if let Ok(protocol) = std::env::var("MALACHITE_TRANSPORT") { + TransportProtocol::from_str(&protocol).unwrap_or(default) + } else { + default + } +} + pub fn make_node_config(test: &Test, i: usize, app: App) -> NodeConfig { - let transport = TransportProtocol::Tcp; + let transport = transport_from_env(TransportProtocol::Tcp); let protocol = PubSubProtocol::default(); NodeConfig { @@ -341,6 +509,11 @@ pub fn make_node_config(test: &Test, i: usize, app: App) -> N max_tx_count: 10000, gossip_batch_size: 100, }, + blocksync: BlockSyncConfig { + enabled: false, + status_update_interval: Duration::from_secs(2), + request_timeout: Duration::from_secs(5), + }, metrics: MetricsConfig { enabled: false, listen_addr: format!("127.0.0.1:{}", test.metrics_base_port + i) @@ -348,8 +521,16 @@ pub fn make_node_config(test: &Test, i: usize, app: App) -> N .unwrap(), }, runtime: RuntimeConfig::single_threaded(), - test: Default::default(), + test: TestConfig::default(), + } +} + +fn voting_powers(nodes: &[TestNode; N]) -> [VotingPower; N] { + let mut voting_powers = [0; N]; + for (i, node) in nodes.iter().enumerate() { + voting_powers[i] = node.voting_power; } + voting_powers } pub fn make_validators( diff --git a/code/crates/starknet/test/tests/blocksync.rs b/code/crates/starknet/test/tests/blocksync.rs new file mode 100644 index 000000000..656eecef5 --- /dev/null +++ b/code/crates/starknet/test/tests/blocksync.rs @@ -0,0 +1,84 @@ +#![allow(unused_crate_dependencies)] + +use std::time::Duration; + +use malachite_starknet_test::{App, Test, TestNode, TestParams}; + +#[tokio::test] +pub async fn crash_restart() { + const HEIGHT: u64 = 10; + + // Node 1 starts with 10 voting power. + let n1 = TestNode::new(1) + .vp(10) + .start() + // Wait until it reaches height 10 + .wait_until(HEIGHT) + // Record a successful test for this node + .success(); + + // Node 2 starts with 10 voting power, in parallel with node 1 and with the same behaviour + let n2 = TestNode::new(2).vp(10).start().wait_until(HEIGHT).success(); + + // Node 3 starts with 5 voting power, in parallel with node 1 and 2. + let n3 = TestNode::new(3) + .vp(5) + .start() + // Then the test runner waits until it reaches height 2... + .wait_until(2) + // ...and kills the node! 
+ .crash() + // After that, it waits 5 seconds before restarting the node + .restart_after(Duration::from_secs(5)) + // Wait until the node reached the expected height + .wait_until(HEIGHT) + // Record a successful test for this node + .success(); + + Test::new([n1, n2, n3]) + .run_with_custom_config( + App::Starknet, // Application to run + Duration::from_secs(60), // Timeout for the whole test + TestParams { + enable_blocksync: true, // Enable BlockSync + ..Default::default() + }, + ) + .await +} + +// TODO: Enable this test once we can start the network without everybody being online +// #[tokio::test] +// pub async fn blocksync_start_late() { +// const HEIGHT: u64 = 5; +// +// let n1 = TestNode::new(1) +// .voting_power(10) +// .start(1) +// .wait_until(HEIGHT * 2) +// .success(); +// +// let n2 = TestNode::new(2) +// .voting_power(10) +// .start(1) +// .wait_until(HEIGHT * 2) +// .success(); +// +// let n3 = TestNode::new(3) +// .voting_power(5) +// .start_after(1, Duration::from_secs(10)) +// .wait_until(HEIGHT) +// .success(); +// +// Test::new([n1, n2, n3]) +// .run_with_custom_config( +// App::Starknet, +// Duration::from_secs(30), +// TestParams { +// enable_blocksync: true, +// ..Default::default() +// }, +// ) +// .await +// } +// diff --git a/code/crates/starknet/test/tests/n2f0_pubsub_protocol.rs b/code/crates/starknet/test/tests/n2f0_pubsub_protocol.rs index f134e3a43..10b516697 100644 --- a/code/crates/starknet/test/tests/n2f0_pubsub_protocol.rs +++ b/code/crates/starknet/test/tests/n2f0_pubsub_protocol.rs @@ -1,68 +1,74 @@ #![allow(unused_crate_dependencies)] +use std::time::Duration; + use bytesize::ByteSize; use malachite_config::{GossipSubConfig, PubSubProtocol}; -use malachite_starknet_test::{App, Expected, Test, TestNode, TestParams}; +use malachite_starknet_test::{App, Test, TestNode, TestParams}; -async fn run_n2f0_tests(test_params: TestParams) { - let test = Test::new( - [TestNode::correct(10), TestNode::correct(10)], - Expected::Exactly(6), - ); +async fn run_n2f0_tests(params: TestParams) { + const HEIGHT: u64 = 5; - test.run_with_custom_config(App::Starknet, test_params) - .await -} + let n1 = TestNode::new(1).start().wait_until(HEIGHT).success(); + let n2 = TestNode::new(2).start().wait_until(HEIGHT).success(); -#[tokio::test] -pub async fn flood_default_config() { - let test = Test::new( - [TestNode::correct(10), TestNode::correct(10)], - Expected::Exactly(6), - ); - - test.run(App::Starknet).await + Test::new([n1, n2]) + .run_with_custom_config(App::Starknet, Duration::from_secs(30), params) + .await } #[tokio::test] pub async fn broadcast_custom_config_1ktx() { - let test_params = TestParams::new( - PubSubProtocol::Broadcast, - ByteSize::kib(1), - ByteSize::kib(1), - ); + let params = TestParams { + enable_blocksync: false, + protocol: PubSubProtocol::Broadcast, + block_size: ByteSize::kib(1), + tx_size: ByteSize::kib(1), + txs_per_part: 1, + ..Default::default() + }; - run_n2f0_tests(test_params).await + run_n2f0_tests(params).await } #[tokio::test] pub async fn broadcast_custom_config_2ktx() { - let test_params = TestParams::new( - PubSubProtocol::Broadcast, - ByteSize::kib(2), - ByteSize::kib(2), - ); + let params = TestParams { + enable_blocksync: false, + protocol: PubSubProtocol::Broadcast, + block_size: ByteSize::kib(2), + tx_size: ByteSize::kib(2), + txs_per_part: 1, + ..Default::default() + }; - run_n2f0_tests(test_params).await + run_n2f0_tests(params).await } #[tokio::test] pub async fn gossip_custom_config_1ktx() { - let test_params = 
TestParams::new( - PubSubProtocol::GossipSub(GossipSubConfig::default()), - ByteSize::kib(1), - ByteSize::kib(1), - ); - run_n2f0_tests(test_params).await + let params = TestParams { + enable_blocksync: false, + protocol: PubSubProtocol::GossipSub(GossipSubConfig::default()), + block_size: ByteSize::kib(1), + tx_size: ByteSize::kib(1), + txs_per_part: 1, + ..Default::default() + }; + + run_n2f0_tests(params).await } #[tokio::test] pub async fn gossip_custom_config_2ktx() { - let test_params = TestParams::new( - PubSubProtocol::GossipSub(GossipSubConfig::default()), - ByteSize::kib(2), - ByteSize::kib(2), - ); + let params = TestParams { + enable_blocksync: false, + protocol: PubSubProtocol::GossipSub(GossipSubConfig::default()), + block_size: ByteSize::kib(2), + tx_size: ByteSize::kib(2), + txs_per_part: 1, + ..Default::default() + }; - run_n2f0_tests(test_params).await + run_n2f0_tests(params).await } diff --git a/code/crates/starknet/test/tests/n3f0.rs b/code/crates/starknet/test/tests/n3f0.rs index 20e6b4afc..14a84317f 100644 --- a/code/crates/starknet/test/tests/n3f0.rs +++ b/code/crates/starknet/test/tests/n3f0.rs @@ -1,17 +1,18 @@ #![allow(unused_crate_dependencies)] -use malachite_starknet_test::{App, Expected, Test, TestNode}; +use std::time::Duration; + +use malachite_starknet_test::{App, Test, TestNode}; #[tokio::test] pub async fn all_correct_nodes() { - let test = Test::new( - [ - TestNode::correct(5), - TestNode::correct(15), - TestNode::correct(10), - ], - Expected::Exactly(9), - ); + const HEIGHT: u64 = 5; + + let n1 = TestNode::new(1).start().wait_until(HEIGHT).success(); + let n2 = TestNode::new(2).start().wait_until(HEIGHT).success(); + let n3 = TestNode::new(3).start().wait_until(HEIGHT).success(); - test.run(App::Starknet).await + Test::new([n1, n2, n3]) + .run(App::Starknet, Duration::from_secs(30)) + .await } diff --git a/code/crates/starknet/test/tests/n3f1.rs b/code/crates/starknet/test/tests/n3f1.rs index d9e6d39d5..67ec209f7 100644 --- a/code/crates/starknet/test/tests/n3f1.rs +++ b/code/crates/starknet/test/tests/n3f1.rs @@ -1,59 +1,67 @@ #![allow(unused_crate_dependencies)] -use malachite_starknet_test::{App, Expected, Fault, Test, TestNode}; +use std::time::Duration; + +use malachite_starknet_test::{App, Test, TestNode}; #[tokio::test] pub async fn proposer_fails_to_start() { - let test = Test::new( - [ - TestNode::faulty(10, vec![Fault::NoStart]), - TestNode::correct(10), - TestNode::correct(10), - ], - Expected::Exactly(0), - ); - - test.run(App::Starknet).await + const HEIGHT: u64 = 5; + + let n1 = TestNode::new(1).vp(1).success(); + let n2 = TestNode::new(2).vp(5).start().wait_until(HEIGHT).success(); + let n3 = TestNode::new(3).vp(5).start().wait_until(HEIGHT).success(); + + Test::new([n1, n2, n3]) + .run(App::Starknet, Duration::from_secs(30)) + .await } #[tokio::test] pub async fn one_node_fails_to_start() { - let test = Test::new( - [ - TestNode::correct(10), - TestNode::faulty(10, vec![Fault::NoStart]), - TestNode::correct(10), - ], - Expected::Exactly(0), - ); - - test.run(App::Starknet).await + const HEIGHT: u64 = 5; + + let n1 = TestNode::new(1).vp(5).start().wait_until(HEIGHT).success(); + let n2 = TestNode::new(2).vp(5).start().wait_until(HEIGHT).success(); + let n3 = TestNode::new(3).vp(1).success(); + + Test::new([n1, n2, n3]) + .run(App::Starknet, Duration::from_secs(30)) + .await } #[tokio::test] -pub async fn proposer_crashes_at_height_1() { - let test = Test::new( - [ - TestNode::faulty(10, vec![Fault::Crash(1)]), - TestNode::correct(10), - 
TestNode::correct(10), - ], - Expected::AtMost(4), - ); - - test.run(App::Starknet).await +pub async fn proposer_crashes_at_height_2() { + const HEIGHT: u64 = 5; + + let n1 = TestNode::new(1).vp(5).start().wait_until(HEIGHT).success(); + let n2 = TestNode::new(2) + .vp(1) + .start() + .wait_until(2) + .crash() + .success(); + let n3 = TestNode::new(3).vp(5).start().wait_until(HEIGHT).success(); + + Test::new([n1, n2, n3]) + .run(App::Starknet, Duration::from_secs(30)) + .await } #[tokio::test] -pub async fn one_node_crashes_at_height_2() { - let test = Test::new( - [ - TestNode::correct(10), - TestNode::correct(10), - TestNode::faulty(5, vec![Fault::Crash(2)]), - ], - Expected::AtMost(7), - ); - - test.run(App::Starknet).await +pub async fn one_node_crashes_at_height_3() { + const HEIGHT: u64 = 5; + + let n1 = TestNode::new(1).vp(5).start().wait_until(HEIGHT).success(); + let n3 = TestNode::new(3).vp(5).start().wait_until(HEIGHT).success(); + let n2 = TestNode::new(2) + .vp(1) + .start() + .wait_until(3) + .crash() + .success(); + + Test::new([n1, n2, n3]) + .run(App::Starknet, Duration::from_secs(30)) + .await } diff --git a/code/crates/test/src/height.rs b/code/crates/test/src/height.rs index b4bf5f4cb..d22ee2875 100644 --- a/code/crates/test/src/height.rs +++ b/code/crates/test/src/height.rs @@ -39,8 +39,8 @@ impl fmt::Debug for Height { } impl malachite_common::Height for Height { - fn increment(&self) -> Self { - Self(self.0 + 1) + fn increment_by(&self, n: u64) -> Self { + Self(self.0 + n) } fn as_u64(&self) -> u64 { diff --git a/code/scripts/spawn.bash b/code/scripts/spawn.bash index 19009f578..1497bfa77 100755 --- a/code/scripts/spawn.bash +++ b/code/scripts/spawn.bash @@ -34,16 +34,18 @@ fi export MALACHITE__CONSENSUS__P2P__PROTOCOL__TYPE="gossipsub" export MALACHITE__CONSENSUS__MAX_BLOCK_SIZE="50KiB" export MALACHITE__CONSENSUS__TIMEOUT_PROPOSE="5s" +export MALACHITE__CONSENSUS__TIMEOUT_PROPOSE_DELTA="1s" export MALACHITE__CONSENSUS__TIMEOUT_PREVOTE="1s" export MALACHITE__CONSENSUS__TIMEOUT_PRECOMMIT="1s" export MALACHITE__CONSENSUS__TIMEOUT_COMMIT="0s" export MALACHITE__MEMPOOL__MAX_TX_COUNT="10000" export MALACHITE__MEMPOOL__GOSSIP_BATCH_SIZE=0 export MALACHITE__TEST__TX_SIZE="1KiB" -export MALACHITE__TEST__TXS_PER_PART=50 -export MALACHITE__TEST__TIME_ALLOWANCE_FACTOR=0.5 -export MALACHITE__TEST__EXEC_TIME_PER_TX="1ms" -export MALACHITE__TEST__VOTE_EXTENSIONS__ENABLED="true" +export MALACHITE__TEST__TXS_PER_PART=256 +export MALACHITE__TEST__TIME_ALLOWANCE_FACTOR=0.3 +export MALACHITE__TEST__EXEC_TIME_PER_TX="0ms" +export MALACHITE__TEST__MAX_RETAIN_BLOCKS=10000 +export MALACHITE__TEST__VOTE_EXTENSIONS__ENABLED="false" export MALACHITE__TEST__VOTE_EXTENSIONS__SIZE="1KiB" echo "Compiling Malachite..." 
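
Note on the `Height` trait change above: only the `increment_by` signature is shown in the diff, so the sketch below is a hedged guess at how `malachite_common::Height` likely composes after this change, with `increment` kept as a provided method built on top of the new required `increment_by`. The trait bounds and any other methods of the real trait are assumptions.

```rust
// Hedged sketch (not part of the diff): a plausible shape for the
// malachite_common::Height trait after this change. Only the
// increment_by signature is taken from the diff; the rest is assumed.
pub trait Height: Sized {
    /// Required: advance the height by `n`.
    fn increment_by(&self, n: u64) -> Self;

    /// Provided: advance by one, expressed via `increment_by`.
    fn increment(&self) -> Self {
        self.increment_by(1)
    }

    /// The height as a plain integer.
    fn as_u64(&self) -> u64;
}
```
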
diff --git a/code/scripts/spawn.fish b/code/scripts/spawn.fish index 92cc98931..746403fe8 100755 --- a/code/scripts/spawn.fish +++ b/code/scripts/spawn.fish @@ -1,5 +1,23 @@ #!/usr/bin/env fish +set -x MALACHITE__CONSENSUS__P2P__PROTOCOL__TYPE "broadcast" +set -x MALACHITE__CONSENSUS__MAX_BLOCK_SIZE "10MiB" +set -x MALACHITE__CONSENSUS__TIMEOUT_PROPOSE "5s" +set -x MALACHITE__CONSENSUS__TIMEOUT_PREVOTE "3s" +set -x MALACHITE__CONSENSUS__TIMEOUT_PRECOMMIT "3s" +set -x MALACHITE__CONSENSUS__TIMEOUT_COMMIT "0s" +set -x MALACHITE__MEMPOOL__MAX_TX_COUNT 10000 +set -x MALACHITE__MEMPOOL__GOSSIP_BATCH_SIZE 0 +set -x MALACHITE__TEST__TX_SIZE "1KB" +set -x MALACHITE__TEST__TXS_PER_PART 1024 +set -x MALACHITE__TEST__TIME_ALLOWANCE_FACTOR 0.5 +set -x MALACHITE__TEST__EXEC_TIME_PER_TX "500us" +set -x MALACHITE__TEST__MAX_RETAIN_BLOCKS 100 +set -x MALACHITE__TEST__VOTE_EXTENSIONS__ENABLED false +set -x MALACHITE__TEST__VOTE_EXTENSIONS__SIZE "1KiB" +set -x MALACHITE__BLOCKSYNC__ENABLED true + + # This script takes: # - a number of nodes to run as an argument, # - the home directory for the nodes configuration folders @@ -51,21 +69,6 @@ if set -q _flag_lldb set lldb true end -set -x MALACHITE__CONSENSUS__P2P__PROTOCOL__TYPE "gossipsub" -set -x MALACHITE__CONSENSUS__MAX_BLOCK_SIZE "1MiB" -set -x MALACHITE__CONSENSUS__TIMEOUT_PROPOSE "5s" -set -x MALACHITE__CONSENSUS__TIMEOUT_PREVOTE "3s" -set -x MALACHITE__CONSENSUS__TIMEOUT_PRECOMMIT "3s" -set -x MALACHITE__CONSENSUS__TIMEOUT_COMMIT "0s" -set -x MALACHITE__MEMPOOL__MAX_TX_COUNT "1000" -set -x MALACHITE__MEMPOOL__GOSSIP_BATCH_SIZE 0 -set -x MALACHITE__TEST__TX_SIZE "1KB" -set -x MALACHITE__TEST__TXS_PER_PART 128 -set -x MALACHITE__TEST__TIME_ALLOWANCE_FACTOR 0.5 -set -x MALACHITE__TEST__EXEC_TIME_PER_TX "1ms" -set -x MALACHITE__TEST__VOTE_EXTENSIONS__ENABLED true -set -x MALACHITE__TEST__VOTE_EXTENSIONS__SIZE "1KiB" - echo "Compiling Malachite..." cargo build --profile $build_profile @@ -83,7 +86,7 @@ for NODE in (seq 0 $(math $NODES_COUNT - 1)) rm -f "$NODE_HOME/logs/*.log" - set pane $(tmux new-window -P -n "node-$NODE" /bin/zsh) + set pane $(tmux new-window -P -n "node-$NODE" "$(which fish)") echo "[Node $NODE] Spawning node..." @@ -102,41 +105,33 @@ for NODE in (seq 0 $(math $NODES_COUNT - 1)) set cmd_prefix "cargo instruments --profile $build_profile --template $profile_template --time-limit 60000 --output '$NODE_HOME/traces/' --" tmux send -t "$pane" "sleep $NODE" Enter - tmux send -t "$pane" "$cmd_prefix start --home '$NODE_HOME' 2>&1 > '$NODE_HOME/logs/node.log' &" Enter - tmux send -t "$pane" "echo \$! > '$NODE_HOME/node.pid'" Enter - tmux send -t "$pane" "tail -f '$NODE_HOME/logs/node.log'" Enter + tmux send -t "$pane" "unbuffer $cmd_prefix start --home '$NODE_HOME' 2>&1 | tee '$NODE_HOME/logs/node.log'" Enter + # tmux send -t "$pane" "echo \$! > '$NODE_HOME/node.pid'" Enter + # tmux send -t "$pane" "fg" Enter else set cmd_prefix "./target/$build_folder/malachite-cli" - tmux send -t "$pane" "$cmd_prefix start --home '$NODE_HOME' 2>&1 > '$NODE_HOME/logs/node.log' &" Enter - tmux send -t "$pane" "echo \$! > '$NODE_HOME/node.pid'" Enter - tmux send -t "$pane" "tail -f '$NODE_HOME/logs/node.log'" Enter + tmux send -t "$pane" "unbuffer $cmd_prefix start --home '$NODE_HOME' 2>&1 | tee '$NODE_HOME/logs/node.log'" Enter + # tmux send -t "$pane" "echo \$! > '$NODE_HOME/node.pid'" Enter + # tmux send -t "$pane" "fg" Enter end end echo "Spawned $NODES_COUNT nodes." echo -read -l -P "Launch tmux? 
[Y/n] " launch_tmux -switch $launch_tmux - case N n - echo "To attach to the tmux session, run:" - echo " tmux attach -t $session" - case '*' - tmux attach -t $session -end +tmux attach -t $session echo -read -l -P "Press Enter to stop the nodes... " done +# read -l -P "Press Enter to stop the nodes... " done +# echo "Stopping all nodes..." +# for NODE in (seq 0 $(math $NODES_COUNT - 1)) +# set NODE_PID (cat "$NODES_HOME/$NODE/node.pid") +# echo "[Node $NODE] Stopping node (PID: $NODE_PID)..." +# kill $NODE_PID +# end +# echo -echo "Stopping all nodes..." -for NODE in (seq 0 $(math $NODES_COUNT - 1)) - set NODE_PID (cat "$NODES_HOME/$NODE/node.pid") - echo "[Node $NODE] Stopping node (PID: $NODE_PID)..." - kill $NODE_PID -end -echo read -l -P "Press Enter to kill the tmux session... " done - tmux kill-session -t $session
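
For reference, here is a minimal, hedged sketch of a new test written against the revised builder API, assembled only from calls that appear in the diffs above (`TestNode::new`, `vp`, `start`, `wait_until`, `success`, `Test::new`, `run_with_custom_config`). It is illustrative only and not part of the change set; the test name and the chosen height are made up.

```rust
#![allow(unused_crate_dependencies)]

use std::time::Duration;

use malachite_starknet_test::{App, Test, TestNode, TestParams};

// Illustrative only: a two-node test in the style introduced by this change.
// Nodes get explicit ids and voting power, and the success criterion is
// expressed as "reach height N" via wait_until instead of the old
// Expected::Exactly / Expected::AtMost counters.
#[tokio::test]
pub async fn two_correct_nodes_reach_height_three() {
    const HEIGHT: u64 = 3;

    let n1 = TestNode::new(1).vp(10).start().wait_until(HEIGHT).success();
    let n2 = TestNode::new(2).vp(10).start().wait_until(HEIGHT).success();

    Test::new([n1, n2])
        .run_with_custom_config(
            App::Starknet,           // Application under test
            Duration::from_secs(30), // Timeout for the whole test
            TestParams {
                enable_blocksync: false, // BlockSync not needed here
                ..Default::default()
            },
        )
        .await
}
```
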