diff --git a/.github/workflows/cd.yml b/.github/workflows/cd.yml index 9baef8ad1ee..9f7d7ed512f 100644 --- a/.github/workflows/cd.yml +++ b/.github/workflows/cd.yml @@ -41,7 +41,7 @@ jobs: platform: arm64 runs-on: ${{ matrix.os }} steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 with: submodules: "recursive" - uses: actions-rs/toolchain@v1 diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index f19c49ff100..b59441fb114 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -21,7 +21,7 @@ jobs: sanity: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 with: submodules: "recursive" - uses: actions-rs/toolchain@v1 @@ -45,7 +45,7 @@ jobs: runs-on: ubuntu-latest continue-on-error: true steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 with: submodules: "recursive" - uses: actions-rs/toolchain@v1 @@ -66,7 +66,7 @@ jobs: runs-on: ubuntu-latest continue-on-error: true steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 with: submodules: "recursive" - uses: actions-rs/toolchain@v1 @@ -97,7 +97,7 @@ jobs: - macOS-latest - windows-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 with: submodules: "recursive" - uses: actions-rs/toolchain@v1 @@ -124,10 +124,47 @@ jobs: steps: - run: exit 0 + # Maybe launch it only in testnet branches + gas-costs-check: + if: github.ref != 'refs/heads/staging' + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + with: + submodules: "recursive" + - uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: nightly-2022-12-24 + components: rustfmt + override: true + - uses: actions/checkout@v3 + with: + repository: massalabs/gas-calibration + path: gas-calibration + ref: main + - uses: actions/checkout@v3 + with: + repository: massalabs/massa-as-sdk + path: massa-as-sdk + ref: main + # Replace the branch in the dependencies in gas-calibration project by the current branch + # 
Special case for massa-sc-runtime: we use the branch referenced in the Cargo.toml of massa-execution-worker. + - name: "Use the current branch as dependency for gas-calibration" + run: > + cd gas-calibration && + sed -i 's!main!${{ github.head_ref || github.ref_name }}!g' Cargo.toml && + sed -i 's!massa-sc-runtime = { git = "https://github.com/massalabs/massa-sc-runtime", features = \["gas_calibration"\] }!'"$(cat ../massa-execution-worker/Cargo.toml | grep 'massalabs/massa-sc-runtime' | sed 's!/!\\/!g' | sed 's!}!, features = ["gas_calibration"]}!g' )"'!g' Cargo.toml + - name: "Launch gas-calibration with one SC per ABI in massa-as-sdk to see if there is an ABI missing" + run: > + cd gas-calibration && + cargo run -r -- --nb-scs-by-abi=1 --as-sdk-env-path=../massa-as-sdk/assembly/env/env.ts --only-generate + + doc: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 with: submodules: "recursive" - uses: actions-rs/toolchain@v1 diff --git a/.rusty-hook.toml b/.rusty-hook.toml new file mode 100644 index 00000000000..34a4f400b96 --- /dev/null +++ b/.rusty-hook.toml @@ -0,0 +1,6 @@ +# Generate git hooks run: cargo install rusty-hook && rusty-hook init +[hooks] +pre-commit= "cargo check && cargo fmt -- --check && cargo clippy -- -A clippy::uninlined-format-args" + +[logging] +verbose = true diff --git a/Cargo.lock b/Cargo.lock index fd3e22a226a..b9886772bba 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8,7 +8,7 @@ version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a76fd60b23679b7d19bd066031410fb7e458ccc5e958eb5c325888ce4baedc97" dependencies = [ - "gimli 0.27.0", + "gimli 0.27.1", ] [[package]] @@ -63,6 +63,18 @@ dependencies = [ "version_check", ] +[[package]] +name = "ahash" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c99f64d1e06488f620f932677e24bc6e2897582980441ae90a671415bd7ec2f" +dependencies = [ + "cfg-if", + "getrandom 0.2.8", 
+ "once_cell", + "version_check", +] + [[package]] name = "aho-corasick" version = "0.7.20" @@ -132,7 +144,7 @@ checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" [[package]] name = "as-ffi-bindings" version = "0.2.5" -source = "git+https://github.com/massalabs/as-ffi-bindings.git?tag=v0.3.0#d40a1586953d396508ef739f39f48e1a18e0b0cc" +source = "git+https://github.com/massalabs/as-ffi-bindings.git?tag=v0.3.3#81a1cba61a20a6e5065341b00d7f751c7974e65b" dependencies = [ "anyhow", "wasmer", @@ -176,9 +188,9 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.60" +version = "0.1.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "677d1d8ab452a3936018a687b20e6f7cf5363d713b732b8884001317b0e48aa3" +checksum = "1cd7fce9ba8c3c042128ce72d8b2ddbf3a05747efb67ea0313c635e10bda47a2" dependencies = [ "proc-macro2", "quote", @@ -223,6 +235,12 @@ version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" +[[package]] +name = "base64" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4a4ddaa51a5bc52a6948f74c06d20aaaddb71924eab79b8c97a8c556e942d6a" + [[package]] name = "base64ct" version = "1.5.3" @@ -366,9 +384,9 @@ dependencies = [ [[package]] name = "brotli-decompressor" -version = "2.3.2" +version = "2.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59ad2d4653bf5ca36ae797b1f4bb4dbddb60ce49ca4aed8a2ce4829f60425b80" +checksum = "4b6561fd3f895a11e8f72af2cb7d22e08366bebc2b6b57f7744c4bda27034744" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", @@ -385,18 +403,19 @@ dependencies = [ [[package]] name = "bstr" -version = "0.2.17" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba3569f383e8f1598449f1a423e72e99569137b47740b1da11ef19af3d5c3223" +checksum = 
"b45ea9b00a7b3f2988e9a65ad3917e62123c38dba709b666506207be96d1790b" dependencies = [ "memchr", + "serde", ] [[package]] name = "bumpalo" -version = "3.11.1" +version = "3.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "572f695136211188308f16ad2ca5c851a712c464060ae6974944458eb83880ba" +checksum = "0d261e256854913907f67ed06efbc3338dfe6179796deefc1ff763fc1aee5535" [[package]] name = "bytecheck" @@ -427,9 +446,9 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfb24e866b15a1af2a1b663f10c6b6b8f397a84aadb828f12e5b289ec23a3a3c" +checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be" [[package]] name = "bzip2-sys" @@ -450,9 +469,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.0.78" +version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a20104e2335ce8a659d6dd92a51a767a0c062599c73b343fd152cb401e828c3d" +checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" dependencies = [ "jobserver", ] @@ -479,9 +498,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16b0a3d9ed01224b22057780a37bb8c5dbfe1be8ba48678e7bf57ec4b385411f" dependencies = [ "iana-time-zone", + "js-sys", "num-integer", "num-traits", "serde", + "time 0.1.45", + "wasm-bindgen", "winapi", ] @@ -571,9 +593,9 @@ dependencies = [ [[package]] name = "clipboard-win" -version = "4.4.2" +version = "4.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4ab1b92798304eedc095b53942963240037c0516452cb11aeba709d420b2219" +checksum = "7191c27c2357d9b7ef96baac1773290d4ca63b24205b82a3fd8a0637afcf0362" dependencies = [ "error-code", "str-buf", @@ -611,16 +633,35 @@ dependencies = [ [[package]] name = 
"console" -version = "0.15.2" +version = "0.15.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c050367d967ced717c04b65d8c619d863ef9292ce0c5760028655a2fb298718c" +checksum = "c3d79fbe8970a77e3e34151cc13d3b3e248aa0faaecb9f6091fa07ebefe5ad60" dependencies = [ "encode_unicode", "lazy_static", "libc", - "terminal_size", "unicode-width", - "winapi", + "windows-sys 0.42.0", +] + +[[package]] +name = "const_format" +version = "0.2.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7309d9b4d3d2c0641e018d449232f2e28f1b22933c137f157d3dbc14228b8c0e" +dependencies = [ + "const_format_proc_macros", +] + +[[package]] +name = "const_format_proc_macros" +version = "0.2.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d897f47bf7270cf70d370f8f98c1abb6d2d4cf60a6845d30e05bfb90c6568650" +dependencies = [ + "proc-macro2", + "quote", + "unicode-xid", ] [[package]] @@ -866,9 +907,9 @@ dependencies = [ [[package]] name = "cxx" -version = "1.0.85" +version = "1.0.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5add3fc1717409d029b20c5b6903fc0c0b02fa6741d820054f4a2efa5e5816fd" +checksum = "322296e2f2e5af4270b54df9e85a02ff037e271af20ba3e7fe1575515dc840b8" dependencies = [ "cc", "cxxbridge-flags", @@ -878,9 +919,9 @@ dependencies = [ [[package]] name = "cxx-build" -version = "1.0.85" +version = "1.0.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4c87959ba14bc6fbc61df77c3fcfe180fc32b93538c4f1031dd802ccb5f2ff0" +checksum = "017a1385b05d631e7875b1f151c9f012d37b53491e2a87f65bff5c262b2111d8" dependencies = [ "cc", "codespan-reporting", @@ -893,15 +934,15 @@ dependencies = [ [[package]] name = "cxxbridge-flags" -version = "1.0.85" +version = "1.0.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69a3e162fde4e594ed2b07d0f83c6c67b745e7f28ce58c6df5e6b6bef99dfb59" +checksum = 
"c26bbb078acf09bc1ecda02d4223f03bdd28bd4874edcb0379138efc499ce971" [[package]] name = "cxxbridge-macro" -version = "1.0.85" +version = "1.0.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e7e2adeb6a0d4a282e581096b06e1791532b7d576dcde5ccd9382acf55db8e6" +checksum = "357f40d1f06a24b60ae1fe122542c1fb05d28d32acb2aed064e84bc2ad1e252e" dependencies = [ "proc-macro2", "quote", @@ -958,11 +999,12 @@ dependencies = [ [[package]] name = "dialoguer" -version = "0.10.2" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a92e7e37ecef6857fdc0c0c5d42fd5b0938e46590c2183cc92dd310a6d078eb1" +checksum = "af3c796f3b0b408d9fd581611b47fa850821fcb84aa640b83a3c1a5be2d691f2" dependencies = [ "console", + "shell-words", "tempfile", "zeroize", ] @@ -1079,9 +1121,9 @@ dependencies = [ [[package]] name = "ed25519" -version = "1.5.2" +version = "1.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e9c280362032ea4203659fc489832d0204ef09f247a0506f170dafcac08c369" +checksum = "91cff35c70bba8a626e3185d8cd48cc11b5437e1a5bcd15b9b5fa3c64b6dfee7" dependencies = [ "signature", ] @@ -1103,9 +1145,9 @@ dependencies = [ [[package]] name = "either" -version = "1.8.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797" +checksum = "7fcaabb2fef8c910e7f4c7ce9f67a1283a1715879a7c230ca9d6d1ae31f16d91" [[package]] name = "encode_unicode" @@ -1244,9 +1286,9 @@ dependencies = [ [[package]] name = "fd-lock" -version = "3.0.8" +version = "3.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb21c69b9fea5e15dbc1049e4b77145dd0ba1c84019c488102de0dc4ea4b0a27" +checksum = "28c0190ff0bd3b28bfdd4d0cf9f92faa12880fb0b8ae2054723dd6c76a4efd42" dependencies = [ "cfg-if", "rustix", @@ -1298,9 +1340,9 @@ checksum = 
"e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" [[package]] name = "futures" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38390104763dc37a5145a53c29c63c1290b5d316d6086ec32c293f6736051bb0" +checksum = "13e2792b0ff0340399d58445b88fd9770e3489eff258a4cbc1523418f12abf84" dependencies = [ "futures-channel", "futures-core", @@ -1313,9 +1355,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52ba265a92256105f45b719605a571ffe2d1f0fea3807304b522c1d778f79eed" +checksum = "2e5317663a9089767a1ec00a487df42e0ca174b61b4483213ac24448e4664df5" dependencies = [ "futures-core", "futures-sink", @@ -1323,15 +1365,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04909a7a7e4633ae6c4a9ab280aeb86da1236243a77b694a49eacd659a4bd3ac" +checksum = "ec90ff4d0fe1f57d600049061dc6bb68ed03c7d2fbd697274c41805dcb3f8608" [[package]] name = "futures-executor" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7acc85df6714c176ab5edf386123fafe217be88c0840ec11f199441134a074e2" +checksum = "e8de0a35a6ab97ec8869e32a2473f4b1324459e14c29275d14b10cb1fd19b50e" dependencies = [ "futures-core", "futures-task", @@ -1340,9 +1382,9 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00f5fb52a06bdcadeb54e8d3671f8888a39697dcb0b81b23b55174030427f4eb" +checksum = "bfb8371b6fb2aeb2d280374607aeabfc99d95c72edfe51692e42d3d7f0d08531" [[package]] name = "futures-lite" @@ -1361,9 +1403,9 @@ dependencies = [ [[package]] name = "futures-macro" -version = "0.3.25" +version = "0.3.26" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdfb8ce053d86b91919aad980c220b1fb8401a9394410e1c289ed7e66b61835d" +checksum = "95a73af87da33b5acf53acfebdc339fe592ecf5357ac7c0a7734ab9d8c876a70" dependencies = [ "proc-macro2", "quote", @@ -1372,15 +1414,15 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39c15cf1a4aa79df40f1bb462fb39676d0ad9e366c2a33b590d7c66f4f81fcf9" +checksum = "f310820bb3e8cfd46c80db4d7fb8353e15dfff853a127158425f31e0be6c8364" [[package]] name = "futures-task" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ffb393ac5d9a6eaa9d3fdf37ae2776656b706e200c8e16b1bdb227f5198e6ea" +checksum = "dcf79a1bf610b10f42aea489289c5a2c478a786509693b80cd39c44ccd936366" [[package]] name = "futures-timer" @@ -1394,9 +1436,9 @@ dependencies = [ [[package]] name = "futures-util" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "197676987abd2f9cadff84926f410af1c183608d36641465df73ae8211dc65d6" +checksum = "9c1d6de3acfef38d2be4b1f543f553131788603495be83da675e180c8d6b7bd1" dependencies = [ "futures-channel", "futures-core", @@ -1474,21 +1516,21 @@ dependencies = [ [[package]] name = "gimli" -version = "0.27.0" +version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dec7af912d60cdbd3677c1af9352ebae6fb8394d165568a2234df0fa00f87793" +checksum = "221996f774192f0f718773def8201c4ae31f02616a54ccfc2d358bb0e5cefdec" [[package]] name = "glob" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" +checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "globset" -version = "0.4.9" +version = "0.4.10" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a1e17342619edbc21a964c2afbeb6c820c6a2560032872f397bb97ea127bd0a" +checksum = "029d74589adefde59de1a0c4f4732695c32805624aec7b68d91503d4dba79afc" dependencies = [ "aho-corasick", "bstr", @@ -1499,9 +1541,9 @@ dependencies = [ [[package]] name = "gloo-net" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9050ff8617e950288d7bf7f300707639fdeda5ca0d0ecf380cff448cfd52f4a6" +checksum = "9902a044653b26b99f7e3693a42f171312d9be8b26b5697bd1e43ad1f8a35e10" dependencies = [ "futures-channel", "futures-core", @@ -1519,9 +1561,9 @@ dependencies = [ [[package]] name = "gloo-timers" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98c4a8d6391675c6b2ee1a6c8d06e8e2d03605c44cec1270675985a4c2a5500b" +checksum = "9b995a66bb87bebce9a0f4a95aed01daca4872c050bfcb21653361c03bc35e5c" dependencies = [ "futures-channel", "futures-core", @@ -1573,7 +1615,7 @@ version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" dependencies = [ - "ahash", + "ahash 0.7.6", ] [[package]] @@ -1582,9 +1624,15 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" dependencies = [ - "ahash", + "ahash 0.7.6", ] +[[package]] +name = "hashbrown" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" + [[package]] name = "hdrhistogram" version = "7.5.2" @@ -1790,9 +1838,9 @@ dependencies = [ [[package]] name = "io-lifetimes" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"46112a93252b123d31a119a8d1a1ac19deac4fac6e0e8b0df58f0d4e5870e63c" +checksum = "e7d6c6f8c91b4b9ed43484ad1a938e393caf35960fce7f82a040497207bd8e9e" dependencies = [ "libc", "windows-sys 0.42.0", @@ -1948,7 +1996,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baa6da1e4199c10d7b1d0a6e5e8bd8e55f351163b6f4b3cbb044672a69bd4c1c" dependencies = [ "heck 0.4.0", - "proc-macro-crate 1.2.1", + "proc-macro-crate 1.3.0", "proc-macro2", "quote", "syn", @@ -2167,6 +2215,7 @@ dependencies = [ "dialoguer", "erased-serde", "lazy_static", + "massa_api_exports", "massa_models", "massa_sdk", "massa_signature", @@ -2181,7 +2230,7 @@ dependencies = [ "strum", "strum_macros", "tokio", - "toml_edit", + "toml_edit 0.16.2", ] [[package]] @@ -2194,6 +2243,7 @@ dependencies = [ "enum-map", "lazy_static", "massa_api", + "massa_api_exports", "massa_async_pool", "massa_bootstrap", "massa_consensus_exports", @@ -2216,7 +2266,6 @@ dependencies = [ "massa_pos_worker", "massa_protocol_exports", "massa_protocol_worker", - "massa_signature", "massa_storage", "massa_time", "massa_wallet", @@ -2233,11 +2282,12 @@ dependencies = [ [[package]] name = "massa-sc-runtime" version = "0.10.0" -source = "git+https://github.com/massalabs/massa-sc-runtime#04082b4986753160f9c3a075daf9f6f772d73c29" +source = "git+https://github.com/massalabs/massa-sc-runtime?branch=main#3c4815dd78e3e78c387045df0117eac24d016db3" dependencies = [ "anyhow", "as-ffi-bindings", - "base64", + "base64 0.13.1", + "chrono", "displaydoc", "function_name", "loupe", @@ -2249,6 +2299,7 @@ dependencies = [ "serde_json", "serial_test 0.8.0", "thiserror", + "tracing", "wasmer", "wasmer-compiler-singlepass", "wasmer-middlewares", @@ -2260,13 +2311,12 @@ name = "massa_api" version = "0.1.0" dependencies = [ "async-trait", - "displaydoc", "hyper", "itertools", "jsonrpsee", + "massa_api_exports", "massa_consensus_exports", "massa_execution_exports", - "massa_hash", "massa_models", "massa_network_exports", 
"massa_pool_exports", @@ -2280,7 +2330,6 @@ dependencies = [ "parking_lot", "serde", "serde_json", - "thiserror", "tokio", "tokio-stream", "tower", @@ -2288,6 +2337,30 @@ dependencies = [ "tracing", ] +[[package]] +name = "massa_api_exports" +version = "0.1.0" +dependencies = [ + "displaydoc", + "jsonrpsee-core", + "jsonrpsee-types", + "massa_consensus_exports", + "massa_execution_exports", + "massa_final_state", + "massa_hash", + "massa_models", + "massa_network_exports", + "massa_protocol_exports", + "massa_signature", + "massa_time", + "massa_wallet", + "paginate", + "serde", + "serial_test 1.0.0", + "strum", + "thiserror", +] + [[package]] name = "massa_async_pool" version = "0.1.0" @@ -2461,6 +2534,7 @@ dependencies = [ "parking_lot", "rand 0.8.5", "rand_xoshiro", + "schnellru", "serde_json", "serial_test 0.10.0", "tempfile", @@ -2532,6 +2606,7 @@ dependencies = [ "massa_serialization", "massa_signature", "nom", + "serde", "thiserror", "tracing", ] @@ -2599,6 +2674,7 @@ dependencies = [ "bitvec", "bs58", "config", + "const_format", "directories", "displaydoc", "lazy_static", @@ -2613,7 +2689,6 @@ dependencies = [ "serde", "serde_with", "serial_test 0.10.0", - "strum", "thiserror", ] @@ -2670,6 +2745,7 @@ dependencies = [ "massa_storage", "massa_time", "serde", + "tokio", ] [[package]] @@ -2684,6 +2760,7 @@ dependencies = [ "massa_storage", "num", "parking_lot", + "tokio", "tracing", ] @@ -2781,6 +2858,7 @@ version = "0.1.0" dependencies = [ "http", "jsonrpsee", + "massa_api_exports", "massa_models", "massa_time", ] @@ -2834,7 +2912,7 @@ dependencies = [ "nom", "serde", "thiserror", - "time", + "time 0.3.17", ] [[package]] @@ -2964,10 +3042,11 @@ dependencies = [ [[package]] name = "nix" -version = "0.24.3" +version = "0.25.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa52e972a9a719cecb6864fb88568781eb706bac2cd1d4f04a648542dbf78069" +checksum = "f346ff70e7dbfd675fe90590b92d59ef2de15a8779ae305ebcbfd3f0caf59be4" dependencies = 
[ + "autocfg", "bitflags", "cfg-if", "libc", @@ -2975,9 +3054,9 @@ dependencies = [ [[package]] name = "nom" -version = "7.1.1" +version = "7.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8903e5a29a317527874d0402f867152a3d21c908bb0b933e416c65e301d4c36" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" dependencies = [ "memchr", "minimal-lexical", @@ -2985,12 +3064,11 @@ dependencies = [ [[package]] name = "nom8" -version = "0.1.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75d908f0297c3526d34e478d438b07eefe3d7b0416494d7ffccb17f1c7f7262c" +checksum = "ae01545c9c7fc4486ab7debaf2aad7003ac19431791868fb2e8066df97fad2f8" dependencies = [ "memchr", - "minimal-lexical", ] [[package]] @@ -3031,9 +3109,9 @@ dependencies = [ [[package]] name = "num-complex" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ae39348c8bc5fbd7f40c727a9925f03517afd2ab27d46702108b6a7e5414c19" +checksum = "02e0d21255c828d6f128a1e41534206671e8c3ea0c62f32291e808dc82cff17d" dependencies = [ "num-traits", "serde", @@ -3095,20 +3173,20 @@ dependencies = [ [[package]] name = "num_enum" -version = "0.5.7" +version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf5395665662ef45796a4ff5486c5d41d29e0c09640af4c5f17fd94ee2c119c9" +checksum = "8d829733185c1ca374f17e52b762f24f535ec625d2cc1f070e34c8a9068f341b" dependencies = [ "num_enum_derive", ] [[package]] name = "num_enum_derive" -version = "0.5.7" +version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b0498641e53dd6ac1a4f22547548caa6864cc4933784319cd1775271c5a46ce" +checksum = "2be1598bf1c313dcdd12092e3f1920f463462525a21b7b4e11b4168353d0123e" dependencies = [ - "proc-macro-crate 1.2.1", + "proc-macro-crate 1.3.0", "proc-macro2", "quote", "syn", @@ -3116,18 +3194,18 @@ dependencies = [ 
[[package]] name = "object" -version = "0.30.0" +version = "0.30.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "239da7f290cfa979f43f85a8efeee9a8a76d0827c356d37f9d3d7254d6b537fb" +checksum = "ea86265d3d3dcb6a27fc51bd29a4bf387fae9d2986b823079d4986af253eb439" dependencies = [ "memchr", ] [[package]] name = "once_cell" -version = "1.16.0" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86f0b0d4bf799edbc74508c1e8bf170ff5f41238e5f8225603ca7caaae2b7860" +checksum = "6f61fba1741ea2b3d6a1e3178721804bb716a68a6aeba1149b5d52e3d464ea66" [[package]] name = "oorandom" @@ -3178,6 +3256,12 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" +[[package]] +name = "paginate" +version = "1.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ac1db209d9d6dc8e4435b744ed76198494406cd20eb8ca14baf9828664664c8" + [[package]] name = "parking" version = "2.0.0" @@ -3196,9 +3280,9 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.5" +version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ff9f3fef3968a3ec5945535ed654cb38ff72d7495a25619e2247fb15a2ed9ba" +checksum = "ba1ef8814b5c993410bb3adfad7a5ed269563e4a2f90c41f5d85be7fb47133bf" dependencies = [ "backtrace", "cfg-if", @@ -3280,9 +3364,9 @@ checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" [[package]] name = "pest" -version = "2.5.2" +version = "2.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f6e86fb9e7026527a0d46bc308b841d73170ef8f443e1807f6ef88526a816d4" +checksum = "4ab62d2fa33726dbe6321cc97ef96d8cde531e3eeaf858a058de53a8a6d40d8f" dependencies = [ "thiserror", "ucd-trie", @@ -3290,9 +3374,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.5.2" +version = "2.5.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "96504449aa860c8dcde14f9fba5c58dc6658688ca1fe363589d6327b8662c603" +checksum = "8bf026e2d0581559db66d837fe5242320f525d85c76283c61f4d51a1238d65ea" dependencies = [ "pest", "pest_generator", @@ -3300,9 +3384,9 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.5.2" +version = "2.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "798e0220d1111ae63d66cb66a5dcb3fc2d986d520b98e49e1852bfdb11d7c5e7" +checksum = "2b27bd18aa01d91c8ed2b61ea23406a676b42d82609c6e2581fba42f0c15f17f" dependencies = [ "pest", "pest_meta", @@ -3313,13 +3397,13 @@ dependencies = [ [[package]] name = "pest_meta" -version = "2.5.2" +version = "2.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "984298b75898e30a843e278a9f2452c31e349a073a0ce6fd950a12a74464e065" +checksum = "9f02b677c1859756359fc9983c2e56a0237f18624a3789528804406b7e915e5d" dependencies = [ "once_cell", "pest", - "sha1", + "sha2 0.10.6", ] [[package]] @@ -3439,13 +3523,12 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eda0fc3b0fb7c975631757e14d9049da17374063edb6ebbcbc54d880d4fe94e9" +checksum = "66618389e4ec1c7afe67d51a9bf34ff9236480f8d51e7489b7d5ab0303c13f34" dependencies = [ "once_cell", - "thiserror", - "toml", + "toml_edit 0.18.1", ] [[package]] @@ -3474,9 +3557,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.49" +version = "1.0.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57a8eca9f9c4ffde41714334dee777596264c7825420f521abc92b5b5deb63a5" +checksum = "6ef7d57beacfaf2d8aee5937dab7b7f28de3cb8b1828479bb5de2a7106f2bae2" dependencies = [ "unicode-ident", ] @@ -3628,9 +3711,9 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.10.1" +version = "1.10.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "cac410af5d00ab6884528b4ab69d1e8e146e8d471201800fa1b4524126de6ad3" +checksum = "356a0625f1954f730c0201cdab48611198dc6ce21f4acff55089b5a78e6e835b" dependencies = [ "crossbeam-channel", "crossbeam-deque", @@ -3672,9 +3755,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.7.0" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e076559ef8e241f2ae3479e36f97bd5741c0330689e217ad51ce2c76808b868a" +checksum = "48aaa5748ba571fb95cd2c85c09f629215d3a6ece942baa100950af03a34f733" dependencies = [ "aho-corasick", "memchr", @@ -3774,7 +3857,7 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "88073939a61e5b7680558e6be56b419e208420c2adb92be54921fa6b72283f1a" dependencies = [ - "base64", + "base64 0.13.1", "bitflags", "serde", ] @@ -3791,9 +3874,9 @@ dependencies = [ [[package]] name = "rust_decimal" -version = "1.27.0" +version = "1.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33c321ee4e17d2b7abe12b5d20c1231db708dd36185c8a21e9de5fed6da4dbe9" +checksum = "7fe32e8c89834541077a5c5bbe5691aa69324361e27e6aeb3552a737db4a70c8" dependencies = [ "arrayvec", "borsh", @@ -3821,9 +3904,9 @@ checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" [[package]] name = "rustix" -version = "0.36.6" +version = "0.36.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4feacf7db682c6c329c4ede12649cd36ecab0f3be5b7d74e6a20304725db4549" +checksum = "d4fdebc4b395b7fbb9ab11e462e20ed9051e7b16e42d24042c776eca0ac81b03" dependencies = [ "bitflags", "errno", @@ -3835,9 +3918,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.20.7" +version = "0.20.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "539a2bfe908f471bfa933876bd1eb6a19cf2176d375f82ef7f99530a40e48c2c" +checksum = 
"fff78fc74d175294f4e83b28343315ffcfb114b156f0185e9741cb5570f50e2f" dependencies = [ "log", "ring", @@ -3859,11 +3942,11 @@ dependencies = [ [[package]] name = "rustls-pemfile" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0864aeff53f8c05aa08d86e5ef839d3dfcf07aeba2db32f12db0ef716e87bd55" +checksum = "d194b56d58803a43635bdc398cd17e383d6f71f9182b9a192c127ca42494a59b" dependencies = [ - "base64", + "base64 0.21.0", ] [[package]] @@ -3874,9 +3957,9 @@ checksum = "5583e89e108996506031660fe09baa5011b9dd0341b89029313006d1fb508d70" [[package]] name = "rustyline" -version = "10.0.0" +version = "10.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d1cd5ae51d3f7bf65d7969d579d502168ef578f289452bd8ccc91de28fda20e" +checksum = "c1e83c32c3f3c33b08496e0d1df9ea8c64d39adb8eb36a1ebb1440c690697aef" dependencies = [ "bitflags", "cfg-if", @@ -3923,12 +4006,22 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.20" +version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88d6731146462ea25d9244b2ed5fd1d716d25c52e4d54aa4fb0f3c4e9854dbe2" +checksum = "713cfb06c7059f3588fb8044c0fad1d09e3c01d225e25b9220dbfdcf16dbb1b3" dependencies = [ - "lazy_static", - "windows-sys 0.36.1", + "windows-sys 0.42.0", +] + +[[package]] +name = "schnellru" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "772575a524feeb803e5b0fcbc6dd9f367e579488197c94c6e4023aad2305774d" +dependencies = [ + "ahash 0.8.3", + "cfg-if", + "hashbrown 0.13.2", ] [[package]] @@ -3961,9 +4054,9 @@ checksum = "1c107b6f4780854c8b126e228ea8869f4d7b71260f962fefb57b996b8959ba6b" [[package]] name = "security-framework" -version = "2.7.0" +version = "2.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bc1bb97804af6631813c55739f771071e0f2ed33ee20b68c86ec505d906356c" +checksum = 
"a332be01508d814fed64bf28f798a146d73792121129962fdf335bb3c49a4254" dependencies = [ "bitflags", "core-foundation", @@ -3974,9 +4067,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.6.1" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0160a13a177a45bfb43ce71c01580998474f556ad854dcbca936dd2841a5c556" +checksum = "31c9bb296072e961fcbd8853511dd39c2d8be2deb1e17c6860b1d30732b323b4" dependencies = [ "core-foundation-sys", "libc", @@ -4043,25 +4136,25 @@ dependencies = [ [[package]] name = "serde_with" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25bf4a5a814902cd1014dbccfa4d4560fb8432c779471e96e035602519f82eef" +checksum = "30d904179146de381af4c93d3af6ca4984b3152db687dacb9c3c35e86f39809c" dependencies = [ - "base64", + "base64 0.13.1", "chrono", "hex", "indexmap", "serde", "serde_json", "serde_with_macros", - "time", + "time 0.3.17", ] [[package]] name = "serde_with_macros" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3452b4c0f6c1e357f73fdb87cd1efabaa12acf328c7a528e252893baeb3f4aa" +checksum = "a1966009f3c05f095697c537312f5415d1e3ed31ce0a56942bac4c771c5c335e" dependencies = [ "darling", "proc-macro2", @@ -4096,6 +4189,20 @@ dependencies = [ "serial_test_derive 0.10.0", ] +[[package]] +name = "serial_test" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "538c30747ae860d6fb88330addbbd3e0ddbe46d662d032855596d8a8ca260611" +dependencies = [ + "dashmap", + "futures", + "lazy_static", + "log", + "parking_lot", + "serial_test_derive 1.0.0", +] + [[package]] name = "serial_test_derive" version = "0.8.0" @@ -4120,6 +4227,17 @@ dependencies = [ "syn", ] +[[package]] +name = "serial_test_derive" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"079a83df15f85d89a68d64ae1238f142f172b1fa915d0d76b26a7cba1b659a69" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "sha-1" version = "0.9.8" @@ -4133,17 +4251,6 @@ dependencies = [ "opaque-debug", ] -[[package]] -name = "sha1" -version = "0.10.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" -dependencies = [ - "cfg-if", - "cpufeatures", - "digest 0.10.6", -] - [[package]] name = "sha2" version = "0.9.9" @@ -4177,6 +4284,12 @@ dependencies = [ "lazy_static", ] +[[package]] +name = "shell-words" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24188a676b6ae68c3b2cb3a01be17fbf7240ce009799bb56d5b1409051e78fde" + [[package]] name = "shlex" version = "1.1.0" @@ -4235,7 +4348,7 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "41d1c5305e39e09653383c2c7244f2f78b3bcae37cf50c64cb4789c9f5096ec2" dependencies = [ - "base64", + "base64 0.13.1", "bytes", "futures", "http", @@ -4379,23 +4492,13 @@ dependencies = [ [[package]] name = "termcolor" -version = "1.1.3" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bab24d30b911b2376f3a13cc2cd443142f0c81dda04c118693e35b3835757755" +checksum = "be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6" dependencies = [ "winapi-util", ] -[[package]] -name = "terminal_size" -version = "0.1.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "633c1a546cee861a1a6d0dc69ebeca693bf4296661ba7852b9d21d159e0506df" -dependencies = [ - "libc", - "winapi", -] - [[package]] name = "textwrap" version = "0.11.0" @@ -4451,6 +4554,17 @@ dependencies = [ "once_cell", ] +[[package]] +name = "time" +version = "0.1.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"1b797afad3f312d1c66a56d11d0316f916356d11bd158fbc6ca6389ff6bf805a" +dependencies = [ + "libc", + "wasi 0.10.0+wasi-snapshot-preview1", + "winapi", +] + [[package]] name = "time" version = "0.3.17" @@ -4490,9 +4604,9 @@ dependencies = [ [[package]] name = "tokio" -version = "1.23.0" +version = "1.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eab6d665857cc6ca78d6e80303a02cea7a7851e85dfbd77cbdc09bd129f1ef46" +checksum = "c8e00990ebabbe4c14c08aca901caed183ecd5c09562a12c824bb53d3c3fd3af" dependencies = [ "autocfg", "bytes", @@ -4559,24 +4673,24 @@ dependencies = [ [[package]] name = "toml" -version = "0.5.10" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1333c76748e868a4d9d1017b5ab53171dfd095f70c712fdb4653a406547f598f" +checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234" dependencies = [ "serde", ] [[package]] name = "toml_datetime" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "808b51e57d0ef8f71115d8f3a01e7d3750d01c79cac4b3eda910f4389fdf92fd" +checksum = "4553f467ac8e3d374bc9a177a26801e5d0f9b211aa1673fb137a403afd1c9cf5" [[package]] name = "toml_edit" -version = "0.16.1" +version = "0.16.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c040d7eb2b695a2a39048f9d8e7ee865ef1c57cd9c44ba9b4a4d389095f7e6a" +checksum = "dd30deba9a1cd7153c22aecf93e86df639e7b81c622b0af8d9255e989991a7b7" dependencies = [ "indexmap", "itertools", @@ -4584,6 +4698,17 @@ dependencies = [ "toml_datetime", ] +[[package]] +name = "toml_edit" +version = "0.18.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56c59d8dd7d0dcbc6428bf7aa2f0e823e26e43b3c9aca15bbc9475d23e5fa12b" +dependencies = [ + "indexmap", + "nom8", + "toml_datetime", +] + [[package]] name = "tower" version = "0.4.13" @@ -4612,7 +4737,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "f873044bf02dd1e8239e9c1293ea39dad76dc594ec16185d0a1bf31d8dc8d858" dependencies = [ "async-compression", - "base64", + "base64 0.13.1", "bitflags", "bytes", "futures-core", @@ -4708,9 +4833,9 @@ dependencies = [ [[package]] name = "try-lock" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" +checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" [[package]] name = "typenum" @@ -4741,9 +4866,9 @@ checksum = "84a22b9f218b40614adcb3f4ff08b703773ad44fa9423e4e0d346d5db86e4ebc" [[package]] name = "unicode-segmentation" -version = "1.10.0" +version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fdbf052a0783de01e944a6ce7a8cb939e295b1e7be835a1112c3b9a7f047a5a" +checksum = "1dd624098567895118886609431a7c3b8f516e41d30e0643f03d94592a147e36" [[package]] name = "unicode-width" @@ -4853,6 +4978,12 @@ version = "0.9.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" +[[package]] +name = "wasi" +version = "0.10.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" + [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" @@ -4950,18 +5081,18 @@ checksum = "1c38c045535d93ec4f0b4defec448e4291638ee608530863b1e2ba115d4fff7f" [[package]] name = "wasm-encoder" -version = "0.20.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05632e0a66a6ed8cca593c24223aabd6262f256c3693ad9822c315285f010614" +checksum = "ef126be0e14bdf355ac1a8b41afc89195289e5c7179f80118e3abddb472f0810" dependencies = [ "leb128", ] [[package]] name = "wasmer" -version = 
"3.1.0" +version = "3.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "740f96c9e5d49f0056d716977657f3f7f8eea9923b41f46d1046946707aa038f" +checksum = "840af6d21701220cb805dc7201af301cb99e9b4f646f48a41befbc1d949f0f90" dependencies = [ "bytes", "cfg-if", @@ -4985,9 +5116,9 @@ dependencies = [ [[package]] name = "wasmer-compiler" -version = "3.1.0" +version = "3.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "001d072dd9823e5a06052621eadb531627b4a508d74b67da4590a3d5d9332dc8" +checksum = "b86fab98beaaace77380cb04e681773739473860d1b8499ea6b14f920923e0c5" dependencies = [ "backtrace", "cfg-if", @@ -5009,9 +5140,9 @@ dependencies = [ [[package]] name = "wasmer-compiler-cranelift" -version = "3.1.0" +version = "3.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2974856a7ce40eb033efc9db3d480845385c27079b6e33ce51751f2f3c67e9bd" +checksum = "015eef629fc84889540dc1686bd7fa524b93da9fd2d275b16c49dbe96268e58f" dependencies = [ "cranelift-codegen", "cranelift-entity", @@ -5028,9 +5159,9 @@ dependencies = [ [[package]] name = "wasmer-compiler-singlepass" -version = "3.1.0" +version = "3.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c6baae9a0b87050564178fc34138411682aeb725b57255b9b03735d6620d065" +checksum = "07e235ccc192d5f39147e8a430f48040dcfeebc1f1b0d979d2232ec1618d255c" dependencies = [ "byteorder", "dynasm", @@ -5047,9 +5178,9 @@ dependencies = [ [[package]] name = "wasmer-derive" -version = "3.1.0" +version = "3.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36b23b52272494369a1f96428f0056425a85a66154610c988d971bbace8230f1" +checksum = "1ff577b7c1cfcd3d7c5b3a09fe1a499b73f7c17084845ff71225c8250a6a63a9" dependencies = [ "proc-macro-error", "proc-macro2", @@ -5059,9 +5190,9 @@ dependencies = [ [[package]] name = "wasmer-middlewares" -version = "3.1.0" +version = "3.1.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ebe29eb090b5212606a2f295ded55d44f38f65ff9cfa85795127f77e119a729" +checksum = "c3f7b2443d00487fcd63e0158ea2eb7a12253fcc99b1c73a7a89796f3cb5a10f" dependencies = [ "wasmer", "wasmer-types", @@ -5070,9 +5201,9 @@ dependencies = [ [[package]] name = "wasmer-types" -version = "3.1.0" +version = "3.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bc6cd7a2d2d3bd901ff491f131188c1030694350685279e16e1233b9922846b" +checksum = "8b9600f9da966abae3be0b0a4560e7d1f2c88415a2d01ce362ac06063cb1c473" dependencies = [ "enum-iterator", "enumset", @@ -5085,9 +5216,9 @@ dependencies = [ [[package]] name = "wasmer-vm" -version = "3.1.0" +version = "3.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e67d0cd6c0ef4985d1ce9c7d7cccf34e910804417a230fa16ab7ee904efb4c34" +checksum = "9fc68a7f0a003e6cb63845b7510065097d289553201d64afb9a5e1744da3c6a0" dependencies = [ "backtrace", "cc", @@ -5115,9 +5246,9 @@ checksum = "718ed7c55c2add6548cca3ddd6383d738cd73b892df400e96b9aa876f0141d7a" [[package]] name = "wast" -version = "50.0.0" +version = "52.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2cbb59d4ac799842791fe7e806fa5dbbf6b5554d538e51cc8e176db6ff0ae34" +checksum = "707a9fd59b0144c530f0a31f21737036ffea6ece492918cae0843dd09b6f9bc9" dependencies = [ "leb128", "memchr", @@ -5127,9 +5258,9 @@ dependencies = [ [[package]] name = "wat" -version = "1.0.52" +version = "1.0.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "584aaf7a1ecf4d383bbe1a25eeab0cbb8ff96acc6796707ff65cde48f4632f15" +checksum = "91d73cbaa81acc2f8a3303e2289205c971d99c89245c2f56ab8765c4daabc2be" dependencies = [ "wast", ] @@ -5207,19 +5338,6 @@ dependencies = [ "windows_x86_64_msvc 0.33.0", ] -[[package]] -name = "windows-sys" -version = "0.36.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2" -dependencies = [ - "windows_aarch64_msvc 0.36.1", - "windows_i686_gnu 0.36.1", - "windows_i686_msvc 0.36.1", - "windows_x86_64_gnu 0.36.1", - "windows_x86_64_msvc 0.36.1", -] - [[package]] name = "windows-sys" version = "0.42.0" @@ -5227,19 +5345,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" dependencies = [ "windows_aarch64_gnullvm", - "windows_aarch64_msvc 0.42.0", - "windows_i686_gnu 0.42.0", - "windows_i686_msvc 0.42.0", - "windows_x86_64_gnu 0.42.0", + "windows_aarch64_msvc 0.42.1", + "windows_i686_gnu 0.42.1", + "windows_i686_msvc 0.42.1", + "windows_x86_64_gnu 0.42.1", "windows_x86_64_gnullvm", - "windows_x86_64_msvc 0.42.0", + "windows_x86_64_msvc 0.42.1", ] [[package]] name = "windows_aarch64_gnullvm" -version = "0.42.0" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41d2aa71f6f0cbe00ae5167d90ef3cfe66527d6f613ca78ac8024c3ccab9a19e" +checksum = "8c9864e83243fdec7fc9c5444389dcbbfd258f745e7853198f365e3c4968a608" [[package]] name = "windows_aarch64_msvc" @@ -5249,15 +5367,9 @@ checksum = "cd761fd3eb9ab8cc1ed81e56e567f02dd82c4c837e48ac3b2181b9ffc5060807" [[package]] name = "windows_aarch64_msvc" -version = "0.36.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47" - -[[package]] -name = "windows_aarch64_msvc" -version = "0.42.0" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd0f252f5a35cac83d6311b2e795981f5ee6e67eb1f9a7f64eb4500fbc4dcdb4" +checksum = "4c8b1b673ffc16c47a9ff48570a9d85e25d265735c503681332589af6253c6c7" [[package]] name = "windows_i686_gnu" @@ -5267,15 +5379,9 @@ checksum = "cab0cf703a96bab2dc0c02c0fa748491294bf9b7feb27e1f4f96340f208ada0e" [[package]] name = "windows_i686_gnu" -version 
= "0.36.1" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6" - -[[package]] -name = "windows_i686_gnu" -version = "0.42.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbeae19f6716841636c28d695375df17562ca208b2b7d0dc47635a50ae6c5de7" +checksum = "de3887528ad530ba7bdbb1faa8275ec7a1155a45ffa57c37993960277145d640" [[package]] name = "windows_i686_msvc" @@ -5285,15 +5391,9 @@ checksum = "8cfdbe89cc9ad7ce618ba34abc34bbb6c36d99e96cae2245b7943cd75ee773d0" [[package]] name = "windows_i686_msvc" -version = "0.36.1" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024" - -[[package]] -name = "windows_i686_msvc" -version = "0.42.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84c12f65daa39dd2babe6e442988fc329d6243fdce47d7d2d155b8d874862246" +checksum = "bf4d1122317eddd6ff351aa852118a2418ad4214e6613a50e0191f7004372605" [[package]] name = "windows_x86_64_gnu" @@ -5303,21 +5403,15 @@ checksum = "b4dd9b0c0e9ece7bb22e84d70d01b71c6d6248b81a3c60d11869451b4cb24784" [[package]] name = "windows_x86_64_gnu" -version = "0.36.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.42.0" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf7b1b21b5362cbc318f686150e5bcea75ecedc74dd157d874d754a2ca44b0ed" +checksum = "c1040f221285e17ebccbc2591ffdc2d44ee1f9186324dd3e84e99ac68d699c45" [[package]] name = "windows_x86_64_gnullvm" -version = "0.42.0" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09d525d2ba30eeb3297665bd434a54297e4170c7f1a44cad4ef58095b4cd2028" 
+checksum = "628bfdf232daa22b0d64fdb62b09fcc36bb01f05a3939e20ab73aaf9470d0463" [[package]] name = "windows_x86_64_msvc" @@ -5327,15 +5421,9 @@ checksum = "ff1e4aa646495048ec7f3ffddc411e1d829c026a2ec62b39da15c1055e406eaa" [[package]] name = "windows_x86_64_msvc" -version = "0.36.1" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.42.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f40009d85759725a34da6d89a94e63d7bdc50a862acf0dbc7c8e488f1edcb6f5" +checksum = "447660ad36a13288b1db4d4248e857b510e8c3a225c822ba4fb748c0aafecffd" [[package]] name = "wyz" @@ -5384,10 +5472,11 @@ dependencies = [ [[package]] name = "zstd-sys" -version = "2.0.4+zstd.1.5.2" +version = "2.0.6+zstd.1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fa202f2ef00074143e219d15b62ffc317d17cc33909feac471c044087cad7b0" +checksum = "68a3f9792c0c3dc6c165840a75f47ae1f4da402c2d006881129579f6597e801b" dependencies = [ "cc", "libc", + "pkg-config", ] diff --git a/Cargo.toml b/Cargo.toml index 6eb3224f43e..27c70714b81 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,7 @@ [workspace] members = [ "massa-api", + "massa-api-exports", "massa-async-pool", "massa-bootstrap", "massa-client", diff --git a/massa-api-exports/Cargo.toml b/massa-api-exports/Cargo.toml new file mode 100644 index 00000000000..518501adc11 --- /dev/null +++ b/massa-api-exports/Cargo.toml @@ -0,0 +1,35 @@ +[package] +name = "massa_api_exports" +version = "0.1.0" +authors = ["Massa Labs "] +edition = "2021" + +[dependencies] +paginate = "1.1.11" +displaydoc = "0.2" +thiserror = "1.0" +jsonrpsee-core = { version = "0.16.2" } +jsonrpsee-types = { version = "0.16.2" } +serde = { version = "1.0", features = ["derive"] } +strum = { version = "0.24", features = ["derive"] } + +# custom modules +massa_signature 
= { path = "../massa-signature" } +massa_time = { path = "../massa-time" } +massa_models = { path = "../massa-models" } +massa_final_state = { path = "../massa-final-state" } + +massa_consensus_exports = { path = "../massa-consensus-exports" } +massa_hash = { path = "../massa-hash" } +massa_network_exports = { path = "../massa-network-exports" } +massa_protocol_exports = { path = "../massa-protocol-exports" } +massa_execution_exports = { path = "../massa-execution-exports" } +massa_wallet = { path = "../massa-wallet" } + +[dev-dependencies] +serial_test = "1.0.0" + +# for more information on what are the following features used for, see the cargo.toml at workspace level +[features] +sandbox = [] +testing = [] diff --git a/massa-api-exports/src/address.rs b/massa-api-exports/src/address.rs new file mode 100644 index 00000000000..7410f0df20e --- /dev/null +++ b/massa-api-exports/src/address.rs @@ -0,0 +1,157 @@ +// Copyright (c) 2022 MASSA LABS + +use massa_models::address::ExecutionAddressCycleInfo; +use massa_models::endorsement::EndorsementId; +use massa_models::operation::OperationId; +use massa_models::slot::{IndexedSlot, Slot}; +use massa_models::{address::Address, amount::Amount, block_id::BlockId}; +use serde::{Deserialize, Serialize}; + +use crate::slot::SlotAmount; + +/// All you ever dream to know about an address +#[derive(Debug, Deserialize, Serialize)] +pub struct AddressInfo { + /// the address + pub address: Address, + /// the thread the address belongs to + pub thread: u8, + + /// final balance + pub final_balance: Amount, + /// final roll count + pub final_roll_count: u64, + /// final datastore keys + pub final_datastore_keys: Vec>, + + /// candidate balance + pub candidate_balance: Amount, + /// candidate roll count + pub candidate_roll_count: u64, + /// candidate datastore keys + pub candidate_datastore_keys: Vec>, + + /// deferred credits + pub deferred_credits: Vec, + + /// next block draws + pub next_block_draws: Vec, + /// next endorsement 
draws + pub next_endorsement_draws: Vec, + + /// created blocks + pub created_blocks: Vec, + /// created operations + pub created_operations: Vec, + /// created endorsements + pub created_endorsements: Vec, + + /// cycle information + pub cycle_infos: Vec, +} + +impl std::fmt::Display for AddressInfo { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + writeln!(f, "Address {} (thread {}):", self.address, self.thread)?; + writeln!( + f, + "\tBalance: final={}, candidate={}", + self.final_balance, self.candidate_balance + )?; + writeln!( + f, + "\tRolls: final={}, candidate={}", + self.final_roll_count, self.candidate_roll_count + )?; + write!(f, "\tLocked coins:")?; + if self.deferred_credits.is_empty() { + writeln!(f, "0")?; + } else { + for slot_amount in &self.deferred_credits { + writeln!( + f, + "\t\t{} locked coins will be unlocked at slot {}", + slot_amount.amount, slot_amount.slot + )?; + } + } + writeln!(f, "\tCycle infos:")?; + for cycle_info in &self.cycle_infos { + writeln!( + f, + "\t\tCycle {} ({}): produced {} and missed {} blocks{}", + cycle_info.cycle, + if cycle_info.is_final { + "final" + } else { + "candidate" + }, + cycle_info.ok_count, + cycle_info.nok_count, + match cycle_info.active_rolls { + Some(rolls) => format!(" with {} active rolls", rolls), + None => "".into(), + }, + )?; + } + //writeln!(f, "\tProduced blocks: {}", self.created_blocks.iter().map(|id| id.to_string()).intersperse(", ".into()).collect())?; + //writeln!(f, "\tProduced operations: {}", self.created_operations.iter().map(|id| id.to_string()).intersperse(", ".into()).collect())?; + //writeln!(f, "\tProduced endorsements: {}", self.created_endorsements.iter().map(|id| id.to_string()).intersperse(", ".into()).collect())?; + Ok(()) + } +} + +impl AddressInfo { + /// Only essential info about an address + pub fn compact(&self) -> CompactAddressInfo { + CompactAddressInfo { + address: self.address, + thread: self.thread, + active_rolls: self + .cycle_infos + 
.last() + .and_then(|c| c.active_rolls) + .unwrap_or_default(), + final_rolls: self.final_roll_count, + candidate_rolls: self.candidate_roll_count, + final_balance: self.final_balance, + candidate_balance: self.candidate_balance, + } + } +} + +/// Less information about an address +#[derive(Debug, Serialize, Deserialize)] +pub struct CompactAddressInfo { + /// the address + pub address: Address, + /// the thread it is + pub thread: u8, + /// candidate rolls + pub candidate_rolls: u64, + /// final rolls + pub final_rolls: u64, + /// active rolls + pub active_rolls: u64, + /// final balance + pub final_balance: Amount, + /// candidate balance + pub candidate_balance: Amount, +} + +impl std::fmt::Display for CompactAddressInfo { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + writeln!(f, "Address: {} (thread {}):", self.address, self.thread)?; + writeln!( + f, + "\tBalance: final={}, candidate={}", + self.final_balance, self.candidate_balance + )?; + writeln!( + f, + "\tRolls: active={}, final={}, candidate={}", + self.active_rolls, self.final_rolls, self.candidate_rolls + )?; + Ok(()) + } +} diff --git a/massa-api-exports/src/block.rs b/massa-api-exports/src/block.rs new file mode 100644 index 00000000000..f10bcbd9633 --- /dev/null +++ b/massa-api-exports/src/block.rs @@ -0,0 +1,90 @@ +// Copyright (c) 2022 MASSA LABS + +use massa_models::{address::Address, block::Block, block_id::BlockId, slot::Slot}; + +use serde::{Deserialize, Serialize}; + +use crate::display_if_true; + +/// refactor to delete +#[derive(Debug, Deserialize, Serialize)] +pub struct BlockInfo { + /// block id + pub id: BlockId, + /// optional block info content + pub content: Option, +} + +/// Block content +#[derive(Debug, Deserialize, Serialize)] +pub struct BlockInfoContent { + /// true if final + pub is_final: bool, + /// true if in the greatest clique (and not final) + pub is_in_blockclique: bool, + /// true if candidate (active any clique but not final) + pub 
is_candidate: bool, + /// true if discarded + pub is_discarded: bool, + /// block + pub block: Block, +} + +impl std::fmt::Display for BlockInfo { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + if let Some(content) = &self.content { + writeln!( + f, + "Block ID: {}{}{}{}{}", + self.id, + display_if_true(content.is_final, " (final)"), + display_if_true(content.is_candidate, " (candidate)"), + display_if_true(content.is_in_blockclique, " (blockclique)"), + display_if_true(content.is_discarded, " (discarded)"), + )?; + writeln!(f, "Block: {}", content.block)?; + } else { + writeln!(f, "Block {} not found", self.id)?; + } + Ok(()) + } +} + +/// A block resume (without the block itself) +#[derive(Debug, Deserialize, Serialize)] +pub struct BlockSummary { + /// id + pub id: BlockId, + /// true if in a final block + pub is_final: bool, + /// true if incompatible with a final block + pub is_stale: bool, + /// true if in the greatest block clique + pub is_in_blockclique: bool, + /// the slot the block is in + pub slot: Slot, + /// the block creator + pub creator: Address, + /// the block parents + pub parents: Vec, +} + +impl std::fmt::Display for BlockSummary { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + writeln!( + f, + "Block's ID: {}{}{}{}", + self.id, + display_if_true(self.is_final, "final"), + display_if_true(self.is_stale, "stale"), + display_if_true(self.is_in_blockclique, "in blockclique"), + )?; + writeln!(f, "Slot: {}", self.slot)?; + writeln!(f, "Creator: {}", self.creator)?; + writeln!(f, "Parents' IDs:")?; + for parent in &self.parents { + writeln!(f, "\t- {}", parent)?; + } + Ok(()) + } +} diff --git a/massa-api/src/config.rs b/massa-api-exports/src/config.rs similarity index 100% rename from massa-api/src/config.rs rename to massa-api-exports/src/config.rs diff --git a/massa-api-exports/src/datastore.rs b/massa-api-exports/src/datastore.rs new file mode 100644 index 00000000000..83cfb1a8f6f --- 
/dev/null +++ b/massa-api-exports/src/datastore.rs @@ -0,0 +1,30 @@ +// Copyright (c) 2022 MASSA LABS + +use massa_models::address::Address; +use serde::{Deserialize, Serialize}; + +/// Datastore entry query input structure +#[derive(Debug, Deserialize, Clone, Serialize)] +pub struct DatastoreEntryInput { + /// associated address of the entry + pub address: Address, + /// datastore key + pub key: Vec, +} + +/// Datastore entry query output structure +#[derive(Debug, Deserialize, Clone, Serialize)] +pub struct DatastoreEntryOutput { + /// final datastore entry value + pub final_value: Option>, + /// candidate datastore entry value + pub candidate_value: Option>, +} + +impl std::fmt::Display for DatastoreEntryOutput { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + writeln!(f, "final value: {:?}", self.final_value)?; + writeln!(f, "candidate value: {:?}", self.candidate_value)?; + Ok(()) + } +} diff --git a/massa-api-exports/src/endorsement.rs b/massa-api-exports/src/endorsement.rs new file mode 100644 index 00000000000..1269dadc435 --- /dev/null +++ b/massa-api-exports/src/endorsement.rs @@ -0,0 +1,43 @@ +// Copyright (c) 2022 MASSA LABS + +use massa_models::{ + block_id::BlockId, + endorsement::{EndorsementId, SecureShareEndorsement}, +}; +use serde::{Deserialize, Serialize}; + +use crate::display_if_true; + +/// All you wanna know about an endorsement +#[derive(Debug, Deserialize, Serialize, Clone)] +pub struct EndorsementInfo { + /// id + pub id: EndorsementId, + /// true if endorsement is still in pool + pub in_pool: bool, + /// the endorsement appears in `in_blocks` + /// if it appears in multiple blocks, these blocks are in different cliques + pub in_blocks: Vec, + /// true if the endorsement is final (for example in a final block) + pub is_final: bool, + /// the endorsement itself + pub endorsement: SecureShareEndorsement, +} + +impl std::fmt::Display for EndorsementInfo { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> 
std::fmt::Result { + writeln!( + f, + "Endorsement {}{}{}", + self.id, + display_if_true(self.in_pool, " (in pool)"), + display_if_true(self.is_final, " (final)") + )?; + writeln!(f, "In blocks:")?; + for block_id in &self.in_blocks { + writeln!(f, "\t- {}", block_id)?; + } + writeln!(f, "{}", self.endorsement)?; + Ok(()) + } +} diff --git a/massa-api/src/error.rs b/massa-api-exports/src/error.rs similarity index 95% rename from massa-api/src/error.rs rename to massa-api-exports/src/error.rs index 2ce7d9efd54..f1e6330b6c1 100644 --- a/massa-api/src/error.rs +++ b/massa-api-exports/src/error.rs @@ -1,10 +1,8 @@ // Copyright (c) 2022 MASSA LABS use displaydoc::Display; -use jsonrpsee::{ - core::Error as JsonRpseeError, - types::error::{CallError, ErrorObject}, -}; +use jsonrpsee_core::Error as JsonRpseeError; +use jsonrpsee_types::{error::CallError, ErrorObject}; use massa_consensus_exports::error::ConsensusError; use massa_execution_exports::ExecutionError; @@ -15,6 +13,7 @@ use massa_protocol_exports::ProtocolError; use massa_time::TimeError; use massa_wallet::WalletError; +/// Errors of the api component. #[non_exhaustive] #[derive(Display, thiserror::Error, Debug)] pub enum ApiError { diff --git a/massa-api-exports/src/execution.rs b/massa-api-exports/src/execution.rs new file mode 100644 index 00000000000..826f1bba31e --- /dev/null +++ b/massa-api-exports/src/execution.rs @@ -0,0 +1,87 @@ +// Copyright (c) 2022 MASSA LABS + +use massa_final_state::StateChanges; +use massa_models::{address::Address, output_event::SCOutputEvent, slot::Slot}; +use serde::{Deserialize, Serialize}; +use std::{collections::VecDeque, fmt::Display}; + +/// The result of the read-only execution. +#[derive(Clone, Debug, Deserialize, Serialize)] +pub enum ReadOnlyResult { + /// An error occurred during execution. + Error(String), + /// The result of a successful execution. + Ok(Vec), +} + +/// The response to a request for a read-only execution. 
+#[derive(Clone, Debug, Deserialize, Serialize)] +pub struct ExecuteReadOnlyResponse { + /// The slot at which the read-only execution occurred. + pub executed_at: Slot, + /// The result of the read-only execution. + pub result: ReadOnlyResult, + /// The output events generated by the read-only execution. + pub output_events: VecDeque, + /// The gas cost for the execution + pub gas_cost: u64, + /// state changes caused by the execution step + pub state_changes: StateChanges, +} + +impl Display for ExecuteReadOnlyResponse { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + writeln!(f, "Executed at slot: {}", self.executed_at)?; + writeln!( + f, + "Result: {}", + match &self.result { + ReadOnlyResult::Error(e) => + format!("an error occurred during the execution: {}", e), + ReadOnlyResult::Ok(ret) => format!("success, returned value: {:?}", ret), + } + )?; + writeln!(f, "Gas cost: {}", self.gas_cost)?; + if !self.output_events.is_empty() { + writeln!(f, "Generated events:",)?; + for event in self.output_events.iter() { + writeln!(f, "{}", event)?; // id already displayed in event + } + } + Ok(()) + } +} + +/// read only bytecode execution request +#[derive(Debug, Deserialize, Clone, Serialize)] +pub struct ReadOnlyBytecodeExecution { + /// max available gas + pub max_gas: u64, + /// byte code + pub bytecode: Vec, + /// caller's address, optional + pub address: Option
, + /// Operation datastore, optional + pub operation_datastore: Option>, + /// whether to start execution from final or active state. Default false + #[serde(default)] + pub is_final: bool, +} + +/// read SC call request +#[derive(Debug, Deserialize, Clone, Serialize)] +pub struct ReadOnlyCall { + /// max available gas + pub max_gas: u64, + /// target address + pub target_address: Address, + /// target function + pub target_function: String, + /// function parameter + pub parameter: Vec, + /// caller's address, optional + pub caller_address: Option
, + /// whether to start execution from final or active state. Default false + #[serde(default)] + pub is_final: bool, +} diff --git a/massa-api-exports/src/ledger.rs b/massa-api-exports/src/ledger.rs new file mode 100644 index 00000000000..b9e83094976 --- /dev/null +++ b/massa-api-exports/src/ledger.rs @@ -0,0 +1,30 @@ +// Copyright (c) 2022 MASSA LABS + +use massa_models::amount::Amount; +use massa_models::ledger::LedgerData; + +use serde::{Deserialize, Serialize}; + +/// Current balance ledger info +#[derive(Debug, Deserialize, Serialize, Clone, Copy)] +pub struct LedgerInfo { + /// final data + pub final_ledger_info: LedgerData, + /// latest data + pub candidate_ledger_info: LedgerData, + /// locked balance, for example balance due to a roll sell + pub locked_balance: Amount, +} + +impl std::fmt::Display for LedgerInfo { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + writeln!(f, "\tFinal balance: {}", self.final_ledger_info.balance)?; + writeln!( + f, + "\tCandidate balance: {}", + self.candidate_ledger_info.balance + )?; + writeln!(f, "\tLocked balance: {}", self.locked_balance)?; + Ok(()) + } +} diff --git a/massa-api-exports/src/lib.rs b/massa-api-exports/src/lib.rs new file mode 100644 index 00000000000..6b9d5de2344 --- /dev/null +++ b/massa-api-exports/src/lib.rs @@ -0,0 +1,82 @@ +//! Copyright (c) 2022 MASSA LABS +//! All the structures that are used everywhere +//! 
+#![warn(missing_docs)] +#![warn(unused_crate_dependencies)] +#![feature(bound_map)] +#![feature(int_roundings)] +#![feature(iter_intersperse)] + +use massa_time::MassaTime; +use serde::{Deserialize, Serialize}; + +/// address related structures +pub mod address; +/// block-related structures +pub mod block; +/// node configuration +pub mod config; +/// datastore serialization / deserialization +pub mod datastore; +/// endorsements +pub mod endorsement; +/// models error +pub mod error; +/// execution +pub mod execution; +/// ledger structures +pub mod ledger; +/// node related structure +pub mod node; +/// operations +pub mod operation; +/// page +pub mod page; +/// rolls +pub mod rolls; +/// slots +pub mod slot; + +/// Dumb utils function to display nicely boolean value +fn display_if_true(value: bool, text: &str) -> String { + if value { + format!("[{}]", text) + } else { + String::from("") + } +} + +/// Just a wrapper with a optional beginning and end +#[derive(Debug, Deserialize, Clone, Copy, Serialize)] +pub struct TimeInterval { + /// optional start slot + pub start: Option, + /// optional end slot + pub end: Option, +} + +/// SCRUD operations +#[derive(strum::Display)] +#[strum(serialize_all = "snake_case")] +pub enum ScrudOperation { + /// search operation + Search, + /// create operation + Create, + /// read operation + Read, + /// update operation + Update, + /// delete operation + Delete, +} + +/// Bootsrap lists types +#[derive(strum::Display)] +#[strum(serialize_all = "snake_case")] +pub enum ListType { + /// contains banned entry + Blacklist, + /// contains allowed entry + Whitelist, +} diff --git a/massa-api-exports/src/node.rs b/massa-api-exports/src/node.rs new file mode 100644 index 00000000000..7f8069e291a --- /dev/null +++ b/massa-api-exports/src/node.rs @@ -0,0 +1,91 @@ +// Copyright (c) 2022 MASSA LABS + +use massa_models::node::NodeId; +use massa_models::stats::{ConsensusStats, ExecutionStats, NetworkStats}; +use 
massa_models::{config::CompactConfig, slot::Slot, version::Version}; +use massa_time::MassaTime; +use serde::{Deserialize, Serialize}; +use std::collections::BTreeMap; +use std::net::IpAddr; + +/// node status +#[derive(Debug, Deserialize, Serialize)] +pub struct NodeStatus { + /// our node id + pub node_id: NodeId, + /// optional node ip + pub node_ip: Option, + /// node version + pub version: Version, + /// now + pub current_time: MassaTime, + /// current cycle + pub current_cycle: u64, + /// current cycle starting timestamp + pub current_cycle_time: MassaTime, + /// next cycle starting timestamp + pub next_cycle_time: MassaTime, + /// connected nodes (node id, ip address, true if the connection is outgoing, false if incoming) + pub connected_nodes: BTreeMap, + /// latest slot, none if now is before genesis timestamp + pub last_slot: Option, + /// next slot + pub next_slot: Slot, + /// consensus stats + pub consensus_stats: ConsensusStats, + /// pool stats (operation count and endorsement count) + pub pool_stats: (usize, usize), + /// network stats + pub network_stats: NetworkStats, + /// execution stats + pub execution_stats: ExecutionStats, + /// compact configuration + pub config: CompactConfig, +} + +impl std::fmt::Display for NodeStatus { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + writeln!(f, "Node's ID: {}", self.node_id)?; + if self.node_ip.is_some() { + writeln!(f, "Node's IP: {}", self.node_ip.unwrap())?; + } else { + writeln!(f, "No routable IP set")?; + } + writeln!(f)?; + + writeln!(f, "Version: {}", self.version)?; + writeln!(f, "Config:\n{}", self.config)?; + writeln!(f)?; + + writeln!(f, "Current time: {}", self.current_time.to_utc_string())?; + writeln!(f, "Current cycle: {}", self.current_cycle)?; + if self.last_slot.is_some() { + writeln!(f, "Last slot: {}", self.last_slot.unwrap())?; + } + writeln!(f, "Next slot: {}", self.next_slot)?; + writeln!(f)?; + + writeln!(f, "{}", self.consensus_stats)?; + + writeln!(f, 
"Pool stats:")?; + writeln!(f, "\tOperations count: {}", self.pool_stats.0)?; + writeln!(f, "\tEndorsements count: {}", self.pool_stats.1)?; + writeln!(f)?; + + writeln!(f, "{}", self.network_stats)?; + + writeln!(f, "{}", self.execution_stats)?; + + writeln!(f, "Connected nodes:")?; + for (node_id, (ip_addr, is_outgoing)) in &self.connected_nodes { + writeln!( + f, + "Node's ID: {} / IP address: {} / {} connection", + node_id, + ip_addr, + if *is_outgoing { "Out" } else { "In" } + )? + } + Ok(()) + } +} diff --git a/massa-api-exports/src/operation.rs b/massa-api-exports/src/operation.rs new file mode 100644 index 00000000000..1b62d8f5c15 --- /dev/null +++ b/massa-api-exports/src/operation.rs @@ -0,0 +1,101 @@ +// Copyright (c) 2022 MASSA LABS + +use massa_models::{ + block_id::BlockId, + operation::{OperationId, SecureShareOperation}, +}; + +use massa_signature::{PublicKey, Signature}; +use serde::{Deserialize, Serialize}; + +use crate::display_if_true; + +/// operation input +#[derive(Serialize, Deserialize, Debug)] +pub struct OperationInput { + /// The public key of the creator of the TX + pub creator_public_key: PublicKey, + /// The signature of the operation + pub signature: Signature, + /// The serialized version of the content `base58` encoded + pub serialized_content: Vec, +} + +/// Operation and contextual info about it +#[derive(Debug, Deserialize, Serialize)] +pub struct OperationInfo { + /// id + pub id: OperationId, + /// true if operation is still in pool + pub in_pool: bool, + /// the operation appears in `in_blocks` + /// if it appears in multiple blocks, these blocks are in different cliques + pub in_blocks: Vec, + /// true if the operation is final (for example in a final block) + pub is_final: bool, + /// Thread in which the operation can be included + pub thread: u8, + /// the operation itself + pub operation: SecureShareOperation, +} + +impl std::fmt::Display for OperationInfo { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> 
std::fmt::Result { + writeln!( + f, + "Operation {}{}{}", + self.id, + display_if_true(self.in_pool, " (in pool)"), + display_if_true(self.is_final, " (final)") + )?; + writeln!(f, "In blocks:")?; + for block_id in &self.in_blocks { + writeln!(f, "\t- {}", block_id)?; + } + writeln!(f, "{}", self.operation)?; + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use jsonrpsee_core::__reexports::serde_json::{self, Value}; + use massa_models::operation::OperationType; + use serial_test::serial; + use std::collections::BTreeMap; + + #[test] + #[serial] + fn test_execute_sc_with_datastore() { + let given_op = OperationType::ExecuteSC { + max_gas: 123, + data: vec![23u8, 123u8, 44u8], + datastore: BTreeMap::from([ + (vec![1, 2, 3], vec![4, 5, 6, 7, 8, 9]), + (vec![22, 33, 44, 55, 66, 77], vec![11]), + (vec![2, 3, 4, 5, 6, 7], vec![1]), + ]), + }; + + let op_json_str = serde_json::to_string(&given_op).unwrap(); + + let op_json_value: Value = serde_json::from_str(&op_json_str).unwrap(); + let datastore = op_json_value["ExecuteSC"] + .as_object() + .unwrap() + .get("datastore") + .unwrap() + .as_array() + .unwrap(); + assert_eq!(datastore.len(), 3); + let first_entry = datastore[0].as_array().unwrap(); + assert_eq!(first_entry.len(), 2); + let first_key = first_entry[0].as_array().unwrap(); + let first_value = first_entry[1].as_array().unwrap(); + assert_eq!(first_key.len(), 3); + assert_eq!(first_value.len(), 6); + + let expected_op = serde_json::from_str(&op_json_str).unwrap(); + assert_eq!(given_op, expected_op); + } +} diff --git a/massa-api-exports/src/page.rs b/massa-api-exports/src/page.rs new file mode 100644 index 00000000000..d0ad5fa0295 --- /dev/null +++ b/massa-api-exports/src/page.rs @@ -0,0 +1,52 @@ +// Copyright (c) 2022 MASSA LABS + +use paginate::Pages; +use serde::{Deserialize, Serialize, Serializer}; + +/// Represents a Vec that can be split across Pages +/// Cf. 
https://docs.rs/paginate/latest/paginate/ +pub struct PagedVec { + res: Vec, + _total_count: usize, +} + +impl PagedVec { + /// Creates a new Paged Vec with optional limits of item per page and offset + pub fn new(elements: Vec, page_request: Option) -> Self { + let total_count = elements.len(); + + let (limit, offset) = match page_request { + Some(PageRequest { limit, offset }) => (limit, offset), + None => (total_count, 0), + }; + + let pages = Pages::new(total_count, limit); + let page = pages.with_offset(offset); + + let res: Vec<_> = elements + .into_iter() + .skip(page.start) + .take(page.length) + .collect(); + + PagedVec { + res, + _total_count: total_count, + } + } +} + +impl Serialize for PagedVec { + fn serialize(&self, s: S) -> Result { + self.res.serialize::(s) + } +} + +/// Represents the request inputs for a PagedVec +#[derive(Deserialize)] +pub struct PageRequest { + /// The limit of elements in a page + pub limit: usize, + /// The page offset + pub offset: usize, +} diff --git a/massa-api-exports/src/rolls.rs b/massa-api-exports/src/rolls.rs new file mode 100644 index 00000000000..2802ee8451e --- /dev/null +++ b/massa-api-exports/src/rolls.rs @@ -0,0 +1,23 @@ +// Copyright (c) 2022 MASSA LABS + +use serde::{Deserialize, Serialize}; + +/// Roll counts +#[derive(Debug, Deserialize, Serialize, Clone, Copy)] +pub struct RollsInfo { + /// count taken into account for the current cycle + pub active_rolls: u64, + /// at final blocks + pub final_rolls: u64, + /// at latest blocks + pub candidate_rolls: u64, +} + +impl std::fmt::Display for RollsInfo { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + writeln!(f, "\tActive rolls: {}", self.active_rolls)?; + writeln!(f, "\tFinal rolls: {}", self.final_rolls)?; + writeln!(f, "\tCandidate rolls: {}", self.candidate_rolls)?; + Ok(()) + } +} diff --git a/massa-api-exports/src/slot.rs b/massa-api-exports/src/slot.rs new file mode 100644 index 00000000000..a62f505ae5f --- /dev/null +++ 
b/massa-api-exports/src/slot.rs @@ -0,0 +1,14 @@ +// Copyright (c) 2022 MASSA LABS + +use massa_models::{amount::Amount, slot::Slot}; + +use serde::{Deserialize, Serialize}; + +/// slot / amount pair +#[derive(Debug, Deserialize, Serialize)] +pub struct SlotAmount { + /// slot + pub slot: Slot, + /// amount + pub amount: Amount, +} diff --git a/massa-api/Cargo.toml b/massa-api/Cargo.toml index 0a5190689fc..cecf9f59514 100644 --- a/massa-api/Cargo.toml +++ b/massa-api/Cargo.toml @@ -6,7 +6,6 @@ edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -displaydoc = "0.2" jsonrpsee = { version = "0.16.2", features = ["server", "macros"] } async-trait = "0.1.58" serde = { version = "1.0", features = ["derive"] } @@ -14,15 +13,14 @@ serde_json = "1.0.87" tower-http = { version = "0.3.4", features = ["full"] } tower = { version = "0.4.13", features = ["full"] } hyper = "0.14.20" -thiserror = "1.0" -tokio = { version = "1.21", features = ["full"] } +tokio = { version = "1.23", features = ["full"] } tokio-stream = { version = "0.1", features = ["sync"] } tracing = "0.1" itertools = "0.10" parking_lot = { version = "0.12", features = ["deadlock_detection"] } # custom modules massa_consensus_exports = { path = "../massa-consensus-exports" } -massa_hash = { path = "../massa-hash" } +massa_api_exports = { path = "../massa-api-exports" } massa_models = { path = "../massa-models" } massa_network_exports = { path = "../massa-network-exports" } massa_pool_exports = { path = "../massa-pool-exports" } diff --git a/massa-api/src/api.rs b/massa-api/src/api.rs index 5e9f2ea4d19..35b1458763f 100644 --- a/massa-api/src/api.rs +++ b/massa-api/src/api.rs @@ -3,15 +3,16 @@ use std::net::SocketAddr; use crate::api_trait::MassaApiServer; -use crate::{APIConfig, ApiServer, ApiV2, StopHandle, API}; +use crate::{ApiServer, ApiV2, StopHandle, API}; use async_trait::async_trait; use jsonrpsee::core::error::SubscriptionClosed; 
use jsonrpsee::core::{Error as JsonRpseeError, RpcResult}; use jsonrpsee::types::SubscriptionResult; use jsonrpsee::SubscriptionSink; +use massa_api_exports::config::APIConfig; use massa_consensus_exports::ConsensusChannels; use massa_models::version::Version; -use massa_protocol_exports::ProtocolSenders; +use massa_pool_exports::PoolChannels; use serde::Serialize; use tokio_stream::wrappers::BroadcastStream; @@ -19,13 +20,13 @@ impl API { /// generate a new massa API pub fn new( consensus_channels: ConsensusChannels, - protocol_senders: ProtocolSenders, + pool_channels: PoolChannels, api_settings: APIConfig, version: Version, ) -> Self { API(ApiV2 { consensus_channels, - protocol_senders, + pool_channels, api_settings, version, }) @@ -66,7 +67,7 @@ impl MassaApiServer for API { } fn subscribe_new_operations(&self, sink: SubscriptionSink) -> SubscriptionResult { - broadcast_via_ws(self.0.protocol_senders.operation_sender.clone(), sink); + broadcast_via_ws(self.0.pool_channels.operation_sender.clone(), sink); Ok(()) } } diff --git a/massa-api/src/api_trait.rs b/massa-api/src/api_trait.rs index baf6f90e05b..da233c8c0ca 100644 --- a/massa-api/src/api_trait.rs +++ b/massa-api/src/api_trait.rs @@ -27,7 +27,7 @@ pub trait MassaApi { )] fn subscribe_new_blocks_headers(&self); - /// New produced block with operations content. + /// New produced blocks with operations content. 
#[subscription( name = "subscribe_new_filled_blocks" => "new_filled_blocks", unsubscribe = "unsubscribe_new_filled_blocks", diff --git a/massa-api/src/lib.rs b/massa-api/src/lib.rs index b6b72e5333e..798e255a4ab 100644 --- a/massa-api/src/lib.rs +++ b/massa-api/src/lib.rs @@ -3,56 +3,55 @@ #![feature(async_closure)] #![warn(missing_docs)] #![warn(unused_crate_dependencies)] -use crate::api_trait::MassaApiServer; -use crate::error::ApiError::WrongAPI; +use api_trait::MassaApiServer; use hyper::Method; use jsonrpsee::core::{Error as JsonRpseeError, RpcResult}; use jsonrpsee::proc_macros::rpc; use jsonrpsee::server::{AllowHosts, ServerBuilder, ServerHandle}; use jsonrpsee::RpcModule; +use massa_api_exports::{ + address::AddressInfo, + block::{BlockInfo, BlockSummary}, + config::APIConfig, + datastore::{DatastoreEntryInput, DatastoreEntryOutput}, + endorsement::EndorsementInfo, + error::ApiError::WrongAPI, + execution::{ExecuteReadOnlyResponse, ReadOnlyBytecodeExecution, ReadOnlyCall}, + node::NodeStatus, + operation::{OperationInfo, OperationInput}, + page::{PageRequest, PagedVec}, + TimeInterval, +}; use massa_consensus_exports::{ConsensusChannels, ConsensusController}; use massa_execution_exports::ExecutionController; -use massa_models::api::{ - AddressInfo, BlockInfo, BlockSummary, DatastoreEntryInput, DatastoreEntryOutput, - EndorsementInfo, EventFilter, NodeStatus, OperationInfo, OperationInput, - ReadOnlyBytecodeExecution, ReadOnlyCall, TimeInterval, -}; use massa_models::clique::Clique; use massa_models::composite::PubkeySig; -use massa_models::execution::ExecuteReadOnlyResponse; use massa_models::node::NodeId; use massa_models::operation::OperationId; use massa_models::output_event::SCOutputEvent; use massa_models::prehash::PreHashSet; use massa_models::{ - address::Address, - block::{Block, BlockId}, - endorsement::EndorsementId, - slot::Slot, - version::Version, + address::Address, block::Block, block_id::BlockId, endorsement::EndorsementId, + 
execution::EventFilter, slot::Slot, version::Version, }; use massa_network_exports::{NetworkCommandSender, NetworkConfig}; -use massa_pool_exports::PoolController; +use massa_pool_exports::{PoolChannels, PoolController}; use massa_pos_exports::SelectorController; -use massa_protocol_exports::{ProtocolCommandSender, ProtocolSenders}; +use massa_protocol_exports::ProtocolCommandSender; use massa_storage::Storage; use massa_wallet::Wallet; use parking_lot::RwLock; use serde_json::Value; use std::net::{IpAddr, SocketAddr}; use std::sync::Arc; -use tower_http::cors::{Any, CorsLayer}; - use tokio::sync::mpsc; +use tower_http::cors::{Any, CorsLayer}; use tracing::{info, warn}; mod api; mod api_trait; -mod config; -mod error; mod private; mod public; -pub use config::APIConfig; /// Public API component pub struct Public { @@ -98,8 +97,8 @@ pub struct Private { pub struct ApiV2 { /// link(channels) to the consensus component pub consensus_channels: ConsensusChannels, - /// link(channels) to the protocol component - pub protocol_senders: ProtocolSenders, + /// link(channels) to the pool component + pub pool_channels: PoolChannels, /// API settings pub api_settings: APIConfig, /// node version @@ -267,32 +266,32 @@ pub trait MassaRpc { #[method(name = "node_remove_from_peers_whitelist")] async fn node_remove_from_peers_whitelist(&self, arg: Vec) -> RpcResult<()>; - /// Returns node bootsrap whitelist IP address(es). + /// Returns node bootstrap whitelist IP address(es). #[method(name = "node_bootstrap_whitelist")] async fn node_bootstrap_whitelist(&self) -> RpcResult>; - /// Allow everyone to bootsrap from the node. - /// remove bootsrap whitelist configuration file. + /// Allow everyone to bootstrap from the node. + /// remove bootstrap whitelist configuration file. #[method(name = "node_bootstrap_whitelist_allow_all")] async fn node_bootstrap_whitelist_allow_all(&self) -> RpcResult<()>; - /// Add IP address(es) to node bootsrap whitelist. 
+ /// Add IP address(es) to node bootstrap whitelist. #[method(name = "node_add_to_bootstrap_whitelist")] async fn node_add_to_bootstrap_whitelist(&self, arg: Vec) -> RpcResult<()>; - /// Remove IP address(es) to bootsrap whitelist. + /// Remove IP address(es) to bootstrap whitelist. #[method(name = "node_remove_from_bootstrap_whitelist")] async fn node_remove_from_bootstrap_whitelist(&self, arg: Vec) -> RpcResult<()>; - /// Returns node bootsrap blacklist IP address(es). + /// Returns node bootstrap blacklist IP address(es). #[method(name = "node_bootstrap_blacklist")] async fn node_bootstrap_blacklist(&self) -> RpcResult>; - /// Add IP address(es) to node bootsrap blacklist. + /// Add IP address(es) to node bootstrap blacklist. #[method(name = "node_add_to_bootstrap_blacklist")] async fn node_add_to_bootstrap_blacklist(&self, arg: Vec) -> RpcResult<()>; - /// Remove IP address(es) to bootsrap blacklist. + /// Remove IP address(es) to bootstrap blacklist. #[method(name = "node_remove_from_bootstrap_blacklist")] async fn node_remove_from_bootstrap_blacklist(&self, arg: Vec) -> RpcResult<()>; @@ -316,7 +315,10 @@ pub trait MassaRpc { /// Returns the active stakers and their active roll counts for the current cycle. #[method(name = "get_stakers")] - async fn get_stakers(&self) -> RpcResult>; + async fn get_stakers( + &self, + page_request: Option, + ) -> RpcResult>; /// Returns operation(s) information associated to a given list of operation(s) ID(s). #[method(name = "get_operations")] diff --git a/massa-api/src/private.rs b/massa-api/src/private.rs index c1048cac6a6..7eafb34d80b 100644 --- a/massa-api/src/private.rs +++ b/massa-api/src/private.rs @@ -1,30 +1,32 @@ //! 
Copyright (c) 2022 MASSA LABS -use crate::config::APIConfig; -use crate::error::ApiError; use crate::{MassaRpcServer, Private, RpcServer, StopHandle, Value, API}; use async_trait::async_trait; use itertools::Itertools; use jsonrpsee::core::{Error as JsonRpseeError, RpcResult}; -use massa_execution_exports::ExecutionController; -use massa_models::api::{ - AddressInfo, BlockInfo, BlockSummary, DatastoreEntryInput, DatastoreEntryOutput, - EndorsementInfo, EventFilter, ListType, NodeStatus, OperationInfo, OperationInput, - ReadOnlyBytecodeExecution, ReadOnlyCall, ScrudOperation, TimeInterval, +use massa_api_exports::{ + address::AddressInfo, + block::{BlockInfo, BlockSummary}, + config::APIConfig, + datastore::{DatastoreEntryInput, DatastoreEntryOutput}, + endorsement::EndorsementInfo, + error::ApiError, + execution::{ExecuteReadOnlyResponse, ReadOnlyBytecodeExecution, ReadOnlyCall}, + node::NodeStatus, + operation::{OperationInfo, OperationInput}, + page::{PageRequest, PagedVec}, + ListType, ScrudOperation, TimeInterval, }; +use massa_execution_exports::ExecutionController; use massa_models::clique::Clique; use massa_models::composite::PubkeySig; -use massa_models::execution::ExecuteReadOnlyResponse; use massa_models::node::NodeId; use massa_models::output_event::SCOutputEvent; use massa_models::prehash::PreHashSet; use massa_models::{ - address::Address, - block::{Block, BlockId}, - endorsement::EndorsementId, - operation::OperationId, - slot::Slot, + address::Address, block::Block, block_id::BlockId, endorsement::EndorsementId, + execution::EventFilter, operation::OperationId, slot::Slot, }; use massa_network_exports::NetworkCommandSender; use massa_signature::KeyPair; @@ -173,8 +175,8 @@ impl MassaRpcServer for API { crate::wrong_api::>() } - async fn get_stakers(&self) -> RpcResult> { - crate::wrong_api::>() + async fn get_stakers(&self, _: Option) -> RpcResult> { + crate::wrong_api::>() } async fn get_operations(&self, _: Vec) -> RpcResult> { @@ -250,7 +252,7 @@ 
impl MassaRpcServer for API { async fn node_bootstrap_whitelist_allow_all(&self) -> RpcResult<()> { remove_file(self.0.api_settings.bootstrap_whitelist_path.clone()).map_err(|e| { ApiError::InternalServerError(format!( - "failed to delete bootsrap whitelist configuration file: {}", + "failed to delete bootstrap whitelist configuration file: {}", e )) .into() @@ -305,7 +307,7 @@ impl MassaRpcServer for API { } } -/// Run Search, Create, Read, Update, Delete operation on bootsrap list of IP(s) +/// Run Search, Create, Read, Update, Delete operation on bootstrap list of IP(s) fn run_scrud_operation( bootstrap_list_file: PathBuf, ips: Vec, @@ -330,7 +332,7 @@ fn run_scrud_operation( .and_then(|length| { if length == 0 { Err(ApiError::InternalServerError(format!( - "failed, bootsrap {} configuration file is empty", + "failed, bootstrap {} configuration file is empty", list_type )) .into()) @@ -340,7 +342,7 @@ fn run_scrud_operation( .and_then(|mut list_ips: BTreeSet| { if list_ips.is_empty() { return Err(ApiError::InternalServerError(format!( - "failed to execute delete operation, bootsrap {} is empty", + "failed to execute delete operation, bootstrap {} is empty", list_type )) .into()); @@ -373,7 +375,7 @@ fn get_file_len( .open(bootstrap_list_file) .map_err(|e| { ApiError::InternalServerError(format!( - "failed to read bootsrap {} configuration file: {}", + "failed to read bootstrap {} configuration file: {}", list_type, e )) .into() @@ -381,14 +383,14 @@ fn get_file_len( .and_then(|file| match file.metadata() { Ok(metadata) => Ok(metadata.len()), Err(e) => Err(ApiError::InternalServerError(format!( - "failed to read bootsrap {} configuration file metadata: {}", + "failed to read bootstrap {} configuration file metadata: {}", list_type, e )) .into()), }) } -/// Read bootsrap list IP(s) from json file +/// Read bootstrap list IP(s) from json file fn read_ips_from_jsonfile( bootstrap_list_file: PathBuf, list_type: &ListType, @@ -396,15 +398,15 @@ fn 
read_ips_from_jsonfile( std::fs::read_to_string(bootstrap_list_file) .map_err(|e| { ApiError::InternalServerError(format!( - "failed to read bootsrap {} configuration file: {}", + "failed to read bootstrap {} configuration file: {}", list_type, e )) .into() }) - .and_then(|bootsrap_list_str| { - serde_json::from_str(&bootsrap_list_str).map_err(|e| { + .and_then(|bootstrap_list_str| { + serde_json::from_str(&bootstrap_list_str).map_err(|e| { ApiError::InternalServerError(format!( - "failed to parse bootsrap {} configuration file: {}", + "failed to parse bootstrap {} configuration file: {}", list_type, e )) .into() @@ -412,7 +414,7 @@ fn read_ips_from_jsonfile( }) } -/// Write bootsrap list IP(s) from json file +/// Write bootstrap list IP(s) from json file fn write_ips_to_jsonfile( bootstrap_list_file: PathBuf, ips: BTreeSet, @@ -425,7 +427,7 @@ fn write_ips_to_jsonfile( .open(bootstrap_list_file) .map_err(|e| { ApiError::InternalServerError(format!( - "failed to create bootsrap {} configuration file: {}", + "failed to create bootstrap {} configuration file: {}", list_type, e )) .into() @@ -433,7 +435,7 @@ fn write_ips_to_jsonfile( .and_then(|file| { serde_json::to_writer_pretty(file, &ips).map_err(|e| { ApiError::InternalServerError(format!( - "failed to write bootsrap {} configuration file: {}", + "failed to write bootstrap {} configuration file: {}", list_type, e )) .into() diff --git a/massa-api/src/public.rs b/massa-api/src/public.rs index 58b82824d8d..fb50bf0c11d 100644 --- a/massa-api/src/public.rs +++ b/massa-api/src/public.rs @@ -1,25 +1,35 @@ //! 
Copyright (c) 2022 MASSA LABS #![allow(clippy::too_many_arguments)] -use crate::config::APIConfig; -use crate::error::ApiError; use crate::{MassaRpcServer, Public, RpcServer, StopHandle, Value, API}; use async_trait::async_trait; use jsonrpsee::core::{Error as JsonRpseeError, RpcResult}; +use massa_api_exports::{ + address::AddressInfo, + block::{BlockInfo, BlockInfoContent, BlockSummary}, + config::APIConfig, + datastore::{DatastoreEntryInput, DatastoreEntryOutput}, + endorsement::EndorsementInfo, + error::ApiError, + execution::{ExecuteReadOnlyResponse, ReadOnlyBytecodeExecution, ReadOnlyCall, ReadOnlyResult}, + node::NodeStatus, + operation::{OperationInfo, OperationInput}, + page::{PageRequest, PagedVec}, + slot::SlotAmount, + TimeInterval, +}; use massa_consensus_exports::block_status::DiscardReason; use massa_consensus_exports::ConsensusController; use massa_execution_exports::{ ExecutionController, ExecutionStackElement, ReadOnlyExecutionRequest, ReadOnlyExecutionTarget, }; -use massa_models::api::{ - BlockGraphStatus, DatastoreEntryInput, DatastoreEntryOutput, OperationInput, - ReadOnlyBytecodeExecution, ReadOnlyCall, SlotAmount, -}; -use massa_models::execution::ReadOnlyResult; use massa_models::operation::OperationDeserializer; -use massa_models::wrapped::WrappedDeserializer; +use massa_models::secure_share::SecureShareDeserializer; use massa_models::{ - block::Block, endorsement::WrappedEndorsement, error::ModelsError, operation::WrappedOperation, + block::{Block, BlockGraphStatus}, + endorsement::SecureShareEndorsement, + error::ModelsError, + operation::SecureShareOperation, timeslots, }; use massa_pos_exports::SelectorController; @@ -30,16 +40,12 @@ use itertools::{izip, Itertools}; use massa_models::datastore::DatastoreDeserializer; use massa_models::{ address::Address, - api::{ - AddressInfo, BlockInfo, BlockInfoContent, BlockSummary, EndorsementInfo, EventFilter, - NodeStatus, OperationInfo, TimeInterval, - }, - block::BlockId, + block_id::BlockId, 
clique::Clique, composite::PubkeySig, config::CompactConfig, endorsement::EndorsementId, - execution::ExecuteReadOnlyResponse, + execution::EventFilter, node::NodeId, operation::OperationId, output_event::SCOutputEvent, @@ -127,6 +133,7 @@ impl MassaRpcServer for API { address, bytecode, operation_datastore, + is_final, } in reqs { let address = address.unwrap_or_else(|| { @@ -170,6 +177,7 @@ impl MassaRpcServer for API { owned_addresses: vec![address], operation_datastore: op_datastore, }], + is_final, }; // run @@ -186,7 +194,9 @@ impl MassaRpcServer for API { ), gas_cost: result.as_ref().map_or_else(|_| 0, |v| v.gas_cost), output_events: result - .map_or_else(|_| Default::default(), |mut v| v.out.events.take()), + .as_ref() + .map_or_else(|_| Default::default(), |v| v.out.events.clone().0), + state_changes: result.map_or_else(|_| Default::default(), |v| v.out.state_changes), }; res.push(result); @@ -211,6 +221,7 @@ impl MassaRpcServer for API { target_function, parameter, caller_address, + is_final, } in reqs { let caller_address = caller_address.unwrap_or_else(|| { @@ -245,6 +256,7 @@ impl MassaRpcServer for API { operation_datastore: None, // should always be None }, ], + is_final, }; // run @@ -261,7 +273,9 @@ impl MassaRpcServer for API { ), gas_cost: result.as_ref().map_or_else(|_| 0, |v| v.gas_cost), output_events: result - .map_or_else(|_| Default::default(), |mut v| v.out.events.take()), + .as_ref() + .map_or_else(|_| Default::default(), |v| v.out.events.clone().0), + state_changes: result.map_or_else(|_| Default::default(), |v| v.out.state_changes), }; res.push(result); @@ -367,11 +381,44 @@ impl MassaRpcServer for API { }) .collect::>(); + let current_cycle = last_slot + .unwrap_or_else(|| Slot::new(0, 0)) + .get_cycle(api_settings.periods_per_cycle); + + let cycle_duration = match api_settings.t0.checked_mul(api_settings.periods_per_cycle) { + Ok(cycle_duration) => cycle_duration, + Err(e) => return Err(ApiError::TimeError(e).into()), + }; + + let 
current_cycle_time_result = if current_cycle == 0 { + Ok(api_settings.genesis_timestamp) + } else { + cycle_duration.checked_mul(current_cycle).and_then( + |elapsed_time_before_current_cycle| { + api_settings + .genesis_timestamp + .checked_add(elapsed_time_before_current_cycle) + }, + ) + }; + + let current_cycle_time = match current_cycle_time_result { + Ok(current_cycle_time) => current_cycle_time, + Err(e) => return Err(ApiError::TimeError(e).into()), + }; + + let next_cycle_time = match current_cycle_time.checked_add(cycle_duration) { + Ok(next_cycle_time) => next_cycle_time, + Err(e) => return Err(ApiError::TimeError(e).into()), + }; + Ok(NodeStatus { node_id, node_ip: network_config.routable_ip, version, current_time: now, + current_cycle_time, + next_cycle_time, connected_nodes, last_slot, next_slot, @@ -380,9 +427,7 @@ impl MassaRpcServer for API { network_stats, pool_stats, config, - current_cycle: last_slot - .unwrap_or_else(|| Slot::new(0, 0)) - .get_cycle(api_settings.periods_per_cycle), + current_cycle, }) } @@ -391,7 +436,10 @@ impl MassaRpcServer for API { Ok(consensus_controller.get_cliques()) } - async fn get_stakers(&self) -> RpcResult> { + async fn get_stakers( + &self, + page_request: Option, + ) -> RpcResult> { let execution_controller = self.0.execution_controller.clone(); let cfg = self.0.api_settings.clone(); @@ -418,14 +466,18 @@ impl MassaRpcServer for API { .get_cycle_active_rolls(curr_cycle) .into_iter() .collect::>(); + staker_vec .sort_by(|&(_, roll_counts_a), &(_, roll_counts_b)| roll_counts_b.cmp(&roll_counts_a)); - Ok(staker_vec) + + let paged_vec = PagedVec::new(staker_vec, page_request); + + Ok(paged_vec) } async fn get_operations(&self, ops: Vec) -> RpcResult> { // get the operations and the list of blocks that contain them from storage - let storage_info: Vec<(WrappedOperation, PreHashSet)> = { + let storage_info: Vec<(SecureShareOperation, PreHashSet)> = { let read_blocks = self.0.storage.read_blocks(); let read_ops = 
self.0.storage.read_operations(); ops.iter() @@ -490,9 +542,12 @@ impl MassaRpcServer for API { for (id, (operation, in_blocks), in_pool, is_final) in zipped_iterator { res.push(OperationInfo { id, - operation, in_pool, is_final, + thread: operation + .content_creator_address + .get_thread(api_cfg.thread_count), + operation, in_blocks: in_blocks.into_iter().collect(), }); } @@ -503,7 +558,7 @@ impl MassaRpcServer for API { async fn get_endorsements(&self, eds: Vec) -> RpcResult> { // get the endorsements and the list of blocks that contain them from storage - let storage_info: Vec<(WrappedEndorsement, PreHashSet)> = { + let storage_info: Vec<(SecureShareEndorsement, PreHashSet)> = { let read_blocks = self.0.storage.read_blocks(); let read_endos = self.0.storage.read_endorsements(); eds.iter() @@ -588,30 +643,33 @@ impl MassaRpcServer for API { let blocks = ids .into_iter() .filter_map(|id| { - if let Some(wrapped_block) = storage.read_blocks().get(&id).cloned() { - if let Some(graph_status) = consensus_controller - .get_block_statuses(&[id]) - .into_iter() - .next() - { - let is_final = graph_status == BlockGraphStatus::Final; - let is_in_blockclique = - graph_status == BlockGraphStatus::ActiveInBlockclique; - let is_candidate = graph_status == BlockGraphStatus::ActiveInBlockclique - || graph_status == BlockGraphStatus::ActiveInAlternativeCliques; - let is_discarded = graph_status == BlockGraphStatus::Discarded; - - return Some(BlockInfo { - id, - content: Some(BlockInfoContent { - is_final, - is_in_blockclique, - is_candidate, - is_discarded, - block: wrapped_block.content, - }), - }); - } + let content = if let Some(wrapped_block) = storage.read_blocks().get(&id) { + wrapped_block.content.clone() + } else { + return None; + }; + + if let Some(graph_status) = consensus_controller + .get_block_statuses(&[id]) + .into_iter() + .next() + { + let is_final = graph_status == BlockGraphStatus::Final; + let is_in_blockclique = graph_status == 
BlockGraphStatus::ActiveInBlockclique; + let is_candidate = graph_status == BlockGraphStatus::ActiveInBlockclique + || graph_status == BlockGraphStatus::ActiveInAlternativeCliques; + let is_discarded = graph_status == BlockGraphStatus::Discarded; + + return Some(BlockInfo { + id, + content: Some(BlockInfoContent { + is_final, + is_in_blockclique, + is_candidate, + is_discarded, + block: content, + }), + }); } None @@ -677,7 +735,7 @@ impl MassaRpcServer for API { is_stale: false, is_in_blockclique: blockclique.block_ids.contains(&id), slot: exported_block.header.content.slot, - creator: exported_block.header.creator_address, + creator: exported_block.header.content_creator_address, parents: exported_block.header.content.parents, }); } @@ -859,7 +917,7 @@ impl MassaRpcServer for API { if ops.len() as u64 > api_cfg.max_arguments { return Err(ApiError::BadRequest("too many arguments".into()).into()); } - let operation_deserializer = WrappedDeserializer::new(OperationDeserializer::new( + let operation_deserializer = SecureShareDeserializer::new(OperationDeserializer::new( api_cfg.max_datastore_value_length, api_cfg.max_function_name_length, api_cfg.max_parameter_size, @@ -874,7 +932,7 @@ impl MassaRpcServer for API { op_serialized.extend(op_input.signature.to_bytes()); op_serialized.extend(op_input.creator_public_key.to_bytes()); op_serialized.extend(op_input.serialized_content); - let (rest, op): (&[u8], WrappedOperation) = operation_deserializer + let (rest, op): (&[u8], SecureShareOperation) = operation_deserializer .deserialize::(&op_serialized) .map_err(|err| { ApiError::ModelsError(ModelsError::DeserializeError(err.to_string())) @@ -898,7 +956,7 @@ impl MassaRpcServer for API { } Err(e) => Err(e), }) - .collect::>>()?; + .collect::>>()?; to_send.store_operations(verified_ops.clone()); let ids: Vec = verified_ops.iter().map(|op| op.id).collect(); cmd_sender.add_operations(to_send.clone()); diff --git a/massa-async-pool/src/changes.rs 
b/massa-async-pool/src/changes.rs index 13073dafdc1..86d3efa5c5d 100644 --- a/massa-async-pool/src/changes.rs +++ b/massa-async-pool/src/changes.rs @@ -9,6 +9,7 @@ use nom::{ sequence::tuple, IResult, Parser, }; +use serde::{Deserialize, Serialize}; ///! Copyright (c) 2022 MASSA LABS @@ -19,7 +20,7 @@ use crate::{ }; /// Enum representing a value U with identifier T being added or deleted -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize)] pub enum Change { /// an item with identifier T and value U is added Add(T, U), @@ -39,7 +40,7 @@ enum ChangeId { } /// represents a list of additions and deletions to the asynchronous message pool -#[derive(Default, Debug, Clone, PartialEq, Eq)] +#[derive(Default, Debug, Clone, PartialEq, Eq, Deserialize, Serialize)] pub struct AsyncPoolChanges(pub Vec>); /// `AsyncPoolChanges` serializer diff --git a/massa-bootstrap/Cargo.toml b/massa-bootstrap/Cargo.toml index 4ebf71d2d0a..cb84c47e750 100644 --- a/massa-bootstrap/Cargo.toml +++ b/massa-bootstrap/Cargo.toml @@ -21,7 +21,7 @@ serde_json = "1.0" humantime = "2.1.0" thiserror = "1.0" parking_lot = { version = "0.12", features = ["deadlock_detection"] } -tokio = { version = "1.21", features = ["full"] } +tokio = { version = "1.23", features = ["full"] } tracing = "0.1" # custom modules diff --git a/massa-bootstrap/src/client.rs b/massa-bootstrap/src/client.rs index 1768b67b0d2..1f2533b3317 100644 --- a/massa-bootstrap/src/client.rs +++ b/massa-bootstrap/src/client.rs @@ -1,9 +1,9 @@ use humantime::format_duration; -use std::{net::SocketAddr, sync::Arc, time::Duration}; +use std::{collections::HashSet, net::SocketAddr, sync::Arc, time::Duration}; use massa_final_state::FinalState; use massa_logging::massa_trace; -use massa_models::{streaming_step::StreamingStep, version::Version}; +use massa_models::{node::NodeId, streaming_step::StreamingStep, version::Version}; use massa_signature::PublicKey; use massa_time::MassaTime; use 
parking_lot::RwLock; @@ -18,6 +18,7 @@ use crate::{ client_binder::BootstrapClientBinder, error::BootstrapError, messages::{BootstrapClientMessage, BootstrapServerMessage}, + settings::IpType, BootstrapConfig, Establisher, GlobalBootstrapState, }; @@ -407,6 +408,32 @@ async fn connect_to_server( )) } +fn filter_bootstrap_list( + bootstrap_list: Vec<(SocketAddr, NodeId)>, + ip_type: IpType, +) -> Vec<(SocketAddr, NodeId)> { + let ip_filter: fn(&(SocketAddr, NodeId)) -> bool = match ip_type { + IpType::IPv4 => |&(addr, _)| addr.is_ipv4(), + IpType::IPv6 => |&(addr, _)| addr.is_ipv6(), + IpType::Both => |_| true, + }; + + let prev_bootstrap_list_len = bootstrap_list.len(); + + let filtered_bootstrap_list: Vec<_> = bootstrap_list.into_iter().filter(ip_filter).collect(); + + let new_bootstrap_list_len = filtered_bootstrap_list.len(); + + debug!( + "Keeping {:?} bootstrap ips. Filtered out {} bootstrap addresses out of a total of {} bootstrap servers.", + ip_type, + prev_bootstrap_list_len as i32 - new_bootstrap_list_len as i32, + prev_bootstrap_list_len + ); + + filtered_bootstrap_list +} + /// Gets the state from a bootstrap server /// needs to be CANCELLABLE pub async fn get_state( @@ -437,15 +464,28 @@ pub async fn get_state( } return Ok(GlobalBootstrapState::new(final_state)); } + + // we filter the bootstrap list to keep only the ip addresses we are compatible with + let mut filtered_bootstrap_list = filter_bootstrap_list( + bootstrap_config.bootstrap_list.clone(), + bootstrap_config.bootstrap_protocol, + ); + // we are after genesis => bootstrap massa_trace!("bootstrap.lib.get_state.init_from_others", {}); - if bootstrap_config.bootstrap_list.is_empty() { + if filtered_bootstrap_list.is_empty() { return Err(BootstrapError::GeneralError( "no bootstrap nodes found in list".into(), )); } - let mut shuffled_list = bootstrap_config.bootstrap_list.clone(); - shuffled_list.shuffle(&mut StdRng::from_entropy()); + + // we shuffle the list + 
filtered_bootstrap_list.shuffle(&mut StdRng::from_entropy()); + + // we remove the duplicated node ids (if a bootstrap server appears both with its IPv4 and IPv6 address) + let mut unique_node_ids: HashSet = HashSet::new(); + filtered_bootstrap_list.retain(|e| unique_node_ids.insert(e.1)); + let mut next_bootstrap_message: BootstrapClientMessage = BootstrapClientMessage::AskBootstrapPart { last_slot: None, @@ -457,15 +497,23 @@ pub async fn get_state( last_consensus_step: StreamingStep::Started, }; let mut global_bootstrap_state = GlobalBootstrapState::new(final_state.clone()); + loop { - for (addr, pub_key) in shuffled_list.iter() { + for (addr, node_id) in filtered_bootstrap_list.iter() { if let Some(end) = end_timestamp { if MassaTime::now().expect("could not get now time") > end { panic!("This episode has come to an end, please get the latest testnet node version to continue"); } } info!("Start bootstrapping from {}", addr); - match connect_to_server(&mut establisher, bootstrap_config, addr, pub_key).await { + match connect_to_server( + &mut establisher, + bootstrap_config, + addr, + &node_id.get_public_key(), + ) + .await + { Ok(mut client) => { match bootstrap_from_server(bootstrap_config, &mut client, &mut next_bootstrap_message, &mut global_bootstrap_state,version) .await // cancellable diff --git a/massa-bootstrap/src/lib.rs b/massa-bootstrap/src/lib.rs index c9f3c934a4d..3f84a2e383f 100644 --- a/massa-bootstrap/src/lib.rs +++ b/massa-bootstrap/src/lib.rs @@ -37,6 +37,7 @@ pub use messages::{ }; pub use server::{start_bootstrap_server, BootstrapManager}; pub use settings::BootstrapConfig; +pub use settings::IpType; #[cfg(test)] pub mod tests; diff --git a/massa-bootstrap/src/messages.rs b/massa-bootstrap/src/messages.rs index 44800e8917c..24c1a3b5cc8 100644 --- a/massa-bootstrap/src/messages.rs +++ b/massa-bootstrap/src/messages.rs @@ -10,7 +10,7 @@ use massa_consensus_exports::bootstrapable_graph::{ use massa_executed_ops::{ExecutedOpsDeserializer, 
ExecutedOpsSerializer}; use massa_final_state::{StateChanges, StateChangesDeserializer, StateChangesSerializer}; use massa_ledger_exports::{KeyDeserializer, KeySerializer}; -use massa_models::block::{BlockId, BlockIdDeserializer, BlockIdSerializer}; +use massa_models::block_id::{BlockId, BlockIdDeserializer, BlockIdSerializer}; use massa_models::operation::OperationId; use massa_models::prehash::PreHashSet; use massa_models::serialization::{ diff --git a/massa-bootstrap/src/server.rs b/massa-bootstrap/src/server.rs index 7fa3160738b..21c2f5c3b01 100644 --- a/massa-bootstrap/src/server.rs +++ b/massa-bootstrap/src/server.rs @@ -6,7 +6,7 @@ use massa_consensus_exports::{bootstrapable_graph::BootstrapableGraph, Consensus use massa_final_state::{FinalState, FinalStateError}; use massa_logging::massa_trace; use massa_models::{ - block::BlockId, prehash::PreHashSet, slot::Slot, streaming_step::StreamingStep, + block_id::BlockId, prehash::PreHashSet, slot::Slot, streaming_step::StreamingStep, version::Version, }; use massa_network_exports::NetworkCommandSender; @@ -16,6 +16,7 @@ use parking_lot::RwLock; use std::{ collections::{hash_map, HashMap, HashSet}, net::{IpAddr, SocketAddr}, + path::PathBuf, sync::Arc, time::{Duration, Instant}, }; @@ -66,42 +67,6 @@ pub async fn start_bootstrap_server( if let Some(bind) = bootstrap_config.bind { let (manager_tx, manager_rx) = mpsc::channel::<()>(1); - let whitelist = if let Ok(whitelist) = - std::fs::read_to_string(&bootstrap_config.bootstrap_whitelist_path) - { - Some( - serde_json::from_str::>(whitelist.as_str()) - .map_err(|_| { - BootstrapError::GeneralError(String::from( - "Failed to parse bootstrap whitelist", - )) - })? 
- .into_iter() - .map(normalize_ip) - .collect(), - ) - } else { - None - }; - - let blacklist = if let Ok(blacklist) = - std::fs::read_to_string(&bootstrap_config.bootstrap_blacklist_path) - { - Some( - serde_json::from_str::>(blacklist.as_str()) - .map_err(|_| { - BootstrapError::GeneralError(String::from( - "Failed to parse bootstrap blacklist", - )) - })? - .into_iter() - .map(normalize_ip) - .collect(), - ) - } else { - None - }; - let join_handle = tokio::spawn(async move { BootstrapServer { consensus_controller, @@ -112,8 +77,6 @@ pub async fn start_bootstrap_server( bind, keypair, version, - whitelist, - blacklist, ip_hist_map: HashMap::with_capacity(bootstrap_config.ip_list_max_size), bootstrap_config, } @@ -139,26 +102,62 @@ struct BootstrapServer { keypair: KeyPair, bootstrap_config: BootstrapConfig, version: Version, - blacklist: Option>, - whitelist: Option>, ip_hist_map: HashMap, } +#[allow(clippy::result_large_err)] +#[allow(clippy::type_complexity)] +fn reload_whitelist_blacklist( + whitelist_path: &PathBuf, + blacklist_path: &PathBuf, +) -> Result<(Option>, Option>), BootstrapError> { + let whitelist = if let Ok(whitelist) = std::fs::read_to_string(whitelist_path) { + Some( + serde_json::from_str::>(whitelist.as_str()) + .map_err(|_| { + BootstrapError::GeneralError(String::from( + "Failed to parse bootstrap whitelist", + )) + })? + .into_iter() + .map(normalize_ip) + .collect(), + ) + } else { + None + }; + + let blacklist = if let Ok(blacklist) = std::fs::read_to_string(blacklist_path) { + Some( + serde_json::from_str::>(blacklist.as_str()) + .map_err(|_| { + BootstrapError::GeneralError(String::from( + "Failed to parse bootstrap blacklist", + )) + })? 
+ .into_iter() + .map(normalize_ip) + .collect(), + ) + } else { + None + }; + Ok((whitelist, blacklist)) +} + impl BootstrapServer { pub async fn run(mut self) -> Result<(), BootstrapError> { debug!("starting bootstrap server"); massa_trace!("bootstrap.lib.run", {}); let mut listener = self.establisher.get_listener(self.bind).await?; let mut bootstrap_sessions = FuturesUnordered::new(); - // let cache_timeout = self.bootstrap_config.cache_duration.to_duration(); - // let mut bootstrap_data: Option<( - // BootstrapableGraph, - // BootstrapPeers, - // Arc>, - // )> = None; - // let cache_timer = sleep(cache_timeout); + let cache_timeout = self.bootstrap_config.cache_duration.to_duration(); + let (mut whitelist, mut blacklist) = reload_whitelist_blacklist( + &self.bootstrap_config.bootstrap_whitelist_path, + &self.bootstrap_config.bootstrap_blacklist_path, + )?; + let mut cache_interval = tokio::time::interval(cache_timeout); let per_ip_min_interval = self.bootstrap_config.per_ip_min_interval.to_duration(); - // tokio::pin!(cache_timer); /* select! without the "biased" modifier will randomly select the 1st branch to check, then will check the next ones in the order they are written. 
@@ -177,11 +176,10 @@ impl BootstrapServer { break }, - // cache cleanup timeout - // _ = &mut cache_timer, if bootstrap_data.is_some() => { - // massa_trace!("bootstrap.lib.run.cache_unload", {}); - // bootstrap_data = None; - // } + // Whitelist cache timeout + _ = cache_interval.tick() => { + (whitelist, blacklist) = reload_whitelist_blacklist(&self.bootstrap_config.bootstrap_whitelist_path, &self.bootstrap_config.bootstrap_blacklist_path)?; + } // bootstrap session finished Some(_) = bootstrap_sessions.next() => { @@ -189,7 +187,7 @@ impl BootstrapServer { } // listener - res_connection = listener.accept(&self.whitelist, &self.blacklist) => { + res_connection = listener.accept(&whitelist, &blacklist) => { let (dplx, remote_addr) = if res_connection.is_ok() { res_connection.unwrap() } else { @@ -260,16 +258,24 @@ impl BootstrapServer { bootstrap_sessions.push(async move { let mut server = BootstrapServerBinder::new(dplx, keypair, config.max_bytes_read_write, config.max_bootstrap_message_size, config.thread_count, config.max_datastore_key_length, config.randomness_size_bytes, config.consensus_bootstrap_part_size); - match manage_bootstrap(&config, &mut server, data_execution, version, consensus_command_sender, network_command_sender).await { - Ok(_) => { - info!("bootstrapped peer {}", remote_addr) - }, - Err(BootstrapError::ReceivedError(error)) => debug!("bootstrap serving error received from peer {}: {}", remote_addr, error), - Err(err) => { - debug!("bootstrap serving error for peer {}: {}", remote_addr, err); + debug!("awaiting on bootstrap of peer {}", remote_addr); + match tokio::time::timeout(config.bootstrap_timeout.into(), manage_bootstrap(&config, &mut server, data_execution, version, consensus_command_sender, network_command_sender)).await { + Ok(mgmt) => match mgmt { + Ok(_) => { + info!("bootstrapped peer {}", remote_addr) + }, + Err(BootstrapError::ReceivedError(error)) => debug!("bootstrap serving error received from peer {}: {}", remote_addr, 
error), + Err(err) => { + debug!("bootstrap serving error for peer {}: {}", remote_addr, err); + // We allow unused result because we don't care if an error is thrown when sending the error message to the server we will close the socket anyway. + let _ = tokio::time::timeout(config.write_error_timeout.into(), server.send(BootstrapServerMessage::BootstrapError { error: err.to_string() })).await; + }, + } + Err(_timeout) => { + debug!("bootstrap timeout for peer {}", remote_addr); // We allow unused result because we don't care if an error is thrown when sending the error message to the server we will close the socket anyway. - let _ = tokio::time::timeout(config.write_error_timeout.into(), server.send(BootstrapServerMessage::BootstrapError { error: err.to_string() })).await; - }, + let _ = tokio::time::timeout(config.write_error_timeout.into(), server.send(BootstrapServerMessage::BootstrapError { error: format!("Bootstrap process timedout ({})", format_duration(config.bootstrap_timeout.to_duration())) })).await; + } } }); diff --git a/massa-bootstrap/src/settings.rs b/massa-bootstrap/src/settings.rs index 5024b5697d3..afba6c982e5 100644 --- a/massa-bootstrap/src/settings.rs +++ b/massa-bootstrap/src/settings.rs @@ -1,15 +1,28 @@ // Copyright (c) 2022 MASSA LABS -use massa_signature::PublicKey; +use massa_models::node::NodeId; use massa_time::MassaTime; use serde::Deserialize; use std::{net::SocketAddr, path::PathBuf}; +/// Bootstrap IP protocol version setting. +#[derive(Debug, Deserialize, Clone, Copy)] +pub enum IpType { + /// Bootstrap with both IPv4 and IPv6 protocols (default). + Both, + /// Bootstrap only with IPv4. + IPv4, + /// Bootstrap only with IPv6. + IPv6, +} + /// Bootstrap configuration. #[derive(Debug, Deserialize, Clone)] pub struct BootstrapConfig { /// Ip address of our bootstrap nodes and their public key. 
- pub bootstrap_list: Vec<(SocketAddr, PublicKey)>, + pub bootstrap_list: Vec<(SocketAddr, NodeId)>, + /// IP version filter for bootstrap list, targeting IpType::IPv4, IpType::IPv6 or IpType::Both. Defaults to IpType::Both. + pub bootstrap_protocol: IpType, /// Path to the bootstrap whitelist file. This whitelist define IPs that can bootstrap on your node. pub bootstrap_whitelist_path: PathBuf, /// Path to the bootstrap blacklist file. This whitelist define IPs that will not be able to bootstrap on your node. This list is optional. @@ -18,6 +31,9 @@ pub struct BootstrapConfig { pub bind: Option, /// connection timeout pub connect_timeout: MassaTime, + /// Time allocated to managing the bootstrapping process, + /// i.e. providing the ledger and consensus + pub bootstrap_timeout: MassaTime, /// readout timeout pub read_timeout: MassaTime, /// write timeout diff --git a/massa-bootstrap/src/tests/binders.rs b/massa-bootstrap/src/tests/binders.rs index 4e9eb610f1e..520b070695b 100644 --- a/massa-bootstrap/src/tests/binders.rs +++ b/massa-bootstrap/src/tests/binders.rs @@ -16,6 +16,7 @@ use massa_models::config::{ MAX_EXECUTED_OPS_CHANGES_LENGTH, MAX_EXECUTED_OPS_LENGTH, MAX_LEDGER_CHANGES_COUNT, MAX_OPERATIONS_PER_BLOCK, MAX_PRODUCTION_STATS_LENGTH, MAX_ROLLS_COUNT_LENGTH, THREAD_COUNT, }; +use massa_models::node::NodeId; use massa_models::version::Version; use massa_signature::{KeyPair, PublicKey}; use serial_test::serial; @@ -24,7 +25,7 @@ use tokio::io::duplex; lazy_static::lazy_static! 
{ pub static ref BOOTSTRAP_CONFIG_KEYPAIR: (BootstrapConfig, KeyPair) = { let keypair = KeyPair::generate(); - (get_bootstrap_config(keypair.get_public_key()), keypair) + (get_bootstrap_config(NodeId::new(keypair.get_public_key())), keypair) }; } @@ -76,8 +77,10 @@ async fn test_binders() { BOOTSTRAP_RANDOMNESS_SIZE_BYTES, CONSENSUS_BOOTSTRAP_PART_SIZE, ); - let mut client = - BootstrapClientBinder::test_default(client, bootstrap_config.bootstrap_list[0].1); + let mut client = BootstrapClientBinder::test_default( + client, + bootstrap_config.bootstrap_list[0].1.get_public_key(), + ); let server_thread = tokio::spawn(async move { // Test message 1 @@ -170,8 +173,10 @@ async fn test_binders_double_send_server_works() { BOOTSTRAP_RANDOMNESS_SIZE_BYTES, CONSENSUS_BOOTSTRAP_PART_SIZE, ); - let mut client = - BootstrapClientBinder::test_default(client, bootstrap_config.bootstrap_list[0].1); + let mut client = BootstrapClientBinder::test_default( + client, + bootstrap_config.bootstrap_list[0].1.get_public_key(), + ); let server_thread = tokio::spawn(async move { // Test message 1 @@ -249,8 +254,10 @@ async fn test_binders_try_double_send_client_works() { BOOTSTRAP_RANDOMNESS_SIZE_BYTES, CONSENSUS_BOOTSTRAP_PART_SIZE, ); - let mut client = - BootstrapClientBinder::test_default(client, bootstrap_config.bootstrap_list[0].1); + let mut client = BootstrapClientBinder::test_default( + client, + bootstrap_config.bootstrap_list[0].1.get_public_key(), + ); let server_thread = tokio::spawn(async move { // Test message 1 diff --git a/massa-bootstrap/src/tests/scenarios.rs b/massa-bootstrap/src/tests/scenarios.rs index 69555aa173b..6c8a6953656 100644 --- a/massa-bootstrap/src/tests/scenarios.rs +++ b/massa-bootstrap/src/tests/scenarios.rs @@ -27,7 +27,9 @@ use massa_final_state::{ }; use massa_hash::{Hash, HASH_SIZE_BYTES}; use massa_ledger_exports::LedgerConfig; -use massa_models::{address::Address, slot::Slot, streaming_step::StreamingStep, version::Version}; +use massa_models::{ + 
address::Address, node::NodeId, slot::Slot, streaming_step::StreamingStep, version::Version, +}; use massa_models::{ config::{ MAX_ASYNC_MESSAGE_DATA, MAX_ASYNC_POOL_LENGTH, MAX_DATASTORE_KEY_LENGTH, POS_SAVED_CYCLES, @@ -50,7 +52,7 @@ use tokio::sync::mpsc; lazy_static::lazy_static! { pub static ref BOOTSTRAP_CONFIG_KEYPAIR: (BootstrapConfig, KeyPair) = { let keypair = KeyPair::generate(); - (get_bootstrap_config(keypair.get_public_key()), keypair) + (get_bootstrap_config(NodeId::new(keypair.get_public_key())), keypair) }; } diff --git a/massa-bootstrap/src/tests/tools.rs b/massa-bootstrap/src/tests/tools.rs index 08486ce29d5..92a6d9ec610 100644 --- a/massa-bootstrap/src/tests/tools.rs +++ b/massa-bootstrap/src/tests/tools.rs @@ -1,7 +1,7 @@ // Copyright (c) 2022 MASSA LABS use super::mock_establisher::Duplex; -use crate::settings::BootstrapConfig; +use crate::settings::{BootstrapConfig, IpType}; use bitvec::vec::BitVec; use massa_async_pool::test_exports::{create_async_pool, get_random_message}; use massa_async_pool::{AsyncPoolChanges, Change}; @@ -28,18 +28,21 @@ use massa_models::config::{ MAX_OPERATION_DATASTORE_KEY_LENGTH, MAX_OPERATION_DATASTORE_VALUE_LENGTH, MAX_PARAMETERS_SIZE, MAX_PRODUCTION_STATS_LENGTH, MAX_ROLLS_COUNT_LENGTH, PERIODS_PER_CYCLE, THREAD_COUNT, }; +use massa_models::node::NodeId; use massa_models::{ address::Address, amount::Amount, + block::Block, block::BlockSerializer, - block::{Block, BlockHeader, BlockHeaderSerializer, BlockId}, + block_header::{BlockHeader, BlockHeaderSerializer}, + block_id::BlockId, endorsement::Endorsement, endorsement::EndorsementSerializer, operation::OperationId, prehash::PreHashMap, + secure_share::Id, + secure_share::SecureShareContent, slot::Slot, - wrapped::Id, - wrapped::WrappedContent, }; use massa_network_exports::{BootstrapPeers, NetworkCommand}; use massa_pos_exports::{CycleInfo, DeferredCredits, PoSChanges, PoSFinalState, ProductionStats}; @@ -272,9 +275,11 @@ pub fn get_dummy_signature(s: &str) -> 
Signature { priv_key.sign(&Hash::compute_from(s.as_bytes())).unwrap() } -pub fn get_bootstrap_config(bootstrap_public_key: PublicKey) -> BootstrapConfig { +pub fn get_bootstrap_config(bootstrap_public_key: NodeId) -> BootstrapConfig { BootstrapConfig { bind: Some("0.0.0.0:31244".parse().unwrap()), + bootstrap_protocol: IpType::Both, + bootstrap_timeout: 120000.into(), connect_timeout: 200.into(), retry_delay: 200.into(), max_ping: MassaTime::from_millis(500), @@ -368,15 +373,17 @@ pub fn assert_eq_bootstrap_graph(v1: &BootstrapableGraph, v2: &BootstrapableGrap pub fn get_boot_state() -> BootstrapableGraph { let keypair = KeyPair::generate(); - let block = Block::new_wrapped( + let block = Block::new_verifiable( Block { - header: BlockHeader::new_wrapped( + header: BlockHeader::new_verifiable( BlockHeader { - slot: Slot::new(1, 1), + // associated slot + // all header endorsements are supposed to point towards this one + slot: Slot::new(1, 0), parents: vec![get_dummy_block_id("p1"); THREAD_COUNT as usize], operation_merkle_root: Hash::compute_from("op_hash".as_bytes()), endorsements: vec![ - Endorsement::new_wrapped( + Endorsement::new_verifiable( Endorsement { slot: Slot::new(1, 0), index: 1, @@ -386,9 +393,9 @@ pub fn get_boot_state() -> BootstrapableGraph { &keypair, ) .unwrap(), - Endorsement::new_wrapped( + Endorsement::new_verifiable( Endorsement { - slot: Slot::new(4, 1), + slot: Slot::new(1, 0), index: 3, endorsed_block: get_dummy_block_id("p1"), }, diff --git a/massa-client/Cargo.toml b/massa-client/Cargo.toml index 7e389fc1317..d065ef096de 100644 --- a/massa-client/Cargo.toml +++ b/massa-client/Cargo.toml @@ -20,8 +20,9 @@ serde_json = "1.0" structopt = { version = "0.3", features = ["paw"] } strum = "0.24" strum_macros = "0.24" -tokio = { version = "1.21", features = ["full"] } +tokio = { version = "1.23", features = ["full"] } # custom modules +massa_api_exports = { path = "../massa-api-exports" } massa_models = { path = "../massa-models" } 
massa_signature = { path = "../massa-signature" } massa_time = { path = "../massa-time" } diff --git a/massa-client/base_config/config.toml b/massa-client/base_config/config.toml index db17f5e366d..586465305c7 100644 --- a/massa-client/base_config/config.toml +++ b/massa-client/base_config/config.toml @@ -3,11 +3,12 @@ history_file_path = "config/.massa_history" timeout = 1000 [default_node] +# The IP of your node. Works both with IPv4 (like 127.0.0.1) and IPv6 (like ::1) addresses, if the node is bound to the correct protocol. ip = "127.0.0.1" private_port = 33034 public_port = 33035 -api_port = 33036 -[http] + +[client] # maximum size in bytes of a request max_request_body_size = 52428800 # request timeout @@ -22,3 +23,7 @@ api_port = 33036 max_log_length = 4096 # custom headers passed to the server with every request (default is empty). headers = [] + + [client.http] + # whether to enable HTTP. + enabled = true \ No newline at end of file diff --git a/massa-client/src/cmds.rs b/massa-client/src/cmds.rs index b05f3e6336b..ea8967dfeaf 100644 --- a/massa-client/src/cmds.rs +++ b/massa-client/src/cmds.rs @@ -1,20 +1,23 @@ // Copyright (c) 2022 MASSA LABS use crate::repl::Output; -use anyhow::{anyhow, bail, Error, Result}; +use anyhow::{anyhow, bail, Result}; use console::style; -use massa_models::api::{ - AddressInfo, CompactAddressInfo, DatastoreEntryInput, EventFilter, OperationInput, +use massa_api_exports::{ + address::{AddressInfo, CompactAddressInfo}, + datastore::DatastoreEntryInput, + execution::{ReadOnlyBytecodeExecution, ReadOnlyCall}, + operation::OperationInput, }; -use massa_models::api::{ReadOnlyBytecodeExecution, ReadOnlyCall}; use massa_models::node::NodeId; use massa_models::prehash::PreHashMap; use massa_models::timeslots::get_current_latest_block_slot; use massa_models::{ address::Address, amount::Amount, - block::BlockId, + block_id::BlockId, endorsement::EndorsementId, + execution::EventFilter, operation::{Operation, OperationId, OperationType}, 
slot::Slot, }; @@ -23,7 +26,7 @@ use massa_signature::KeyPair; use massa_time::MassaTime; use massa_wallet::Wallet; use serde::Serialize; -use std::collections::{BTreeMap, HashMap}; +use std::collections::{BTreeMap, HashMap, HashSet}; use std::fmt::Write as _; use std::fmt::{Debug, Display}; use std::net::IpAddr; @@ -34,62 +37,80 @@ use strum_macros::{Display, EnumIter, EnumString}; /// All the client commands /// the order they are defined is the order they are displayed in so be careful /// Maybe it would be worth renaming some of them for consistency +/// Use props(pwd_not_needed = "true") if the command does not need an access to the wallet, to avoid unnecessary +/// prompting of the user. #[allow(non_camel_case_types)] #[derive(Debug, PartialEq, Eq, EnumIter, EnumMessage, EnumString, EnumProperty, Display)] pub enum Command { - #[strum(ascii_case_insensitive, message = "display this help")] + #[strum( + ascii_case_insensitive, + props(pwd_not_needed = "true"), + message = "display this help" + )] help, - #[strum(ascii_case_insensitive, message = "exit the prompt")] + #[strum( + ascii_case_insensitive, + props(pwd_not_needed = "true"), + message = "exit the prompt" + )] exit, #[strum( ascii_case_insensitive, - props(args = "IpAddr1 IpAddr2 ..."), + props(args = "IpAddr1 IpAddr2 ...", pwd_not_needed = "true"), message = "unban given IP address(es)" )] node_unban_by_ip, #[strum( ascii_case_insensitive, - props(args = "Id1 Id2 ..."), + props(args = "Id1 Id2 ...", pwd_not_needed = "true"), message = "unban given id(s)" )] node_unban_by_id, #[strum( ascii_case_insensitive, - props(args = "IpAddr1 IpAddr2 ..."), + props(args = "IpAddr1 IpAddr2 ...", pwd_not_needed = "true"), message = "ban given IP address(es)" )] node_ban_by_ip, #[strum( ascii_case_insensitive, - props(args = "Id1 Id2 ..."), + props(args = "Id1 Id2 ...", pwd_not_needed = "true"), message = "ban given id(s)" )] node_ban_by_id, - #[strum(ascii_case_insensitive, message = "stops the node")] + #[strum( 
+ ascii_case_insensitive, + props(pwd_not_needed = "true"), + message = "stops the node" + )] node_stop, - #[strum(ascii_case_insensitive, message = "show staking addresses")] + #[strum( + ascii_case_insensitive, + props(pwd_not_needed = "true"), + message = "show staking addresses" + )] node_get_staking_addresses, #[strum( ascii_case_insensitive, props(args = "Address1 Address2 ..."), - message = "remove staking addresses" + message = "starts staking with the given addresses" )] - node_remove_staking_addresses, + node_start_staking, #[strum( ascii_case_insensitive, - props(args = "SecretKey1 SecretKey2 ..."), - message = "add staking secret keys" + props(args = "Address1 Address2 ..."), + message = "stops staking with the given addresses" )] - node_add_staking_secret_keys, + node_stop_staking, #[strum( ascii_case_insensitive, @@ -100,62 +121,63 @@ pub enum Command { #[strum( ascii_case_insensitive, - props(args = "(add, remove or allow-all) [IpAddr]"), - message = "Manage boostrap whitelist IP address(es).No args returns the whitelist blacklist" + props(args = "(add, remove or allow-all) [IpAddr]", pwd_not_needed = "true"), + message = "Manage boostrap whitelist IP address(es). No args returns the whitelist blacklist" )] - node_bootsrap_whitelist, + node_bootstrap_whitelist, #[strum( ascii_case_insensitive, - props(args = "(add or remove) [IpAddr]"), + props(args = "(add or remove) [IpAddr]", pwd_not_needed = "true"), message = "Manage boostrap blacklist IP address(es). No args returns the boostrap blacklist" )] - node_bootsrap_blacklist, + node_bootstrap_blacklist, #[strum( ascii_case_insensitive, - props(args = "(add or remove) [IpAddr]"), + props(args = "(add or remove) [IpAddr]", pwd_not_needed = "true"), message = "Manage peers whitelist IP address(es). No args returns the peers whitelist" )] node_peers_whitelist, #[strum( ascii_case_insensitive, + props(pwd_not_needed = "true"), message = "show the status of the node (reachable? 
number of peers connected, consensus, version, config parameter summary...)" )] get_status, #[strum( ascii_case_insensitive, - props(args = "Address1 Address2 ..."), + props(args = "Address1 Address2 ...", pwd_not_needed = "true"), message = "get info about a list of addresses (balances, block creation, ...)" )] get_addresses, #[strum( ascii_case_insensitive, - props(args = "Address Key"), + props(args = "Address Key", pwd_not_needed = "true"), message = "get a datastore entry (key must be UTF-8)" )] get_datastore_entry, #[strum( ascii_case_insensitive, - props(args = "BlockId"), + props(args = "BlockId", pwd_not_needed = "true"), message = "show info about a block (content, finality ...)" )] get_blocks, #[strum( ascii_case_insensitive, - props(args = "EndorsementId1 EndorsementId2 ..."), + props(args = "EndorsementId1 EndorsementId2 ...", pwd_not_needed = "true"), message = "show info about a list of endorsements (content, finality ...)" )] get_endorsements, #[strum( ascii_case_insensitive, - props(args = "OperationId1 OperationId2 ..."), + props(args = "OperationId1 OperationId2 ...", pwd_not_needed = "true"), message = "show info about a list of operations(content, finality ...) 
" )] get_operations, @@ -163,7 +185,8 @@ pub enum Command { #[strum( ascii_case_insensitive, props( - args = "start=Slot end=Slot emitter_address=Address caller_address=Address operation_id=OperationId is_final=bool is_error=bool" + args = "start=slot_period,slot_thread end=slot_period,slot_thread emitter_address=Address caller_address=Address operation_id=OperationId is_final=bool is_error=bool", + pwd_not_needed = "true" ), message = "show events emitted by smart contracts with various filters" )] @@ -171,10 +194,25 @@ pub enum Command { #[strum( ascii_case_insensitive, - message = "show wallet info (keys, addresses, balances ...)" + props(args = "show-all-keys"), + message = "show wallet info (addresses, balances ...)" )] wallet_info, + #[strum( + ascii_case_insensitive, + props(args = "Address1 Address2 .."), + message = "get public key of the given addresses" + )] + wallet_get_public_key, + + #[strum( + ascii_case_insensitive, + props(args = "Address1 Address2 ..."), + message = "get secret key of the given addresses" + )] + wallet_get_secret_key, + #[strum( ascii_case_insensitive, message = "generate a secret key and add it into the wallet" @@ -225,39 +263,50 @@ pub enum Command { #[strum( ascii_case_insensitive, - props(args = "SenderAddress PathToBytecode MaxGas Fee",), + props(args = "SenderAddress PathToBytecode MaxGas Fee"), message = "create and send an operation containing byte code" )] execute_smart_contract, #[strum( ascii_case_insensitive, - props(args = "SenderAddress TargetAddress FunctionName Parameter MaxGas Coins Fee",), + props(args = "SenderAddress TargetAddress FunctionName Parameter MaxGas Coins Fee"), message = "create and send an operation to call a function of a smart contract" )] call_smart_contract, #[strum( ascii_case_insensitive, - props(args = "PathToBytecode MaxGas Address",), - message = "execute byte code, address is optional. 
Nothing is really executed on chain" + props( + args = "PathToBytecode MaxGas Address IsFinal", + pwd_not_needed = "true" + ), + message = "execute byte code, address is optional, is_final is optional. Nothing is really executed on chain" )] read_only_execute_smart_contract, #[strum( ascii_case_insensitive, - props(args = "TargetAddress TargetFunction Parameter MaxGas SenderAddress",), - message = "call a smart contract function, sender address is optional. Nothing is really executed on chain" + props( + args = "TargetAddress TargetFunction Parameter MaxGas SenderAddress IsFinal", + pwd_not_needed = "true" + ), + message = "call a smart contract function, sender address is optional, is_final is optional. Nothing is really executed on chain" )] read_only_call, #[strum( ascii_case_insensitive, + props(pwd_not_needed = "true"), message = "show time remaining to end of current episode" )] when_episode_ends, - #[strum(ascii_case_insensitive, message = "tells you when moon")] + #[strum( + ascii_case_insensitive, + props(pwd_not_needed = "true"), + message = "tells you when moon" + )] when_moon, } @@ -313,12 +362,16 @@ struct ExtendedWalletEntry { pub keypair: KeyPair, /// address and balance information pub address_info: CompactAddressInfo, + /// whether to display the public/secret keys or just the address info + pub show_keys: bool, } impl Display for ExtendedWalletEntry { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - writeln!(f, "Secret key: {}", self.keypair)?; - writeln!(f, "Public key: {}", self.keypair.get_public_key())?; + if self.show_keys { + writeln!(f, "Secret key: {}", self.keypair)?; + writeln!(f, "Public key: {}", self.keypair.get_public_key())?; + } writeln!(f, "{}", self.address_info)?; writeln!(f, "\n=====\n")?; Ok(()) @@ -332,7 +385,7 @@ pub struct ExtendedWallet(PreHashMap); impl ExtendedWallet { /// Reorganize everything into an extended wallet - fn new(wallet: &Wallet, addresses_info: &[AddressInfo]) -> Result { + fn 
new(wallet: &Wallet, addresses_info: &[AddressInfo], show_keys: bool) -> Result { Ok(ExtendedWallet( addresses_info .iter() @@ -346,6 +399,7 @@ impl ExtendedWallet { ExtendedWalletEntry { keypair: keypair.clone(), address_info: x.compact(), + show_keys, }, )) }) @@ -388,18 +442,24 @@ impl Command { ) } + /// Returns true if the command needs wallet access + pub(crate) fn is_pwd_needed(&self) -> bool { + !(self.get_str("pwd_not_needed").is_some() + && self.get_str("pwd_not_needed").unwrap() == "true") + } + /// run a given command /// /// # parameters /// - client: the RPC client - /// - wallet: an access to the wallet + /// - wallet_opt: an optional access to the wallet /// - parameters: the parsed parameters /// - json: true if --json was passed as an option /// it means that we don't want to print anything we just want the json output pub(crate) async fn run( &self, client: &Client, - wallet: &mut Wallet, + wallet_opt: &mut Option, parameters: &[String], json: bool, ) -> Result> { @@ -493,36 +553,9 @@ impl Command { } } - Command::node_remove_staking_addresses => { - let addresses = parse_vec::
(parameters)?; - match client.private.remove_staking_addresses(addresses).await { - Ok(()) => { - if !json { - println!("Addresses successfully removed!") - } - } - Err(e) => rpc_error!(e), - } - Ok(Box::new(())) - } - - Command::node_add_staking_secret_keys => { - match client - .private - .add_staking_secret_keys(parameters.to_vec()) - .await - { - Ok(()) => { - if !json { - println!("Keys successfully added!") - } - } - Err(e) => rpc_error!(e), - }; - Ok(Box::new(())) - } - Command::node_testnet_rewards_program_ownership_proof => { + let wallet = wallet_opt.as_mut().unwrap(); + if parameters.len() != 2 { bail!("wrong number of parameters"); } @@ -583,8 +616,8 @@ impl Command { } Command::get_blocks => { - if parameters.len() != 1 { - bail!("wrong param numbers, expecting at least one IP address") + if parameters.is_empty() { + bail!("wrong param numbers, expecting at least one block id") } let block_ids = parse_vec::(parameters)?; match client.public.get_blocks(block_ids).await { @@ -625,17 +658,17 @@ impl Command { if s.len() == 2 && p_list.contains(&s[0]) { p.insert(s[0], s[1]); } else { - bail!("invalid parameter"); + bail!("invalid parameter: {}, type \"help get_filtered_sc_output_event\" to get the list of valid parameters", v); } } let filter = EventFilter { - start: parse_key_value(&p, p_list[0]), - end: parse_key_value(&p, p_list[1]), - emitter_address: parse_key_value(&p, p_list[2]), - original_caller_address: parse_key_value(&p, p_list[3]), - original_operation_id: parse_key_value(&p, p_list[4]), - is_final: parse_key_value(&p, p_list[5]), - is_error: parse_key_value(&p, p_list[6]), + start: parse_key_value(&p, p_list[0])?, + end: parse_key_value(&p, p_list[1])?, + emitter_address: parse_key_value(&p, p_list[2])?, + original_caller_address: parse_key_value(&p, p_list[3])?, + original_operation_id: parse_key_value(&p, p_list[4])?, + is_final: parse_key_value(&p, p_list[5])?, + is_error: parse_key_value(&p, p_list[6])?, }; match 
client.public.get_filtered_sc_output_event(filter).await { Ok(events) => Ok(Box::new(events)), @@ -644,34 +677,133 @@ impl Command { } Command::wallet_info => { - if !json { - client_warning!("do not share your key"); + let show_keys = parameters.len() == 1 && parameters[0] == "show-all-keys"; + + let wallet = wallet_opt.as_mut().unwrap(); + + if !json && show_keys { + client_warning!("do not share your secret key"); } match client .public .get_addresses(wallet.get_full_wallet().keys().copied().collect()) .await { - Ok(addresses_info) => { - Ok(Box::new(ExtendedWallet::new(wallet, &addresses_info)?)) + Ok(addresses_info) => Ok(Box::new(ExtendedWallet::new( + wallet, + &addresses_info, + show_keys, + )?)), + Err(_) => match show_keys { + true => Ok(Box::new(wallet.clone())), + false => Ok(Box::new(wallet.get_wallet_address_list())), + }, // FIXME + } + } + + Command::wallet_get_public_key => { + let wallet = wallet_opt.as_mut().unwrap(); + + let addresses = parse_vec::
(parameters)?; + + let hashset: HashSet<_> = addresses.into_iter().collect(); + + let keypair: Vec<(&Address, Option<&KeyPair>)> = hashset + .iter() + .map(|addr| (addr, wallet.get_full_wallet().get(addr))) + .filter(|kp| kp.1.is_some()) + .collect(); + + let addr_public_keys: Vec<_> = keypair + .iter() + .map(|kp| (*kp.0, kp.1.unwrap().get_public_key())) + .collect(); + + Ok(Box::new(addr_public_keys)) + } + + Command::wallet_get_secret_key => { + let wallet = wallet_opt.as_mut().unwrap(); + + if !json { + client_warning!("do not share your secret key"); + } + + let addresses = parse_vec::
(parameters)?; + + let hashset: HashSet<_> = addresses.into_iter().collect(); + + let keypair: Vec<(&Address, Option<&KeyPair>)> = hashset + .iter() + .map(|addr| (addr, wallet.get_full_wallet().get(addr))) + .filter(|kp| kp.1.is_some()) + .collect(); + + let addr_secret_keys: Vec<_> = keypair + .iter() + .map(|kp| (*kp.0, kp.1.unwrap().to_owned())) + .collect(); + + Ok(Box::new(addr_secret_keys)) + } + + Command::node_start_staking => { + let wallet = wallet_opt.as_mut().unwrap(); + + let addresses = parse_vec::
(parameters)?; + let secret: Vec> = addresses + .iter() + .map(|addr| wallet.get_full_wallet().get(addr)) + .collect(); + let secret_str = secret + .iter() + .filter(|a| a.is_some()) + .map(|s| format!("{}", s.unwrap())) + .collect(); + + match client.private.add_staking_secret_keys(secret_str).await { + Ok(()) => { + if !json { + println!("Keys successfully added!") + } } - Err(_) => Ok(Box::new(wallet.clone())), // FIXME + Err(e) => rpc_error!(e), + }; + Ok(Box::new(())) + } + + Command::node_stop_staking => { + let addresses = parse_vec::
(parameters)?; + match client.private.remove_staking_addresses(addresses).await { + Ok(()) => { + if !json { + println!("Addresses successfully removed!") + } + } + Err(e) => rpc_error!(e), } + Ok(Box::new(())) } Command::wallet_generate_secret_key => { + let wallet = wallet_opt.as_mut().unwrap(); + let key = KeyPair::generate(); let ad = wallet.add_keypairs(vec![key])?[0]; if json { Ok(Box::new(ad.to_string())) } else { println!("Generated {} address and added it to the wallet", ad); - println!("Type `node_add_staking_secret_keys ` to start staking with this key.\n"); + println!( + "Type `node_start_staking
` to start staking with this address.\n" + ); Ok(Box::new(())) } } Command::wallet_add_secret_keys => { + let wallet = wallet_opt.as_mut().unwrap(); + let keypairs = parse_vec::(parameters)?; let addresses = wallet.add_keypairs(keypairs)?; if json { @@ -680,12 +812,14 @@ impl Command { for address in addresses { println!("Derived and added address {} to the wallet.", address); } - println!("Type `node_add_staking_secret_keys ` to start staking with the corresponding key.\n"); + println!("Type `node_start_staking
` to start staking with the corresponding key.\n"); } Ok(Box::new(())) } Command::wallet_remove_addresses => { + let wallet = wallet_opt.as_mut().unwrap(); + let mut res = "".to_string(); let addresses = parse_vec::
(parameters)?; match wallet.remove_addresses(&addresses) { @@ -703,6 +837,8 @@ impl Command { } Command::buy_rolls => { + let wallet = wallet_opt.as_mut().unwrap(); + if parameters.len() != 3 { bail!("wrong number of parameters"); } @@ -741,7 +877,7 @@ impl Command { } if let Ok(staked_keys) = client.private.get_staking_addresses().await { if !staked_keys.contains(&addr) { - client_warning!("You are buying rolls with an address not registered for staking. Don't forget to run 'node_add_staking_secret_keys '"); } } } @@ -757,6 +893,8 @@ impl Command { } Command::sell_rolls => { + let wallet = wallet_opt.as_mut().unwrap(); + if parameters.len() != 3 { bail!("wrong number of parameters"); } @@ -791,6 +929,8 @@ impl Command { } Command::send_transaction => { + let wallet = wallet_opt.as_mut().unwrap(); + if parameters.len() != 4 { bail!("wrong number of parameters"); } @@ -854,6 +994,8 @@ impl Command { Ok(Box::new(())) } Command::execute_smart_contract => { + let wallet = wallet_opt.as_mut().unwrap(); + if parameters.len() != 4 { bail!("wrong number of parameters"); } @@ -902,6 +1044,8 @@ impl Command { .await } Command::call_smart_contract => { + let wallet = wallet_opt.as_mut().unwrap(); + if parameters.len() != 7 { bail!("wrong number of parameters"); } @@ -955,6 +1099,8 @@ impl Command { .await } Command::wallet_sign => { + let wallet = wallet_opt.as_mut().unwrap(); + if parameters.len() != 2 { bail!("wrong number of parameters"); } @@ -967,7 +1113,7 @@ impl Command { } } Command::read_only_execute_smart_contract => { - if parameters.len() != 2 && parameters.len() != 3 { + if parameters.len() != 2 && parameters.len() <= 4 { bail!("wrong number of parameters"); } @@ -978,6 +1124,11 @@ impl Command { } else { None }; + let is_final = if let Some(adr) = parameters.get(3) { + adr.parse::()? 
+ } else { + false + }; let bytecode = get_file_as_byte_vec(&path).await?; match client .public @@ -986,6 +1137,7 @@ impl Command { bytecode, address, operation_datastore: None, // TODO - #3072 + is_final, }) .await { @@ -994,7 +1146,7 @@ impl Command { } } Command::read_only_call => { - if parameters.len() != 4 && parameters.len() != 5 { + if parameters.len() != 4 && parameters.len() <= 6 { bail!("wrong number of parameters"); } @@ -1007,6 +1159,11 @@ impl Command { } else { None }; + let is_final = if let Some(adr) = parameters.get(5) { + adr.parse::()? + } else { + false + }; match client .public .execute_read_only_call(ReadOnlyCall { @@ -1015,6 +1172,7 @@ impl Command { target_function, parameter, max_gas, + is_final, }) .await { @@ -1022,10 +1180,10 @@ impl Command { Err(e) => rpc_error!(e), } } - Command::node_bootsrap_blacklist => { + Command::node_bootstrap_blacklist => { if parameters.is_empty() { match client.private.node_bootstrap_blacklist().await { - Ok(bootsraplist_ips) => Ok(Box::new(bootsraplist_ips)), + Ok(bootstraplist_ips) => Ok(Box::new(bootstraplist_ips)), Err(e) => rpc_error!(e), } } else { @@ -1046,7 +1204,7 @@ impl Command { Ok(()) => { if !json { println!( - "Request of bootsrap blacklisting successfully sent!" + "Request of bootstrap blacklisting successfully sent!" 
) } Ok(Box::new(())) @@ -1062,7 +1220,7 @@ impl Command { { Ok(()) => { if !json { - println!("Request of remove from bootsrap blacklist successfully sent!") + println!("Request of remove from bootstrap blacklist successfully sent!") } Ok(Box::new(())) } @@ -1076,12 +1234,12 @@ impl Command { res } } - Command::node_bootsrap_whitelist => { + Command::node_bootstrap_whitelist => { if parameters.is_empty() { match client.private.node_bootstrap_whitelist().await { - Ok(bootsraplist_ips) => Ok(Box::new(bootsraplist_ips)), + Ok(bootstraplist_ips) => Ok(Box::new(bootstraplist_ips)), Err(e) => { - client_warning!("if bootsrap whitelist configuration file does't exists, bootsrap is allowed for everyone !!!"); + client_warning!("if bootstrap whitelist configuration file does't exists, bootstrap is allowed for everyone !!!"); rpc_error!(e) } } @@ -1106,7 +1264,7 @@ impl Command { Ok(()) => { if !json { println!( - "Request of bootsrap whitelisting successfully sent!" + "Request of bootstrap whitelisting successfully sent!" ) } Ok(Box::new(())) @@ -1125,7 +1283,7 @@ impl Command { { Ok(()) => { if !json { - println!("Request of remove from bootsrap whitelist successfully sent!") + println!("Request of remove from bootstrap whitelist successfully sent!") } Ok(Box::new(())) } @@ -1137,7 +1295,7 @@ impl Command { Ok(()) => { if !json { println!( - "Request of bootsrap whitelisting everyone successfully sent!" + "Request of bootstrap whitelisting everyone successfully sent!" 
) } Ok(Box::new(())) @@ -1238,7 +1396,7 @@ async fn send_operation( match client .public .send_operations(vec![OperationInput { - creator_public_key: op.creator_public_key, + creator_public_key: op.content_creator_pub_key, serialized_content: op.serialized_data, signature: op.signature, }]) @@ -1256,7 +1414,7 @@ async fn send_operation( /// TODO: ugly utilities functions /// takes a slice of string and makes it into a `Vec` -pub fn parse_vec(args: &[String]) -> anyhow::Result, Error> +pub fn parse_vec(args: &[String]) -> anyhow::Result, anyhow::Error> where T::Err: Display, { @@ -1273,16 +1431,21 @@ async fn get_file_as_byte_vec(filename: &std::path::Path) -> Result> { Ok(tokio::fs::read(filename).await?) } -// chains get_key_value with its parsing and displays a warning on parsing error -pub fn parse_key_value(p: &HashMap<&str, &str>, key: &str) -> Option { - p.get_key_value(key).and_then(|x| { - x.1.parse::() - .map_err(|_| { - client_warning!(format!( - "'{}' parameter was ignored because of wrong corresponding value", - key - )) - }) - .ok() - }) +// chains get_key_value with its parsing +pub fn parse_key_value( + p: &HashMap<&str, &str>, + key: &str, +) -> anyhow::Result, anyhow::Error> +where + T::Err: Display, +{ + if let Some(value) = p.get_key_value(key) { + value + .1 + .parse::() + .map(Option::Some) + .map_err(|e| anyhow!("failed to parse \"{}\" due to: {}", value.1, e)) + } else { + Ok(None) + } } diff --git a/massa-client/src/main.rs b/massa-client/src/main.rs index 5c7c1057167..2ccf3ba48cd 100644 --- a/massa-client/src/main.rs +++ b/massa-client/src/main.rs @@ -1,6 +1,5 @@ // Copyright (c) 2022 MASSA LABS //! 
Massa stateless CLI -#![feature(str_split_whitespace_as_str)] #![warn(missing_docs)] #![warn(unused_crate_dependencies)] use crate::settings::SETTINGS; @@ -9,9 +8,10 @@ use atty::Stream; use cmds::Command; use console::style; use dialoguer::Password; -use massa_sdk::{Client, HttpConfig}; +use massa_sdk::{Client, ClientConfig, HttpConfig}; use massa_wallet::Wallet; use serde::Serialize; +use std::env; use std::net::IpAddr; use std::path::{Path, PathBuf}; use std::sync::atomic::{AtomicUsize, Ordering}; @@ -64,7 +64,7 @@ struct JsonError { /// Ask for the wallet password /// If the wallet does not exist, it will require password confirmation -fn ask_password(wallet_path: &Path) -> String { +pub(crate) fn ask_password(wallet_path: &Path) -> String { if wallet_path.is_file() { Password::new() .with_prompt("Enter wallet password") @@ -95,14 +95,19 @@ fn main(args: Args) -> anyhow::Result<()> { } async fn run(args: Args) -> Result<()> { + let client_config = ClientConfig { + max_request_body_size: SETTINGS.client.max_request_body_size, + request_timeout: SETTINGS.client.request_timeout, + max_concurrent_requests: SETTINGS.client.max_concurrent_requests, + certificate_store: SETTINGS.client.certificate_store.clone(), + id_kind: SETTINGS.client.id_kind.clone(), + max_log_length: SETTINGS.client.max_log_length, + headers: SETTINGS.client.headers.clone(), + }; + let http_config = HttpConfig { - max_request_body_size: SETTINGS.http.max_request_body_size, - request_timeout: SETTINGS.http.request_timeout, - max_concurrent_requests: SETTINGS.http.max_concurrent_requests, - certificate_store: SETTINGS.http.certificate_store.clone(), - id_kind: SETTINGS.http.id_kind.clone(), - max_log_length: SETTINGS.http.max_log_length, - headers: SETTINGS.http.headers.clone(), + client_config, + enabled: SETTINGS.client.http.enabled, }; // TODO: move settings loading in another crate ... see #1277 @@ -131,18 +136,31 @@ async fn run(args: Args) -> Result<()> { std::process::exit(1); })); - // ... 
- let password = args.password.unwrap_or_else(|| ask_password(&args.wallet)); - let mut wallet = Wallet::new(args.wallet, password)?; let client = Client::new(address, public_port, private_port, &http_config).await; if atty::is(Stream::Stdout) && args.command == Command::help && !args.json { // Interactive mode - repl::run(&client, &mut wallet).await?; + repl::run(&client, &args.wallet, args.password).await?; } else { // Non-Interactive mode + + // Only prompt for password if the command needs wallet access. + let mut wallet_opt = match args.command.is_pwd_needed() { + true => { + let password = match (args.password, env::var("MASSA_CLIENT_PASSWORD")) { + (Some(pwd), _) => pwd, + (_, Ok(pwd)) => pwd, + _ => ask_password(&args.wallet), + }; + + let wallet = Wallet::new(args.wallet, password)?; + Some(wallet) + } + false => None, + }; + match args .command - .run(&client, &mut wallet, &args.parameters, args.json) + .run(&client, &mut wallet_opt, &args.parameters, args.json) .await { Ok(output) => { diff --git a/massa-client/src/repl.rs b/massa-client/src/repl.rs index 0e1cc9fdea5..929ab2d3320 100644 --- a/massa-client/src/repl.rs +++ b/massa-client/src/repl.rs @@ -1,26 +1,31 @@ // Copyright (c) 2022 MASSA LABS +use crate::ask_password; use crate::cmds::{Command, ExtendedWallet}; use crate::settings::SETTINGS; use anyhow::Result; use console::style; use erased_serde::{Serialize, Serializer}; -use massa_models::api::{ - AddressInfo, BlockInfo, DatastoreEntryOutput, EndorsementInfo, NodeStatus, OperationInfo, +use massa_api_exports::{ + address::AddressInfo, block::BlockInfo, datastore::DatastoreEntryOutput, + endorsement::EndorsementInfo, execution::ExecuteReadOnlyResponse, node::NodeStatus, + operation::OperationInfo, }; use massa_models::composite::PubkeySig; -use massa_models::execution::ExecuteReadOnlyResponse; use massa_models::output_event::SCOutputEvent; use massa_models::prehash::PreHashSet; use massa_models::{address::Address, operation::OperationId}; use 
massa_sdk::Client; +use massa_signature::{KeyPair, PublicKey}; use massa_wallet::Wallet; use rustyline::completion::{Completer, FilenameCompleter, Pair}; use rustyline::error::ReadlineError; use rustyline::validate::MatchingBracketValidator; use rustyline::{CompletionType, Config, Editor}; use rustyline_derive::{Completer, Helper, Highlighter, Hinter, Validator}; +use std::env; use std::net::IpAddr; +use std::path::Path; use std::str; use strum::IntoEnumIterator; use strum::ParseError; @@ -96,7 +101,11 @@ struct MyHelper { validator: MatchingBracketValidator, } -pub(crate) async fn run(client: &Client, wallet: &mut Wallet) -> Result<()> { +pub(crate) async fn run( + client: &Client, + wallet_path: &Path, + args_password: Option, +) -> Result<()> { massa_fancy_ascii_art_logo!(); println!("Use 'exit' or 'CTRL+D or CTRL+C' to quit the prompt"); println!("Use the Up/Down arrows to scroll through history"); @@ -118,6 +127,9 @@ pub(crate) async fn run(client: &Client, wallet: &mut Wallet) -> Result<()> { if rl.load_history(&SETTINGS.history_file_path).is_err() { println!("No previous history."); } + + let mut wallet_opt = None; + loop { let readline = rl.readline("command > "); match readline { @@ -132,10 +144,28 @@ pub(crate) async fn run(client: &Client, wallet: &mut Wallet) -> Result<()> { let parameters = input[1..].to_vec(); // Print result of evaluated command match cmd { - Ok(command) => match command.run(client, wallet, ¶meters, false).await { - Ok(output) => output.pretty_print(), - Err(e) => println!("{}", style(format!("Error: {}", e)).red()), - }, + Ok(command) => { + // Check if we need to prompt the user for their wallet password + if command.is_pwd_needed() && wallet_opt.is_none() { + let password = + match (args_password.clone(), env::var("MASSA_CLIENT_PASSWORD")) { + (Some(pwd), _) => pwd, + (_, Ok(pwd)) => pwd, + _ => ask_password(wallet_path), + }; + + let wallet = Wallet::new(wallet_path.to_path_buf(), password)?; + wallet_opt = Some(wallet); + } + + 
match command + .run(client, &mut wallet_opt, ¶meters, false) + .await + { + Ok(output) => output.pretty_print(), + Err(e) => println!("{}", style(format!("Error: {}", e)).red()), + } + } Err(_) => { println!("Command not found!\ntype \"help\" to get the list of commands") } @@ -219,6 +249,36 @@ impl Output for ExtendedWallet { } } +impl Output for Vec<(Address, PublicKey)> { + fn pretty_print(&self) { + match self.len() { + 1 => println!("{}", self[0].1), + _ => { + for address_pubkey in self { + println!("Address: {}", address_pubkey.0); + println!("Public key: {}", address_pubkey.1); + println!(); + } + } + } + } +} + +impl Output for Vec<(Address, KeyPair)> { + fn pretty_print(&self) { + match self.len() { + 1 => println!("{}", self[0].1), + _ => { + for address_seckey in self { + println!("Address: {}", address_seckey.0); + println!("Secret key: {}", address_seckey.1); + println!(); + } + } + } + } +} + impl Output for () { fn pretty_print(&self) {} } @@ -297,6 +357,14 @@ impl Output for Vec { } } +impl Output for Vec { + fn pretty_print(&self) { + for block_info in self { + println!("{}", block_info); + } + } +} + impl Output for Vec { fn pretty_print(&self) { for operation_id in self { diff --git a/massa-client/src/settings.rs b/massa-client/src/settings.rs index 0e7ad8e4be6..c35f7d9bc6f 100644 --- a/massa-client/src/settings.rs +++ b/massa-client/src/settings.rs @@ -16,7 +16,7 @@ pub struct Settings { pub history: usize, pub history_file_path: PathBuf, pub timeout: MassaTime, - pub http: HttpSettings, + pub client: ClientSettings, } #[derive(Debug, Deserialize, Clone)] @@ -26,10 +26,10 @@ pub struct DefaultNode { pub public_port: u16, } -/// Http Client settings. -/// the Http Client settings +/// Client settings +/// the client settings. 
#[derive(Debug, Deserialize, Clone)] -pub struct HttpSettings { +pub struct ClientSettings { pub max_request_body_size: u32, pub request_timeout: MassaTime, pub max_concurrent_requests: usize, @@ -37,6 +37,15 @@ pub struct HttpSettings { pub id_kind: String, pub max_log_length: u32, pub headers: Vec<(String, String)>, + pub http: HttpSettings, +} + +///TODO add WebSocket to CLI +/// Http client settings. +/// the Http client settings +#[derive(Debug, Deserialize, Clone)] +pub struct HttpSettings { + pub enabled: bool, } #[cfg(test)] diff --git a/massa-consensus-exports/Cargo.toml b/massa-consensus-exports/Cargo.toml index d37d46383cd..3996748a415 100644 --- a/massa-consensus-exports/Cargo.toml +++ b/massa-consensus-exports/Cargo.toml @@ -14,7 +14,7 @@ serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" thiserror = "1.0" jsonrpsee = { version = "0.16.2", features = ["server"] } -tokio = { version = "1.21", features = ["sync"] } +tokio = { version = "1.23", features = ["sync"] } #custom modules massa_hash = { path = "../massa-hash"} massa_execution_exports = { path = "../massa-execution-exports" } diff --git a/massa-consensus-exports/src/block_graph_export.rs b/massa-consensus-exports/src/block_graph_export.rs index bd8f5d27069..1221244b2cf 100644 --- a/massa-consensus-exports/src/block_graph_export.rs +++ b/massa-consensus-exports/src/block_graph_export.rs @@ -1,6 +1,6 @@ use massa_models::{ address::Address, - block::BlockId, + block_id::BlockId, clique::Clique, prehash::{PreHashMap, PreHashSet}, slot::Slot, diff --git a/massa-consensus-exports/src/block_status.rs b/massa-consensus-exports/src/block_status.rs index 2138a969489..89d393efab4 100644 --- a/massa-consensus-exports/src/block_status.rs +++ b/massa-consensus-exports/src/block_status.rs @@ -1,9 +1,6 @@ use massa_models::{ - active_block::ActiveBlock, - address::Address, - block::{Block, BlockId, WrappedHeader}, - prehash::PreHashSet, - slot::Slot, + active_block::ActiveBlock, 
address::Address, block::Block, block_header::SecuredHeader, + block_id::BlockId, prehash::PreHashSet, slot::Slot, }; use massa_storage::Storage; use serde::{Deserialize, Serialize}; @@ -11,7 +8,7 @@ use serde::{Deserialize, Serialize}; #[derive(Debug, Clone)] #[allow(clippy::large_enum_variant)] pub enum HeaderOrBlock { - Header(WrappedHeader), + Header(SecuredHeader), Block { id: BlockId, slot: Slot, @@ -100,7 +97,7 @@ pub enum ExportBlockStatus { #[derive(Debug, Clone, Serialize, Deserialize)] pub struct ExportCompiledBlock { /// Header of the corresponding block. - pub header: WrappedHeader, + pub header: SecuredHeader, /// For (i, set) in children, /// set contains the headers' hashes /// of blocks referencing exported block as a parent, diff --git a/massa-consensus-exports/src/bootstrapable_graph.rs b/massa-consensus-exports/src/bootstrapable_graph.rs index 4ee633653ff..b7988051b7d 100644 --- a/massa-consensus-exports/src/bootstrapable_graph.rs +++ b/massa-consensus-exports/src/bootstrapable_graph.rs @@ -39,7 +39,7 @@ impl Serializer for BootstrapableGraphSerializer { /// use massa_consensus_exports::bootstrapable_graph::{BootstrapableGraph, BootstrapableGraphSerializer}; /// use massa_serialization::Serializer; /// use massa_hash::Hash; - /// use massa_models::{prehash::PreHashMap, block::BlockId, config::THREAD_COUNT}; + /// use massa_models::{prehash::PreHashMap, block_id::BlockId, config::THREAD_COUNT}; /// let mut bootstrapable_graph = BootstrapableGraph { /// final_blocks: Vec::new(), /// }; @@ -106,7 +106,7 @@ impl Deserializer for BootstrapableGraphDeserializer { /// use massa_consensus_exports::bootstrapable_graph::{BootstrapableGraph, BootstrapableGraphDeserializer, BootstrapableGraphSerializer}; /// use massa_serialization::{Deserializer, Serializer, DeserializeError}; /// use massa_hash::Hash; - /// use massa_models::{prehash::PreHashMap, block::BlockId, config::THREAD_COUNT}; + /// use massa_models::{prehash::PreHashMap, block_id::BlockId, 
config::THREAD_COUNT}; /// let mut bootstrapable_graph = BootstrapableGraph { /// final_blocks: Vec::new(), /// }; diff --git a/massa-consensus-exports/src/channels.rs b/massa-consensus-exports/src/channels.rs index 2cc2a44bde6..d09fb6a515a 100644 --- a/massa-consensus-exports/src/channels.rs +++ b/massa-consensus-exports/src/channels.rs @@ -1,5 +1,6 @@ use massa_execution_exports::ExecutionController; -use massa_models::block::{Block, BlockHeader, FilledBlock}; +use massa_models::block::{Block, FilledBlock}; +use massa_models::block_header::BlockHeader; use massa_pool_exports::PoolController; use massa_pos_exports::SelectorController; use massa_protocol_exports::ProtocolCommandSender; diff --git a/massa-consensus-exports/src/controller_trait.rs b/massa-consensus-exports/src/controller_trait.rs index b46baf1764d..ecd9f22f1d4 100644 --- a/massa-consensus-exports/src/controller_trait.rs +++ b/massa-consensus-exports/src/controller_trait.rs @@ -3,12 +3,8 @@ use crate::{bootstrapable_graph::BootstrapableGraph, error::ConsensusError}; use massa_models::prehash::PreHashSet; use massa_models::streaming_step::StreamingStep; use massa_models::{ - api::BlockGraphStatus, - block::{BlockHeader, BlockId}, - clique::Clique, - slot::Slot, - stats::ConsensusStats, - wrapped::Wrapped, + block::BlockGraphStatus, block_header::BlockHeader, block_id::BlockId, clique::Clique, + secure_share::SecureShare, slot::Slot, stats::ConsensusStats, }; use massa_storage::Storage; @@ -107,14 +103,14 @@ pub trait ConsensusController: Send + Sync { /// # Arguments /// * `block_id`: the id of the block to register /// * `header`: the header of the block to register - fn register_block_header(&self, block_id: BlockId, header: Wrapped); + fn register_block_header(&self, block_id: BlockId, header: SecureShare); /// Mark a block as invalid in the graph /// /// # Arguments /// * `block_id`: the id of the block to mark as invalid /// * `header`: the header of the block to mark as invalid - fn 
mark_invalid_block(&self, block_id: BlockId, header: Wrapped); + fn mark_invalid_block(&self, block_id: BlockId, header: SecureShare); /// Returns a boxed clone of self. /// Useful to allow cloning `Box`. diff --git a/massa-consensus-exports/src/export_active_block.rs b/massa-consensus-exports/src/export_active_block.rs index 7f594f549ed..c9cd601540f 100644 --- a/massa-consensus-exports/src/export_active_block.rs +++ b/massa-consensus-exports/src/export_active_block.rs @@ -2,9 +2,10 @@ use crate::error::ConsensusError; use massa_hash::HashDeserializer; use massa_models::{ active_block::ActiveBlock, - block::{Block, BlockDeserializer, BlockId, WrappedBlock}, + block::{Block, BlockDeserializer, SecureShareBlock}, + block_id::BlockId, prehash::PreHashMap, - wrapped::{WrappedDeserializer, WrappedSerializer}, + secure_share::{SecureShareDeserializer, SecureShareSerializer}, }; use massa_serialization::{ Deserializer, SerializeError, Serializer, U64VarIntDeserializer, U64VarIntSerializer, @@ -27,7 +28,7 @@ use std::ops::Bound::Included; #[derive(Debug, Clone, Serialize, Deserialize)] pub struct ExportActiveBlock { /// The block. 
- pub block: WrappedBlock, + pub block: SecureShareBlock, /// one `(block id, period)` per thread ( if not genesis ) pub parents: Vec<(BlockId, u64)>, /// for example has its fitness reached the given threshold @@ -69,7 +70,7 @@ impl ExportActiveBlock { // create ActiveBlock let active_block = ActiveBlock { - creator_address: self.block.creator_address, + creator_address: self.block.content_creator_address, block_id: self.block.id, parents: self.parents.clone(), children: vec![PreHashMap::default(); thread_count as usize], // will be computed once the full graph is available @@ -89,7 +90,7 @@ impl ExportActiveBlock { /// Basic serializer of `ExportActiveBlock` #[derive(Default)] pub struct ExportActiveBlockSerializer { - wrapped_serializer: WrappedSerializer, + sec_share_serializer: SecureShareSerializer, period_serializer: U64VarIntSerializer, } @@ -97,7 +98,7 @@ impl ExportActiveBlockSerializer { /// Create a new `ExportActiveBlockSerializer` pub fn new() -> Self { ExportActiveBlockSerializer { - wrapped_serializer: WrappedSerializer::new(), + sec_share_serializer: SecureShareSerializer::new(), period_serializer: U64VarIntSerializer::new(), } } @@ -110,7 +111,7 @@ impl Serializer for ExportActiveBlockSerializer { buffer: &mut Vec, ) -> Result<(), SerializeError> { // block - self.wrapped_serializer.serialize(&value.block, buffer)?; + self.sec_share_serializer.serialize(&value.block, buffer)?; // parents with periods // note: there should be no parents for genesis blocks @@ -129,7 +130,7 @@ impl Serializer for ExportActiveBlockSerializer { /// Basic deserializer of `ExportActiveBlock` pub struct ExportActiveBlockDeserializer { - wrapped_block_deserializer: WrappedDeserializer, + sec_share_block_deserializer: SecureShareDeserializer, hash_deserializer: HashDeserializer, period_deserializer: U64VarIntDeserializer, thread_count: u8, @@ -140,7 +141,7 @@ impl ExportActiveBlockDeserializer { #[allow(clippy::too_many_arguments)] pub fn new(thread_count: u8, 
endorsement_count: u32, max_operations_per_block: u32) -> Self { ExportActiveBlockDeserializer { - wrapped_block_deserializer: WrappedDeserializer::new(BlockDeserializer::new( + sec_share_block_deserializer: SecureShareDeserializer::new(BlockDeserializer::new( thread_count, max_operations_per_block, endorsement_count, @@ -156,7 +157,9 @@ impl Deserializer for ExportActiveBlockDeserializer { /// ## Example: /// ```rust /// use massa_consensus_exports::export_active_block::{ExportActiveBlock, ExportActiveBlockDeserializer, ExportActiveBlockSerializer}; - /// use massa_models::{ledger_models::LedgerChanges, config::THREAD_COUNT, rolls::RollUpdates, block::{BlockId, Block, BlockSerializer, BlockHeader, BlockHeaderSerializer}, prehash::PreHashSet, endorsement::{Endorsement, EndorsementSerializerLW}, slot::Slot, wrapped::WrappedContent}; + /// use massa_models::{ledger::LedgerChanges, config::THREAD_COUNT, rolls::RollUpdates, block::{Block, BlockSerializer}, prehash::PreHashSet, endorsement::{Endorsement, EndorsementSerializer}, slot::Slot, secure_share::SecureShareContent}; + /// use massa_models::block_id::BlockId; + /// use massa_models::block_header::{BlockHeader, BlockHeaderSerializer}; /// use massa_hash::Hash; /// use std::collections::HashSet; /// use massa_signature::KeyPair; @@ -168,29 +171,29 @@ impl Deserializer for ExportActiveBlockDeserializer { /// .collect(); /// /// // create block header - /// let orig_header = BlockHeader::new_wrapped( + /// let orig_header = BlockHeader::new_verifiable( /// BlockHeader { /// slot: Slot::new(1, 1), /// parents, /// operation_merkle_root: Hash::compute_from("mno".as_bytes()), /// endorsements: vec![ - /// Endorsement::new_wrapped( + /// Endorsement::new_verifiable( /// Endorsement { /// slot: Slot::new(1, 1), /// index: 1, - /// endorsed_block: BlockId(Hash::compute_from("blk1".as_bytes())), + /// endorsed_block: BlockId(Hash::compute_from(&[1])), /// }, - /// EndorsementSerializerLW::new(), + /// 
EndorsementSerializer::new(), /// &keypair, /// ) /// .unwrap(), - /// Endorsement::new_wrapped( + /// Endorsement::new_verifiable( /// Endorsement { - /// slot: Slot::new(4, 0), + /// slot: Slot::new(1, 1), /// index: 3, - /// endorsed_block: BlockId(Hash::compute_from("blk2".as_bytes())), + /// endorsed_block: BlockId(Hash::compute_from(&[1])), /// }, - /// EndorsementSerializerLW::new(), + /// EndorsementSerializer::new(), /// &keypair, /// ) /// .unwrap(), @@ -207,7 +210,7 @@ impl Deserializer for ExportActiveBlockDeserializer { /// operations: Vec::new(), /// }; /// - /// let full_block = Block::new_wrapped(orig_block, BlockSerializer::new(), &keypair).unwrap(); + /// let full_block = Block::new_verifiable(orig_block, BlockSerializer::new(), &keypair).unwrap(); /// let export_active_block = ExportActiveBlock { /// block: full_block.clone(), /// parents: vec![], @@ -230,7 +233,7 @@ impl Deserializer for ExportActiveBlockDeserializer { tuple(( // block context("Failed block deserialization", |input| { - self.wrapped_block_deserializer.deserialize(input) + self.sec_share_block_deserializer.deserialize(input) }), // parents context( diff --git a/massa-consensus-exports/src/test_exports/mock.rs b/massa-consensus-exports/src/test_exports/mock.rs index f4603b16bf4..70b1c256607 100644 --- a/massa-consensus-exports/src/test_exports/mock.rs +++ b/massa-consensus-exports/src/test_exports/mock.rs @@ -6,14 +6,9 @@ use std::sync::{ }; use massa_models::{ - api::BlockGraphStatus, - block::{BlockHeader, BlockId}, - clique::Clique, - prehash::PreHashSet, - slot::Slot, - stats::ConsensusStats, + block::BlockGraphStatus, block_header::BlockHeader, block_id::BlockId, clique::Clique, + prehash::PreHashSet, secure_share::SecureShare, slot::Slot, stats::ConsensusStats, streaming_step::StreamingStep, - wrapped::Wrapped, }; use massa_storage::Storage; use massa_time::MassaTime; @@ -74,7 +69,7 @@ pub enum MockConsensusControllerMessage { }, MarkInvalidBlock { block_id: BlockId, - 
header: Wrapped, + header: SecureShare, }, RegisterBlock { block_id: BlockId, @@ -84,7 +79,7 @@ pub enum MockConsensusControllerMessage { }, RegisterBlockHeader { block_id: BlockId, - header: Wrapped, + header: SecureShare, }, } @@ -237,7 +232,7 @@ impl ConsensusController for MockConsensusController { response_rx.recv().unwrap() } - fn mark_invalid_block(&self, block_id: BlockId, header: Wrapped) { + fn mark_invalid_block(&self, block_id: BlockId, header: SecureShare) { self.0 .lock() .unwrap() @@ -258,7 +253,7 @@ impl ConsensusController for MockConsensusController { .unwrap(); } - fn register_block_header(&self, block_id: BlockId, header: Wrapped) { + fn register_block_header(&self, block_id: BlockId, header: SecureShare) { self.0 .lock() .unwrap() diff --git a/massa-consensus-worker/src/commands.rs b/massa-consensus-worker/src/commands.rs index 4ca74d79f94..5541cf8f296 100644 --- a/massa-consensus-worker/src/commands.rs +++ b/massa-consensus-worker/src/commands.rs @@ -1,13 +1,11 @@ use massa_models::{ - block::{BlockHeader, BlockId}, - slot::Slot, - wrapped::Wrapped, + block_header::BlockHeader, block_id::BlockId, secure_share::SecureShare, slot::Slot, }; use massa_storage::Storage; #[allow(clippy::large_enum_variant)] pub enum ConsensusCommand { RegisterBlock(BlockId, Slot, Storage, bool), - RegisterBlockHeader(BlockId, Wrapped), - MarkInvalidBlock(BlockId, Wrapped), + RegisterBlockHeader(BlockId, SecureShare), + MarkInvalidBlock(BlockId, SecureShare), } diff --git a/massa-consensus-worker/src/controller.rs b/massa-consensus-worker/src/controller.rs index d0f1928fd40..42043454290 100644 --- a/massa-consensus-worker/src/controller.rs +++ b/massa-consensus-worker/src/controller.rs @@ -4,15 +4,16 @@ use massa_consensus_exports::{ export_active_block::ExportActiveBlock, ConsensusChannels, ConsensusController, }; use massa_models::{ - api::BlockGraphStatus, - block::{BlockHeader, BlockId, FilledBlock}, + block::{BlockGraphStatus, FilledBlock}, + 
block_header::BlockHeader, + block_id::BlockId, clique::Clique, operation::{Operation, OperationId}, prehash::PreHashSet, + secure_share::SecureShare, slot::Slot, stats::ConsensusStats, streaming_step::StreamingStep, - wrapped::Wrapped, }; use massa_storage::Storage; use parking_lot::RwLock; @@ -227,15 +228,17 @@ impl ConsensusController for ConsensusControllerImpl { fn register_block(&self, block_id: BlockId, slot: Slot, block_storage: Storage, created: bool) { if self.broadcast_enabled { - if let Some(wrapped_block) = block_storage.read_blocks().get(&block_id) { - let operations: Vec<(OperationId, Option>)> = - wrapped_block + if let Some(verifiable_block) = block_storage.read_blocks().get(&block_id) { + let operations: Vec<(OperationId, Option>)> = + verifiable_block .content .operations .iter() .map(|operation_id| { match block_storage.read_operations().get(operation_id).cloned() { - Some(wrapped_operation) => (*operation_id, Some(wrapped_operation)), + Some(verifiable_operation) => { + (*operation_id, Some(verifiable_operation)) + } None => (*operation_id, None), } }) @@ -244,10 +247,10 @@ impl ConsensusController for ConsensusControllerImpl { let _block_receivers_count = self .channels .block_sender - .send(wrapped_block.content.clone()); + .send(verifiable_block.content.clone()); let _filled_block_receivers_count = self.channels.filled_block_sender.send(FilledBlock { - header: wrapped_block.content.header.clone(), + header: verifiable_block.content.header.clone(), operations, }); } else { @@ -271,7 +274,7 @@ impl ConsensusController for ConsensusControllerImpl { } } - fn register_block_header(&self, block_id: BlockId, header: Wrapped) { + fn register_block_header(&self, block_id: BlockId, header: SecureShare) { if self.broadcast_enabled { let _ = self .channels @@ -286,7 +289,7 @@ impl ConsensusController for ConsensusControllerImpl { } } - fn mark_invalid_block(&self, block_id: BlockId, header: Wrapped) { + fn mark_invalid_block(&self, block_id: BlockId, 
header: SecureShare) { if let Err(err) = self .command_sender .try_send(ConsensusCommand::MarkInvalidBlock(block_id, header)) diff --git a/massa-consensus-worker/src/state/graph.rs b/massa-consensus-worker/src/state/graph.rs index b2c08e5c6d9..11bce905b0d 100644 --- a/massa-consensus-worker/src/state/graph.rs +++ b/massa-consensus-worker/src/state/graph.rs @@ -5,7 +5,7 @@ use massa_consensus_exports::{ error::ConsensusError, }; use massa_logging::massa_trace; -use massa_models::{block::BlockId, clique::Clique, prehash::PreHashSet, slot::Slot}; +use massa_models::{block_id::BlockId, clique::Clique, prehash::PreHashSet, slot::Slot}; use super::ConsensusState; diff --git a/massa-consensus-worker/src/state/mod.rs b/massa-consensus-worker/src/state/mod.rs index 41beb4edf0f..7197110f1ac 100644 --- a/massa-consensus-worker/src/state/mod.rs +++ b/massa-consensus-worker/src/state/mod.rs @@ -12,8 +12,9 @@ use massa_consensus_exports::{ use massa_models::{ active_block::ActiveBlock, address::Address, - api::BlockGraphStatus, - block::{BlockId, WrappedHeader}, + block::BlockGraphStatus, + block_header::SecuredHeader, + block_id::BlockId, clique::Clique, prehash::{CapacityAllocator, PreHashMap, PreHashSet}, slot::Slot, @@ -86,7 +87,7 @@ pub struct ConsensusState { /// the time span considered for desynchronization detection pub stats_desync_detection_timespan: MassaTime, /// blocks we want - pub wishlist: PreHashMap>, + pub wishlist: PreHashMap>, /// previous blockclique notified to Execution pub prev_blockclique: PreHashMap, } @@ -466,8 +467,8 @@ impl ConsensusState { /// get the current block wish list, including the operations hash. 
pub fn get_block_wishlist( &self, - ) -> Result>, ConsensusError> { - let mut wishlist = PreHashMap::>::default(); + ) -> Result>, ConsensusError> { + let mut wishlist = PreHashMap::>::default(); for block_id in self.waiting_for_dependencies_index.iter() { if let Some(BlockStatus::WaitingForDependencies { unsatisfied_dependencies, diff --git a/massa-consensus-worker/src/state/process.rs b/massa-consensus-worker/src/state/process.rs index 04ddeb1fb66..b7ab4c808fa 100644 --- a/massa-consensus-worker/src/state/process.rs +++ b/massa-consensus-worker/src/state/process.rs @@ -11,7 +11,8 @@ use massa_logging::massa_trace; use massa_models::{ active_block::ActiveBlock, address::Address, - block::{BlockId, WrappedHeader}, + block_header::SecuredHeader, + block_id::BlockId, clique::Clique, prehash::{PreHashMap, PreHashSet}, slot::Slot, @@ -173,15 +174,17 @@ impl ConsensusState { massa_trace!("consensus.block_graph.process.incoming_header.discarded", {"block_id": block_id, "reason": reason}); // count stales if reason == DiscardReason::Stale { - self.new_stale_blocks - .insert(block_id, (header.creator_address, header.content.slot)); + self.new_stale_blocks.insert( + block_id, + (header.content_creator_address, header.content.slot), + ); } // discard self.block_statuses.insert( block_id, BlockStatus::Discarded { slot: header.content.slot, - creator: header.creator_address, + creator: header.content_creator_address, parents: header.content.parents, reason, sequence_number: { @@ -239,7 +242,7 @@ impl ConsensusState { "block_id": block_id }); ( - stored_block.content.header.creator_public_key, + stored_block.content.header.content_creator_pub_key, slot, parents_hash_period, incompatibilities, @@ -299,7 +302,7 @@ impl ConsensusState { self.new_stale_blocks.insert( block_id, ( - stored_block.content.header.creator_address, + stored_block.content.header.content_creator_address, stored_block.content.header.content.slot, ), ); @@ -309,7 +312,7 @@ impl ConsensusState { block_id, 
BlockStatus::Discarded { slot: stored_block.content.header.content.slot, - creator: stored_block.creator_address, + creator: stored_block.content_creator_address, parents: stored_block.content.header.content.parents.clone(), reason, sequence_number: { @@ -822,7 +825,7 @@ impl ConsensusState { // notify protocol of block wishlist let new_wishlist = self.get_block_wishlist()?; - let new_blocks: PreHashMap> = new_wishlist + let new_blocks: PreHashMap> = new_wishlist .iter() .filter_map(|(id, header)| { if !self.wishlist.contains_key(id) { diff --git a/massa-consensus-worker/src/state/process_commands.rs b/massa-consensus-worker/src/state/process_commands.rs index 3677ed3aa1c..5fec3eebfbc 100644 --- a/massa-consensus-worker/src/state/process_commands.rs +++ b/massa-consensus-worker/src/state/process_commands.rs @@ -5,10 +5,7 @@ use massa_consensus_exports::{ error::ConsensusError, }; use massa_logging::massa_trace; -use massa_models::{ - block::{BlockId, WrappedHeader}, - slot::Slot, -}; +use massa_models::{block_header::SecuredHeader, block_id::BlockId, slot::Slot}; use massa_storage::Storage; use massa_time::MassaTime; use tracing::debug; @@ -28,7 +25,7 @@ impl ConsensusState { pub fn register_block_header( &mut self, block_id: BlockId, - header: WrappedHeader, + header: SecuredHeader, current_slot: Option, ) -> Result<(), ConsensusError> { // ignore genesis blocks @@ -164,7 +161,7 @@ impl ConsensusState { /// # Arguments: /// * `block_id`: Block id of the block to mark as invalid /// * `header`: Header of the block to mark as invalid - pub fn mark_invalid_block(&mut self, block_id: &BlockId, header: WrappedHeader) { + pub fn mark_invalid_block(&mut self, block_id: &BlockId, header: SecuredHeader) { let reason = DiscardReason::Invalid("invalid".to_string()); self.maybe_note_attack_attempt(&reason, block_id); massa_trace!("consensus.block_graph.process.invalid_block", {"block_id": block_id, "reason": reason}); @@ -174,7 +171,7 @@ impl ConsensusState { *block_id, 
BlockStatus::Discarded { slot: header.content.slot, - creator: header.creator_address, + creator: header.content_creator_address, parents: header.content.parents, reason, sequence_number: { diff --git a/massa-consensus-worker/src/state/prune.rs b/massa-consensus-worker/src/state/prune.rs index d1387b25004..e60fa1609c5 100644 --- a/massa-consensus-worker/src/state/prune.rs +++ b/massa-consensus-worker/src/state/prune.rs @@ -5,7 +5,7 @@ use massa_consensus_exports::{ use massa_logging::massa_trace; use massa_models::{ active_block::ActiveBlock, - block::BlockId, + block_id::BlockId, prehash::{PreHashMap, PreHashSet}, slot::Slot, }; @@ -57,7 +57,7 @@ impl ConsensusState { )) })?; block_slot = block.content.header.content.slot; - block_creator = block.creator_address; + block_creator = block.content_creator_address; block_parents = block.content.header.content.parents.clone(); }; @@ -301,15 +301,17 @@ impl ConsensusState { if let Some(reason) = reason_opt { // add to stats if reason is Stale if reason == DiscardReason::Stale { - self.new_stale_blocks - .insert(block_id, (header.creator_address, header.content.slot)); + self.new_stale_blocks.insert( + block_id, + (header.content_creator_address, header.content.slot), + ); } // transition to Discarded only if there is a reason self.block_statuses.insert( block_id, BlockStatus::Discarded { slot: header.content.slot, - creator: header.creator_address, + creator: header.content_creator_address, parents: header.content.parents.clone(), reason, sequence_number: { diff --git a/massa-consensus-worker/src/state/tick.rs b/massa-consensus-worker/src/state/tick.rs index 3165bc82669..4e4ac156098 100644 --- a/massa-consensus-worker/src/state/tick.rs +++ b/massa-consensus-worker/src/state/tick.rs @@ -2,7 +2,7 @@ use std::collections::BTreeSet; use massa_consensus_exports::{block_status::BlockStatus, error::ConsensusError}; use massa_logging::massa_trace; -use massa_models::{block::BlockId, slot::Slot}; +use 
massa_models::{block_id::BlockId, slot::Slot}; use super::ConsensusState; diff --git a/massa-consensus-worker/src/state/verifications.rs b/massa-consensus-worker/src/state/verifications.rs index 9fc6dc11be8..bb2eddce944 100644 --- a/massa-consensus-worker/src/state/verifications.rs +++ b/massa-consensus-worker/src/state/verifications.rs @@ -6,9 +6,7 @@ use massa_consensus_exports::{ }; use massa_logging::massa_trace; use massa_models::{ - block::{BlockId, WrappedHeader}, - prehash::PreHashSet, - slot::Slot, + block_header::SecuredHeader, block_id::BlockId, prehash::PreHashSet, slot::Slot, }; /// Possible output of a header check @@ -66,7 +64,7 @@ impl ConsensusState { pub fn check_header( &self, block_id: &BlockId, - header: &WrappedHeader, + header: &SecuredHeader, current_slot: Option, read_shared_state: &ConsensusState, ) -> Result { @@ -77,7 +75,7 @@ impl ConsensusState { Vec::with_capacity(self.config.thread_count as usize); let mut incomp = PreHashSet::::default(); let mut missing_deps = PreHashSet::::default(); - let creator_addr = header.creator_address; + let creator_addr = header.content_creator_address; // check that is older than the latest final block in that thread // Note: this excludes genesis blocks @@ -376,7 +374,7 @@ impl ConsensusState { /// * endorsed slot is `parent_in_own_thread` slot pub fn check_endorsements( &self, - header: &WrappedHeader, + header: &SecuredHeader, ) -> Result { // check endorsements let endorsement_draws = match self @@ -389,7 +387,8 @@ impl ConsensusState { }; for endorsement in header.content.endorsements.iter() { // check that the draw is correct - if endorsement.creator_address != endorsement_draws[endorsement.content.index as usize] + if endorsement.content_creator_address + != endorsement_draws[endorsement.content.index as usize] { return Ok(EndorsementsCheckOutcome::Discard(DiscardReason::Invalid( format!( diff --git a/massa-consensus-worker/src/worker/init.rs b/massa-consensus-worker/src/worker/init.rs index 
f2adeee6d5d..48e49e0cdd5 100644 --- a/massa-consensus-worker/src/worker/init.rs +++ b/massa-consensus-worker/src/worker/init.rs @@ -6,11 +6,13 @@ use massa_hash::Hash; use massa_models::{ active_block::ActiveBlock, address::Address, - block::{Block, BlockHeader, BlockHeaderSerializer, BlockId, BlockSerializer, WrappedBlock}, + block::{Block, BlockSerializer, SecureShareBlock}, + block_header::{BlockHeader, BlockHeaderSerializer}, + block_id::BlockId, prehash::PreHashMap, + secure_share::SecureShareContent, slot::Slot, timeslots::{get_block_slot_timestamp, get_latest_block_slot_at_timestamp}, - wrapped::WrappedContent, }; use massa_storage::Storage; use massa_time::MassaTime; @@ -36,9 +38,9 @@ use super::ConsensusWorker; pub fn create_genesis_block( cfg: &ConsensusConfig, thread_number: u8, -) -> Result { +) -> Result { let keypair = &cfg.genesis_key; - let header = BlockHeader::new_wrapped( + let header = BlockHeader::new_verifiable( BlockHeader { slot: Slot::new(0, thread_number), parents: Vec::new(), @@ -49,7 +51,7 @@ pub fn create_genesis_block( keypair, )?; - Ok(Block::new_wrapped( + Ok(Block::new_verifiable( Block { header, operations: Default::default(), @@ -102,7 +104,7 @@ impl ConsensusWorker { block.id, BlockStatus::Active { a_block: Box::new(ActiveBlock { - creator_address: block.creator_address, + creator_address: block.content_creator_address, parents: Vec::new(), children: vec![PreHashMap::default(); config.thread_count as usize], descendants: Default::default(), diff --git a/massa-consensus-worker/src/worker/mod.rs b/massa-consensus-worker/src/worker/mod.rs index 298b2341d99..42793c21569 100644 --- a/massa-consensus-worker/src/worker/mod.rs +++ b/massa-consensus-worker/src/worker/mod.rs @@ -2,7 +2,7 @@ use massa_consensus_exports::{ bootstrapable_graph::BootstrapableGraph, ConsensusChannels, ConsensusConfig, ConsensusController, ConsensusManager, }; -use massa_models::block::BlockId; +use massa_models::block_id::BlockId; use 
massa_models::clique::Clique; use massa_models::config::CHANNEL_SIZE; use massa_models::prehash::PreHashSet; diff --git a/massa-executed-ops/src/executed_ops.rs b/massa-executed-ops/src/executed_ops.rs index c0265778e2c..b0c3d50ea8b 100644 --- a/massa-executed-ops/src/executed_ops.rs +++ b/massa-executed-ops/src/executed_ops.rs @@ -8,9 +8,9 @@ use massa_hash::{Hash, HASH_SIZE_BYTES}; use massa_models::{ operation::{OperationId, OperationIdDeserializer}, prehash::PreHashSet, + secure_share::Id, slot::{Slot, SlotDeserializer, SlotSerializer}, streaming_step::StreamingStep, - wrapped::Id, }; use massa_serialization::{ Deserializer, SerializeError, Serializer, U64VarIntDeserializer, U64VarIntSerializer, @@ -164,7 +164,7 @@ impl ExecutedOps { #[test] fn test_executed_ops_xor_computing() { use massa_models::prehash::PreHashMap; - use massa_models::wrapped::Id; + use massa_models::secure_share::Id; // initialize the executed ops config let config = ExecutedOpsConfig { diff --git a/massa-execution-exports/Cargo.toml b/massa-execution-exports/Cargo.toml index 7086b5e7988..2b18e80a532 100644 --- a/massa-execution-exports/Cargo.toml +++ b/massa-execution-exports/Cargo.toml @@ -18,7 +18,7 @@ massa_storage = { path = "../massa-storage" } massa_final_state = { path = "../massa-final-state" } massa_ledger_exports = { path = "../massa-ledger-exports", optional = true } parking_lot = { version = "0.12", features = ["deadlock_detection"], optional = true } -massa-sc-runtime = { git = "https://github.com/massalabs/massa-sc-runtime" } +massa-sc-runtime = { git = "https://github.com/massalabs/massa-sc-runtime", branch = "main" } # for more information on what are the following features used for, see the cargo.toml at workspace level [features] diff --git a/massa-execution-exports/src/controller_traits.rs b/massa-execution-exports/src/controller_traits.rs index c66c4063c41..74e72dbd49b 100644 --- a/massa-execution-exports/src/controller_traits.rs +++ 
b/massa-execution-exports/src/controller_traits.rs @@ -7,8 +7,8 @@ use crate::ExecutionError; use crate::{ExecutionAddressInfo, ReadOnlyExecutionOutput}; use massa_models::address::Address; use massa_models::amount::Amount; -use massa_models::api::EventFilter; -use massa_models::block::BlockId; +use massa_models::block_id::BlockId; +use massa_models::execution::EventFilter; use massa_models::operation::OperationId; use massa_models::output_event::SCOutputEvent; use massa_models::prehash::PreHashMap; diff --git a/massa-execution-exports/src/event_store.rs b/massa-execution-exports/src/event_store.rs index 332ab49d631..295de896a2a 100644 --- a/massa-execution-exports/src/event_store.rs +++ b/massa-execution-exports/src/event_store.rs @@ -3,7 +3,7 @@ //! This module represents an event store allowing to store, search and retrieve //! a config-limited number of execution-generated events -use massa_models::api::EventFilter; +use massa_models::execution::EventFilter; use massa_models::output_event::SCOutputEvent; use std::collections::VecDeque; diff --git a/massa-execution-exports/src/settings.rs b/massa-execution-exports/src/settings.rs index 7a140dd39cd..a109cc85b23 100644 --- a/massa-execution-exports/src/settings.rs +++ b/massa-execution-exports/src/settings.rs @@ -57,6 +57,8 @@ pub struct ExecutionConfig { pub max_bytecode_size: u64, /// Max datastore value size pub max_datastore_value_size: u64, + /// Max number of compiled modules in the cache + pub max_module_cache_size: u32, /// Storage cost constants pub storage_costs_constants: StorageCostsConstants, /// Max gas for read only executions diff --git a/massa-execution-exports/src/test_exports/config.rs b/massa-execution-exports/src/test_exports/config.rs index 951a1fa60bd..761e6d0e8b4 100644 --- a/massa-execution-exports/src/test_exports/config.rs +++ b/massa-execution-exports/src/test_exports/config.rs @@ -55,6 +55,7 @@ impl Default for ExecutionConfig { .into(), ) .unwrap(), + max_module_cache_size: 1000, } } 
} diff --git a/massa-execution-exports/src/test_exports/mock.rs b/massa-execution-exports/src/test_exports/mock.rs index ac98810ed59..ad4d775698a 100644 --- a/massa-execution-exports/src/test_exports/mock.rs +++ b/massa-execution-exports/src/test_exports/mock.rs @@ -10,8 +10,8 @@ use massa_ledger_exports::LedgerEntry; use massa_models::{ address::Address, amount::Amount, - api::EventFilter, - block::BlockId, + block_id::BlockId, + execution::EventFilter, operation::OperationId, output_event::SCOutputEvent, prehash::{PreHashMap, PreHashSet}, diff --git a/massa-execution-exports/src/types.rs b/massa-execution-exports/src/types.rs index 978270df5d8..6fe1a9d3906 100644 --- a/massa-execution-exports/src/types.rs +++ b/massa-execution-exports/src/types.rs @@ -6,7 +6,7 @@ use crate::event_store::EventStore; use massa_final_state::StateChanges; use massa_models::datastore::Datastore; use massa_models::{ - address::Address, address::ExecutionAddressCycleInfo, amount::Amount, block::BlockId, + address::Address, address::ExecutionAddressCycleInfo, amount::Amount, block_id::BlockId, slot::Slot, }; use std::collections::{BTreeMap, BTreeSet}; @@ -69,6 +69,10 @@ pub struct ReadOnlyExecutionRequest { pub call_stack: Vec, /// Target of the request pub target: ReadOnlyExecutionTarget, + /// execution start state + /// + /// Whether to start execution from final or active state + pub is_final: bool, } /// structure describing different possible targets of a read-only execution request @@ -101,6 +105,10 @@ pub struct ReadOnlyCallRequest { pub target_func: String, /// Parameter to pass to the target function pub parameter: String, + /// execution start state + /// + /// Whether to start execution from final or active state + pub is_final: bool, } /// Structure describing an element of the execution stack. 
diff --git a/massa-execution-worker/Cargo.toml b/massa-execution-worker/Cargo.toml index 6d403163f69..9ae34093684 100644 --- a/massa-execution-worker/Cargo.toml +++ b/massa-execution-worker/Cargo.toml @@ -10,12 +10,14 @@ edition = "2021" anyhow = "1.0" rand = "0.8" rand_xoshiro = "0.6" -criterion = {version = "0.4", optional = true} +criterion = { version = "0.4", optional = true } parking_lot = { version = "0.12", features = ["deadlock_detection"] } tracing = "0.1" serde_json = "1.0" num = { version = "0.4", features = ["serde"] } -tempfile = { version = "3.3", optional = true } # use with gas_calibration feature +schnellru = "0.2.0" +# use with gas_calibration feature +tempfile = { version = "3.3", optional = true } # custom modules massa_async_pool = { path = "../massa-async-pool" } massa_executed_ops = { path = "../massa-executed-ops" } @@ -23,7 +25,7 @@ massa_execution_exports = { path = "../massa-execution-exports" } massa_models = { path = "../massa-models" } massa_storage = { path = "../massa-storage" } massa_hash = { path = "../massa-hash" } -massa-sc-runtime = { git = "https://github.com/massalabs/massa-sc-runtime" } +massa-sc-runtime = { git = "https://github.com/massalabs/massa-sc-runtime", branch = "main" } massa_signature = { path = "../massa-signature" } massa_time = { path = "../massa-time" } massa_ledger_worker = { path = "../massa-ledger-worker", optional = true } @@ -36,7 +38,7 @@ massa_final_state = { path = "../massa-final-state" } massa_pos_worker = { path = "../massa-pos-worker" } serial_test = "0.10" tempfile = "3.2" -massa_ledger_worker = { path = "../massa-ledger-worker"} +massa_ledger_worker = { path = "../massa-ledger-worker" } # custom modules with testing enabled massa_execution_exports = { path = "../massa-execution-exports", features = [ "testing", @@ -49,7 +51,14 @@ harness = false [features] sandbox = ["massa_async_pool/sandbox"] -gas_calibration = ["massa_execution_exports/gas_calibration", "massa_final_state/testing", 
"massa_pos_worker", "massa_ledger_worker", "tempfile"] +gas_calibration = [ + "massa-sc-runtime/gas_calibration", + "massa_execution_exports/gas_calibration", + "massa_final_state/testing", + "massa_pos_worker", + "massa_ledger_worker", + "tempfile", +] testing = [ "massa_execution_exports/testing", "massa_ledger_exports/testing", @@ -58,4 +67,10 @@ testing = [ ] # This feature is useful as we want to have code that is compiled only when running benchmarks -benchmarking = ["criterion", "massa_pos_worker", "massa_ledger_worker", "tempfile"] \ No newline at end of file +benchmarking = [ + "massa-sc-runtime/gas_calibration", + "criterion", + "massa_pos_worker", + "massa_ledger_worker", + "tempfile", +] diff --git a/massa-execution-worker/src/context.rs b/massa-execution-worker/src/context.rs index 2a39831ff2c..affc762eb6e 100644 --- a/massa-execution-worker/src/context.rs +++ b/massa-execution-worker/src/context.rs @@ -7,6 +7,7 @@ //! More generally, the context acts only on its own state //! and does not write anything persistent to the consensus state. 
+use crate::module_cache::ModuleCache; use crate::speculative_async_pool::SpeculativeAsyncPool; use crate::speculative_executed_ops::SpeculativeExecutedOps; use crate::speculative_ledger::SpeculativeLedger; @@ -22,7 +23,7 @@ use massa_models::address::ExecutionAddressCycleInfo; use massa_models::{ address::Address, amount::Amount, - block::BlockId, + block_id::BlockId, operation::OperationId, output_event::{EventExecutionContext, SCOutputEvent}, slot::Slot, @@ -126,6 +127,9 @@ pub struct ExecutionContext { /// operation id that originally caused this execution (if any) pub origin_operation_id: Option, + + // cache of compiled runtime modules + pub module_cache: Arc>, } impl ExecutionContext { @@ -143,6 +147,7 @@ impl ExecutionContext { config: ExecutionConfig, final_state: Arc>, active_history: Arc>, + module_cache: Arc>, ) -> Self { ExecutionContext { speculative_ledger: SpeculativeLedger::new( @@ -174,6 +179,7 @@ impl ExecutionContext { unsafe_rng: Xoshiro256PlusPlus::from_seed([0u8; 32]), creator_address: Default::default(), origin_operation_id: Default::default(), + module_cache, config, } } @@ -247,6 +253,8 @@ impl ExecutionContext { call_stack: Vec, final_state: Arc>, active_history: Arc>, + + module_cache: Arc>, ) -> Self { // Deterministically seed the unsafe RNG to allow the bytecode to use it. // Note that consecutive read-only calls for the same slot will get the same random seed. @@ -270,7 +278,7 @@ impl ExecutionContext { stack: call_stack, read_only: true, unsafe_rng, - ..ExecutionContext::new(config, final_state, active_history) + ..ExecutionContext::new(config, final_state, active_history, module_cache) } } @@ -310,6 +318,7 @@ impl ExecutionContext { opt_block_id: Option, final_state: Arc>, active_history: Arc>, + module_cache: Arc>, ) -> Self { // Deterministically seed the unsafe RNG to allow the bytecode to use it. 
@@ -331,7 +340,7 @@ impl ExecutionContext { slot, opt_block_id, unsafe_rng, - ..ExecutionContext::new(config, final_state, active_history) + ..ExecutionContext::new(config, final_state, active_history, module_cache) } } @@ -686,6 +695,9 @@ impl ExecutionContext { pub fn settle_slot(&mut self) -> ExecutionOutput { let slot = self.slot; + // execute the deferred credits coming from roll sells + self.execute_deferred_credits(&slot); + // settle emitted async messages and reimburse the senders of deleted messages let ledger_changes = self.speculative_ledger.take(); let deleted_messages = self @@ -695,9 +707,6 @@ impl ExecutionContext { self.cancel_async_message(&msg); } - // execute the deferred credits coming from roll sells - self.execute_deferred_credits(&slot); - // if the current slot is last in cycle check the production stats and act accordingly if self .slot diff --git a/massa-execution-worker/src/controller.rs b/massa-execution-worker/src/controller.rs index 030106c9239..2f6fc629a6a 100644 --- a/massa-execution-worker/src/controller.rs +++ b/massa-execution-worker/src/controller.rs @@ -9,12 +9,12 @@ use massa_execution_exports::{ ExecutionAddressInfo, ExecutionConfig, ExecutionController, ExecutionError, ExecutionManager, ReadOnlyExecutionOutput, ReadOnlyExecutionRequest, }; -use massa_models::api::EventFilter; +use massa_models::execution::EventFilter; use massa_models::output_event::SCOutputEvent; use massa_models::prehash::{PreHashMap, PreHashSet}; use massa_models::stats::ExecutionStats; use massa_models::{address::Address, amount::Amount, operation::OperationId}; -use massa_models::{block::BlockId, slot::Slot}; +use massa_models::{block_id::BlockId, slot::Slot}; use massa_storage::Storage; use parking_lot::{Condvar, Mutex, RwLock}; use std::collections::{BTreeMap, HashMap}; diff --git a/massa-execution-worker/src/execution.rs b/massa-execution-worker/src/execution.rs index 4e14a67b5fa..713e56a786d 100644 --- a/massa-execution-worker/src/execution.rs +++ 
b/massa-execution-worker/src/execution.rs @@ -11,6 +11,7 @@ use crate::active_history::{ActiveHistory, HistorySearchResult}; use crate::context::ExecutionContext; use crate::interface_impl::InterfaceImpl; +use crate::module_cache::ModuleCache; use crate::stats::ExecutionStatsCounter; use massa_async_pool::AsyncMessage; use massa_execution_exports::{ @@ -20,18 +21,18 @@ use massa_execution_exports::{ use massa_final_state::FinalState; use massa_ledger_exports::{SetOrDelete, SetUpdateOrDelete}; use massa_models::address::ExecutionAddressCycleInfo; -use massa_models::api::EventFilter; +use massa_models::execution::EventFilter; use massa_models::output_event::SCOutputEvent; use massa_models::prehash::PreHashSet; use massa_models::stats::ExecutionStats; use massa_models::{ address::Address, - block::BlockId, - operation::{OperationId, OperationType, WrappedOperation}, + block_id::BlockId, + operation::{OperationId, OperationType, SecureShareOperation}, }; use massa_models::{amount::Amount, slot::Slot}; use massa_pos_exports::SelectorController; -use massa_sc_runtime::Interface; +use massa_sc_runtime::{Interface, Response, RuntimeModule}; use massa_storage::Storage; use parking_lot::{Mutex, RwLock}; use std::collections::{BTreeMap, BTreeSet}; @@ -70,6 +71,8 @@ pub(crate) struct ExecutionState { execution_interface: Box, // execution statistics stats_counter: ExecutionStatsCounter, + // cache of pre compiled sc modules + module_cache: Arc>, } impl ExecutionState { @@ -89,11 +92,18 @@ impl ExecutionState { // Create default active history let active_history: Arc> = Default::default(); + // Initialize the SC module cache + let module_cache = Arc::new(RwLock::new(ModuleCache::new( + config.gas_costs.clone(), + config.max_module_cache_size, + ))); + // Create an empty placeholder execution context, with shared atomic access let execution_context = Arc::new(Mutex::new(ExecutionContext::new( config.clone(), final_state.clone(), active_history.clone(), + module_cache.clone(), 
))); // Instantiate the interface providing ABI access to the VM, share the execution context with it @@ -115,6 +125,7 @@ impl ExecutionState { active_cursor: last_final_slot, final_cursor: last_final_slot, stats_counter: ExecutionStatsCounter::new(config.stats_time_window_duration), + module_cache, config, } } @@ -192,7 +203,7 @@ impl ExecutionState { /// * `block_credits`: mutable reference towards the total block reward/fee credits pub fn execute_operation( &self, - operation: &WrappedOperation, + operation: &SecureShareOperation, block_slot: Slot, remaining_block_gas: &mut u64, block_credits: &mut Amount, @@ -214,7 +225,7 @@ impl ExecutionState { })?; // get the operation's sender address - let sender_addr = operation.creator_address; + let sender_addr = operation.content_creator_address; // get the thread to which the operation belongs let op_thread = sender_addr.get_thread(self.config.thread_count); @@ -270,7 +281,7 @@ impl ExecutionState { context.max_gas = operation.get_gas_usage(); // set the creator address - context.creator_address = Some(operation.creator_address); + context.creator_address = Some(operation.content_creator_address); // set the context origin operation ID context.origin_operation_id = Some(operation_id); @@ -503,17 +514,23 @@ impl ExecutionState { }; // run the VM on the bytecode contained in the operation + let module = RuntimeModule::new(bytecode, *max_gas, self.config.gas_costs.clone()) + .map_err(|err| { + ExecutionError::RuntimeError(format!( + "compilation error in execute_executesc_op: {}", + err + )) + })?; match massa_sc_runtime::run_main( - bytecode, - *max_gas, &*self.execution_interface, + module, + *max_gas, self.config.gas_costs.clone(), ) { Ok(_response) => {} Err(err) => { - // there was an error during bytecode execution return Err(ExecutionError::RuntimeError(format!( - "bytecode execution error: {}", + "module execution error in execute_executesc_op: {}", err ))); } @@ -597,25 +614,25 @@ impl ExecutionState { } // run 
the VM on the bytecode loaded from the target address + let mut module_lock = self.module_cache.write(); + let module = module_lock.get_module(&bytecode, max_gas)?; match massa_sc_runtime::run_function( - &bytecode, - max_gas, + &*self.execution_interface, + module.clone(), target_func, param, - &*self.execution_interface, + max_gas, self.config.gas_costs.clone(), ) { - Ok(_response) => {} - Err(err) => { - // there was an error during bytecode execution - return Err(ExecutionError::RuntimeError(format!( - "bytecode execution error: {}", - err - ))); + Ok(Response { init_cost, .. }) => { + module_lock.save_module(&bytecode, module, init_cost); + Ok(()) } + Err(err) => Err(ExecutionError::RuntimeError(format!( + "module execution error in execute_callsc_op: {}", + err + ))), } - - Ok(()) } /// Tries to execute an asynchronous message @@ -686,26 +703,32 @@ impl ExecutionState { bytecode }; - // run the target function - if let Err(err) = massa_sc_runtime::run_function( - &bytecode, - message.max_gas, + // run the VM on the bytecode contained in the operation + let mut module_lock = self.module_cache.write(); + let module = module_lock.get_module(&bytecode, message.max_gas)?; + match massa_sc_runtime::run_function( + &*self.execution_interface, + module.clone(), &message.handler, &message.data, - &*self.execution_interface, + message.max_gas, self.config.gas_costs.clone(), ) { - // execution failed: reset context to snapshot and reimburse sender - let err = ExecutionError::RuntimeError(format!( - "async message runtime execution error: {}", - err - )); - let mut context = context_guard!(self); - context.reset_to_snapshot(context_snapshot, err.clone()); - context.cancel_async_message(&message); - Err(err) - } else { - Ok(()) + Ok(Response { init_cost, .. 
}) => { + module_lock.save_module(&bytecode, module, init_cost); + Ok(()) + } + Err(err) => { + // execution failed: reset context to snapshot and reimburse sender + let err = ExecutionError::RuntimeError(format!( + "module execution error in execute_async_message: {}", + err + )); + let mut context = context_guard!(self); + context.reset_to_snapshot(context_snapshot, err.clone()); + context.cancel_async_message(&message); + Err(err) + } } } @@ -732,6 +755,7 @@ impl ExecutionState { exec_target.as_ref().map(|(b_id, _)| *b_id), self.final_state.clone(), self.active_history.clone(), + self.module_cache.clone(), ); // Get asynchronous messages to execute @@ -780,7 +804,7 @@ impl ExecutionState { .content .endorsements .iter() - .map(|endo| (endo.creator_address, endo.content.endorsed_block)) + .map(|endo| (endo.content_creator_address, endo.content.endorsed_block)) .unzip(); // deduce endorsement target block creators @@ -792,7 +816,7 @@ impl ExecutionState { blocks .get(b_id) .expect("endorsed block absent from storage") - .creator_address + .content_creator_address }) .collect::>() }; @@ -820,7 +844,7 @@ impl ExecutionState { } // Get block creator address - let block_creator_addr = stored_block.creator_address; + let block_creator_addr = stored_block.content_creator_address; // acquire lock on execution context let mut context = context_guard!(self); @@ -1023,11 +1047,16 @@ impl ExecutionState { ))); } - // set the execution slot to be the one after the latest executed active slot - let slot = self - .active_cursor - .get_next_slot(self.config.thread_count) - .expect("slot overflow in readonly execution"); + // set the execution slot to be the one after the latest executed active or final slot + let slot = if req.is_final { + self.final_cursor + .get_next_slot(self.config.thread_count) + .expect("slot overflow in readonly execution from final slot") + } else { + self.active_cursor + .get_next_slot(self.config.thread_count) + .expect("slot overflow in readonly 
execution from active slot") + }; // create a readonly execution context let execution_context = ExecutionContext::readonly( @@ -1037,6 +1066,7 @@ impl ExecutionState { req.call_stack, self.final_state.clone(), self.active_history.clone(), + self.module_cache.clone(), ); // run the interpreter according to the target type @@ -1046,13 +1076,26 @@ impl ExecutionState { *context_guard!(self) = execution_context; // run the bytecode's main function + let module = + RuntimeModule::new(&bytecode, req.max_gas, self.config.gas_costs.clone()) + .map_err(|err| { + ExecutionError::RuntimeError(format!( + "compilation error in execute_readonly_request: {}", + err + )) + })?; massa_sc_runtime::run_main( - &bytecode, - req.max_gas, &*self.execution_interface, + module, + req.max_gas, self.config.gas_costs.clone(), ) - .map_err(|err| ExecutionError::RuntimeError(err.to_string()))? + .map_err(|err| { + ExecutionError::RuntimeError(format!( + "module execution error in execute_readonly_request BytecodeExecution: {}", + err, + )) + })? } ReadOnlyExecutionTarget::FunctionCall { target_addr, @@ -1068,15 +1111,24 @@ impl ExecutionState { *context_guard!(self) = execution_context; // run the target function in the bytecode - massa_sc_runtime::run_function( - &bytecode, - req.max_gas, + let mut module_lock = self.module_cache.write(); + let module = module_lock.get_module(&bytecode, req.max_gas)?; + let response = massa_sc_runtime::run_function( + &*self.execution_interface, + module.clone(), &target_func, ¶meter, - &*self.execution_interface, + req.max_gas, self.config.gas_costs.clone(), ) - .map_err(|err| ExecutionError::RuntimeError(err.to_string()))? 
+ .map_err(|err| { + ExecutionError::RuntimeError(format!( + "module execution error in execute_readonly_request FunctionCall: {}", + err, + )) + })?; + module_lock.save_module(&bytecode, module, response.init_cost); + response + } + }; diff --git a/massa-execution-worker/src/interface_impl.rs b/massa-execution-worker/src/interface_impl.rs index e13c3b71f40..c89437033f5 100644 --- a/massa-execution-worker/src/interface_impl.rs +++ b/massa-execution-worker/src/interface_impl.rs @@ -14,6 +14,7 @@ use massa_models::config::MAX_DATASTORE_KEY_LENGTH; use massa_models::{ address::Address, amount::Amount, slot::Slot, timeslots::get_block_slot_timestamp, }; +use massa_sc_runtime::RuntimeModule; use massa_sc_runtime::{Interface, InterfaceClone}; use parking_lot::Mutex; use rand::Rng; @@ -57,12 +58,20 @@ impl InterfaceImpl { sender_addr: Address, operation_datastore: Option, ) -> InterfaceImpl { + use crate::module_cache::ModuleCache; use massa_ledger_exports::{LedgerEntry, SetUpdateOrDelete}; + use massa_sc_runtime::GasCosts; + use parking_lot::RwLock; let config = ExecutionConfig::default(); let (final_state, _tempfile, _tempdir) = crate::tests::get_sample_state().unwrap(); - let mut execution_context = - ExecutionContext::new(config.clone(), final_state, Default::default()); + let module_cache = Arc::new(RwLock::new(ModuleCache::new(GasCosts::default(), 1000))); + let mut execution_context = ExecutionContext::new( + config.clone(), + final_state, + Default::default(), + module_cache, + ); execution_context.stack = vec![ExecutionStackElement { address: sender_addr, coins: Amount::zero(), @@ -170,6 +179,16 @@ impl Interface for InterfaceImpl { Ok(()) } + /// Get the module from cache if possible, compile it if not + /// + /// # Returns + /// A `massa-sc-runtime` compiled module + fn get_module(&self, bytecode: &[u8], limit: u64) -> Result { + let context = context_guard!(self); + let module = context.module_cache.write().get_module(bytecode, limit)?; + Ok(module) + } + + /// 
Gets the balance of the current address address (top of the stack). /// /// # Returns diff --git a/massa-execution-worker/src/lib.rs b/massa-execution-worker/src/lib.rs index 2c89fa46d72..cd937a9778f 100644 --- a/massa-execution-worker/src/lib.rs +++ b/massa-execution-worker/src/lib.rs @@ -88,6 +88,7 @@ mod context; mod controller; mod execution; mod interface_impl; +mod module_cache; mod request_queue; mod slot_sequencer; mod speculative_async_pool; diff --git a/massa-execution-worker/src/module_cache.rs b/massa-execution-worker/src/module_cache.rs new file mode 100644 index 00000000000..ffff1be0d73 --- /dev/null +++ b/massa-execution-worker/src/module_cache.rs @@ -0,0 +1,65 @@ +use massa_execution_exports::ExecutionError; +use massa_hash::Hash; +use massa_models::prehash::BuildHashMapper; +use massa_sc_runtime::{GasCosts, RuntimeModule}; +use schnellru::{ByLength, LruMap}; + +/// `LruMap` specialization for `PreHashed` keys +pub type PreHashLruMap = LruMap>; + +/// LRU cache of compiled runtime modules. +/// The LRU caching scheme is to remove the least recently used module when the cache is full. 
+/// +/// * key: raw bytecode (which is hashed on insertion in LruMap) +/// * value.0: corresponding compiled module +/// * value.1: instance initialization cost +pub struct ModuleCache { + gas_costs: GasCosts, + cache: PreHashLruMap, +} + +impl ModuleCache { + pub fn new(gas_costs: GasCosts, cache_size: u32) -> Self { + Self { + gas_costs, + cache: LruMap::with_hasher(ByLength::new(cache_size), BuildHashMapper::default()), + } + } + + /// If the module is contained in the cache: + /// * retrieve a copy of it + /// * move it up in the LRU cache + /// + /// If the module is not contained in the cache: + /// * create the module + /// * retrieve it + pub fn get_module( + &mut self, + bytecode: &[u8], + limit: u64, + ) -> Result { + if let Some((cached_module, init_cost)) = self.cache.get(&Hash::compute_from(bytecode)) { + if limit < *init_cost { + return Err(ExecutionError::RuntimeError( + "given gas cannot cover the initialization costs".to_string(), + )); + } + Ok(cached_module.clone()) + } else { + let new_module = + RuntimeModule::new(bytecode, limit, self.gas_costs.clone()).map_err(|err| { + ExecutionError::RuntimeError(format!( + "compilation of missing cache module failed: {}", + err + )) + })?; + Ok(new_module) + } + } + + /// Save a module in the cache + pub fn save_module(&mut self, bytecode: &[u8], module: RuntimeModule, init_cost: u64) { + self.cache + .insert(Hash::compute_from(bytecode), (module, init_cost)); + } +} diff --git a/massa-execution-worker/src/slot_sequencer.rs b/massa-execution-worker/src/slot_sequencer.rs index 9274857c099..e26350125a1 100644 --- a/massa-execution-worker/src/slot_sequencer.rs +++ b/massa-execution-worker/src/slot_sequencer.rs @@ -6,7 +6,7 @@ use std::collections::{HashMap, VecDeque}; use massa_execution_exports::ExecutionConfig; use massa_models::{ - block::BlockId, + block_id::BlockId, prehash::PreHashMap, slot::Slot, timeslots::{get_block_slot_timestamp, get_latest_block_slot_at_timestamp}, diff --git 
a/massa-execution-worker/src/speculative_ledger.rs b/massa-execution-worker/src/speculative_ledger.rs index 49fa3e3128f..a251f3c050d 100644 --- a/massa-execution-worker/src/speculative_ledger.rs +++ b/massa-execution-worker/src/speculative_ledger.rs @@ -147,10 +147,11 @@ impl SpeculativeLedger { if let Some(from_addr) = from_addr { let new_balance = self .get_balance(&from_addr) - .ok_or_else(|| ExecutionError::RuntimeError("source addr not found".to_string()))? + .ok_or_else(|| ExecutionError::RuntimeError(format!("spending address {} not found", from_addr)))? .checked_sub(amount) .ok_or_else(|| { - ExecutionError::RuntimeError("insufficient from_addr balance".into()) + ExecutionError::RuntimeError(format!("failed to transfer {} from spending address {} due to insufficient balance {}", amount, from_addr, self + .get_balance(&from_addr).unwrap_or_default())) })?; changes.set_balance(from_addr, new_balance); } @@ -161,14 +162,17 @@ impl SpeculativeLedger { let old_balance = changes.get_balance_or_else(&to_addr, || self.get_balance(&to_addr)); match (old_balance, from_addr) { // if `to_addr` exists we increase the balance - (Some(old_balance), _) => { + (Some(old_balance), _from_addr) => { let new_balance = old_balance.checked_add(amount).ok_or_else(|| { - ExecutionError::RuntimeError("overflow in to_addr balance".into()) + ExecutionError::RuntimeError(format!( + "overflow in crediting address {} balance {} due to adding {}", + to_addr, old_balance, amount + )) })?; changes.set_balance(to_addr, new_balance); } // if `to_addr` doesn't exist but `from_addr` is defined. `from_addr` will create the address using the coins sent. 
- (None, Some(_)) => { + (None, Some(_from_addr)) => { //TODO: Remove when stabilized debug!("Creating address {} from coins in transactions", to_addr); if amount >= self.storage_costs_constants.ledger_entry_base_cost { @@ -179,17 +183,18 @@ impl SpeculativeLedger { .checked_sub(self.storage_costs_constants.ledger_entry_base_cost) .ok_or_else(|| { ExecutionError::RuntimeError( - "overflow in subtract ledger cost for addr".to_string(), + format!("underflow in subtract ledger cost {} for new crediting address {}", self.storage_costs_constants.ledger_entry_base_cost, to_addr), ) })?, ); } else { - return Err(ExecutionError::RuntimeError( - "insufficient amount to create receiver address".to_string(), - )); + return Err(ExecutionError::RuntimeError(format!( + "insufficient amount {} to create crediting address {}", + amount, to_addr + ))); } } - // if `from_addr` is none and `to_addr` doesn't exist try to create it from coins sent + // if `from_addr` is none and `to_addr` doesn't exist (in the ledger) try to create it from coins sent (None, None) => { //TODO: Remove when stabilized debug!("Creating address {} from coins generated", to_addr); @@ -202,7 +207,7 @@ impl SpeculativeLedger { .checked_sub(self.storage_costs_constants.ledger_entry_base_cost) .ok_or_else(|| { ExecutionError::RuntimeError( - "overflow in subtract ledger cost for addr".to_string(), + format!("underflow in subtract ledger cost {} for new crediting address {}", self.storage_costs_constants.ledger_entry_base_cost, to_addr), ) })?, ); @@ -455,9 +460,12 @@ impl SpeculativeLedger { .fetch_active_history_data_entry(addr, key) { HistorySearchResult::Present(_entry) => true, - HistorySearchResult::NoInfo => { - self.final_state.read().ledger.has_data_entry(addr, key) - } + HistorySearchResult::NoInfo => self + .final_state + .read() + .ledger + .get_data_entry(addr, key) + .is_some(), HistorySearchResult::Absent => false, } }) diff --git a/massa-execution-worker/src/speculative_roll_state.rs 
b/massa-execution-worker/src/speculative_roll_state.rs index 7da56cb3035..7a2d0ad7a52 100644 --- a/massa-execution-worker/src/speculative_roll_state.rs +++ b/massa-execution-worker/src/speculative_roll_state.rs @@ -5,7 +5,7 @@ use massa_execution_exports::ExecutionError; use massa_final_state::FinalState; use massa_models::address::ExecutionAddressCycleInfo; use massa_models::{ - address::Address, amount::Amount, block::BlockId, prehash::PreHashMap, slot::Slot, + address::Address, amount::Amount, block_id::BlockId, prehash::PreHashMap, slot::Slot, }; use massa_pos_exports::{DeferredCredits, PoSChanges, ProductionStats}; use num::rational::Ratio; diff --git a/massa-execution-worker/src/tests/mock.rs b/massa-execution-worker/src/tests/mock.rs index 119ae0226fa..e5fff335d06 100644 --- a/massa-execution-worker/src/tests/mock.rs +++ b/massa-execution-worker/src/tests/mock.rs @@ -1,8 +1,18 @@ +use massa_execution_exports::ExecutionError; use massa_final_state::{FinalState, FinalStateConfig}; -use massa_ledger_exports::LedgerEntry; -use massa_ledger_exports::{LedgerConfig, LedgerController, LedgerError}; +use massa_hash::Hash; +use massa_ledger_exports::{LedgerConfig, LedgerController, LedgerEntry, LedgerError}; use massa_ledger_worker::FinalLedger; -use massa_models::{address::Address, amount::Amount, config::THREAD_COUNT}; +use massa_models::{ + address::Address, + amount::Amount, + block::{Block, BlockSerializer, SecureShareBlock}, + block_header::{BlockHeader, BlockHeaderSerializer}, + config::THREAD_COUNT, + operation::SecureShareOperation, + secure_share::SecureShareContent, + slot::Slot, +}; use massa_pos_exports::SelectorConfig; use massa_pos_worker::start_selector_worker; use massa_signature::KeyPair; @@ -14,22 +24,7 @@ use std::{ io::Seek, sync::Arc, }; -use tempfile::NamedTempFile; -use tempfile::TempDir; - -#[cfg(feature = "testing")] -use massa_models::{ - block::{Block, BlockHeader, BlockHeaderSerializer, BlockSerializer, WrappedBlock}, - 
operation::WrappedOperation, - slot::Slot, - wrapped::WrappedContent, -}; - -#[cfg(feature = "testing")] -use massa_execution_exports::ExecutionError; - -#[cfg(feature = "testing")] -use massa_hash::Hash; +use tempfile::{NamedTempFile, TempDir}; fn get_initials() -> (NamedTempFile, HashMap) { let file = NamedTempFile::new().unwrap(); @@ -100,7 +95,7 @@ fn get_initials() -> (NamedTempFile, HashMap) { /// Same as `get_random_address()` and return `keypair` associated /// to the address. -#[cfg(feature = "testing")] +#[allow(dead_code)] // to avoid warnings on gas_calibration feature pub fn get_random_address_full() -> (Address, KeyPair) { let keypair = KeyPair::generate(); (Address::from_public_key(&keypair.get_public_key()), keypair) @@ -136,19 +131,19 @@ pub fn get_sample_state() -> Result<(Arc>, NamedTempFile, Tem /// creator. /// /// Return a result that should be unwrapped in the root `#[test]` routine. -#[cfg(feature = "testing")] +#[allow(dead_code)] // to avoid warnings on gas_calibration feature pub fn create_block( creator_keypair: KeyPair, - operations: Vec, + operations: Vec, slot: Slot, -) -> Result { +) -> Result { let operation_merkle_root = Hash::compute_from( &operations.iter().fold(Vec::new(), |acc, v| { [acc, v.serialized_data.clone()].concat() })[..], ); - let header = BlockHeader::new_wrapped( + let header = BlockHeader::new_verifiable( BlockHeader { slot, parents: vec![], @@ -159,7 +154,7 @@ pub fn create_block( &creator_keypair, )?; - Ok(Block::new_wrapped( + Ok(Block::new_verifiable( Block { header, operations: operations.into_iter().map(|op| op.id).collect(), diff --git a/massa-execution-worker/src/tests/scenarios_mandatories.rs b/massa-execution-worker/src/tests/scenarios_mandatories.rs index 2ca0e35490f..758faf813f6 100644 --- a/massa-execution-worker/src/tests/scenarios_mandatories.rs +++ b/massa-execution-worker/src/tests/scenarios_mandatories.rs @@ -10,11 +10,11 @@ use massa_models::config::{LEDGER_ENTRY_BASE_SIZE, 
LEDGER_ENTRY_DATASTORE_BASE_S use massa_models::prehash::PreHashMap; use massa_models::{address::Address, amount::Amount, slot::Slot}; use massa_models::{ - api::EventFilter, - block::BlockId, + block_id::BlockId, datastore::Datastore, - operation::{Operation, OperationSerializer, OperationType, WrappedOperation}, - wrapped::WrappedContent, + execution::EventFilter, + operation::{Operation, OperationSerializer, OperationType, SecureShareOperation}, + secure_share::SecureShareContent, }; use massa_signature::KeyPair; use massa_storage::Storage; @@ -57,12 +57,26 @@ fn test_sending_command() { #[test] #[serial] fn test_readonly_execution() { + // setup the period duration + let exec_cfg = ExecutionConfig { + t0: 100.into(), + cursor_delay: 0.into(), + ..ExecutionConfig::default() + }; + // get a sample final state let (sample_state, _keep_file, _keep_dir) = get_sample_state().unwrap(); + // init the storage + let storage = Storage::create_root(); + // start the execution worker let (mut manager, controller) = start_execution_worker( - ExecutionConfig::default(), + exec_cfg.clone(), sample_state.clone(), sample_state.read().pos_state.selector.clone(), ); + // initialize the execution system with genesis blocks + init_execution_worker(&exec_cfg, &storage, controller.clone()); + std::thread::sleep(Duration::from_millis(1000)); + let mut res = controller .execute_readonly_request(ReadOnlyExecutionRequest { max_gas: 1_000_000, @@ -70,12 +84,25 @@ fn test_readonly_execution() { target: ReadOnlyExecutionTarget::BytecodeExecution( include_bytes!("./wasm/event_test.wasm").to_vec(), ), + is_final: true, }) .expect("readonly execution failed"); - + assert_eq!(res.out.slot, Slot::new(1, 0)); assert!(res.gas_cost > 0); assert_eq!(res.out.events.take().len(), 1, "wrong number of events"); + let res = controller + .execute_readonly_request(ReadOnlyExecutionRequest { + max_gas: 1_000_000, + call_stack: vec![], + target: ReadOnlyExecutionTarget::BytecodeExecution( + 
include_bytes!("./wasm/event_test.wasm").to_vec(), + ), + is_final: false, + }) + .expect("readonly execution failed"); + assert!(res.out.slot.period > 8); + manager.stop(); } @@ -409,9 +436,11 @@ fn local_execution() { &Address::from_str("A12eS5qggxuvqviD5eQ72oM2QhGwnmNbT1BaxVXU4hqQ8rAYXFe").unwrap() ); assert_eq!(events[2].data, "one local execution completed"); - assert_eq!( - Amount::from_raw(events[5].data.parse().unwrap()), - Amount::from_str("299_979.05275").unwrap() // start (299_000) - fee (1000) - storage cost + let amount = Amount::from_raw(events[5].data.parse().unwrap()); + assert!( + // start (299_000) - fee (1000) - storage cost + Amount::from_str("299_979").unwrap() < amount + && amount < Amount::from_str("299_980").unwrap() ); assert_eq!(events[5].context.call_stack.len(), 1); assert_eq!( @@ -684,7 +713,7 @@ pub fn send_and_receive_transaction() { KeyPair::from_str("S1JJeHiZv1C1zZN5GLFcbz6EXYiccmUPLkYuDFA3kayjxP39kFQ").unwrap(); let (recipient_address, _keypair) = get_random_address_full(); // create the operation - let operation = Operation::new_wrapped( + let operation = Operation::new_verifiable( Operation { fee: Amount::zero(), expire_period: 10, @@ -761,7 +790,7 @@ pub fn roll_buy() { let keypair = KeyPair::from_str("S1JJeHiZv1C1zZN5GLFcbz6EXYiccmUPLkYuDFA3kayjxP39kFQ").unwrap(); let address = Address::from_public_key(&keypair.get_public_key()); // create the operation - let operation = Operation::new_wrapped( + let operation = Operation::new_verifiable( Operation { fee: Amount::zero(), expire_period: 10, @@ -834,13 +863,16 @@ pub fn roll_sell() { let keypair = KeyPair::from_str("S1JJeHiZv1C1zZN5GLFcbz6EXYiccmUPLkYuDFA3kayjxP39kFQ").unwrap(); let address = Address::from_public_key(&keypair.get_public_key()); + // get initial balance + let balance_initial = sample_state.read().ledger.get_balance(&address).unwrap(); + // get initial roll count let roll_count_initial = sample_state.read().pos_state.get_rolls_for(&address); let roll_sell_1 = 10; 
let roll_sell_2 = 1; // create operation 1 - let operation1 = Operation::new_wrapped( + let operation1 = Operation::new_verifiable( Operation { fee: Amount::zero(), expire_period: 10, @@ -852,7 +884,7 @@ pub fn roll_sell() { &keypair, ) .unwrap(); - let operation2 = Operation::new_wrapped( + let operation2 = Operation::new_verifiable( Operation { fee: Amount::zero(), expire_period: 10, @@ -915,6 +947,20 @@ pub fn roll_sell() { credits ); + // Now check balance + let balances = controller.get_final_and_candidate_balance(&[address]); + let candidate_balance = balances.get(0).unwrap().1.unwrap(); + + assert_eq!( + candidate_balance, + exec_cfg + .roll_price + .checked_mul_u64(roll_sell_1 + roll_sell_2) + .unwrap() + .checked_add(balance_initial) + .unwrap() + ); + // stop the execution controller manager.stop(); } @@ -1024,7 +1070,7 @@ fn sc_datastore() { let mut block_storage: PreHashMap = Default::default(); block_storage.insert(block.id, storage.clone()); controller.update_blockclique_status(finalized_blocks, Some(Default::default()), block_storage); - std::thread::sleep(Duration::from_millis(10)); + std::thread::sleep(Duration::from_millis(100)); // retrieve the event emitted by the execution error let events = controller.get_filtered_sc_output_event(EventFilter::default()); @@ -1283,13 +1329,13 @@ fn create_execute_sc_operation( sender_keypair: &KeyPair, data: &[u8], datastore: Datastore, -) -> Result { +) -> Result { let op = OperationType::ExecuteSC { data: data.to_vec(), max_gas: 1_000_000, datastore, }; - let op = Operation::new_wrapped( + let op = Operation::new_verifiable( Operation { fee: Amount::from_mantissa_scale(10, 0), expire_period: 10, @@ -1310,7 +1356,7 @@ fn create_call_sc_operation( target_addr: Address, target_func: String, param: Vec, -) -> Result { +) -> Result { let op = OperationType::CallSC { max_gas, target_addr, @@ -1318,7 +1364,7 @@ fn create_call_sc_operation( target_func, param, }; - let op = Operation::new_wrapped( + let op = 
Operation::new_verifiable( Operation { fee, expire_period: 10, diff --git a/massa-execution-worker/src/tests/wasm/datastore.wasm b/massa-execution-worker/src/tests/wasm/datastore.wasm index 699ded6b422..f0d782b705b 100644 Binary files a/massa-execution-worker/src/tests/wasm/datastore.wasm and b/massa-execution-worker/src/tests/wasm/datastore.wasm differ diff --git a/massa-execution-worker/src/tests/wasm/datastore_manipulations.wasm b/massa-execution-worker/src/tests/wasm/datastore_manipulations.wasm index 54c72144045..7fb64a5d782 100644 Binary files a/massa-execution-worker/src/tests/wasm/datastore_manipulations.wasm and b/massa-execution-worker/src/tests/wasm/datastore_manipulations.wasm differ diff --git a/massa-execution-worker/src/tests/wasm/deploy_sc.wasm b/massa-execution-worker/src/tests/wasm/deploy_sc.wasm index 9c1d6b83aba..5e88d4dd0c9 100644 Binary files a/massa-execution-worker/src/tests/wasm/deploy_sc.wasm and b/massa-execution-worker/src/tests/wasm/deploy_sc.wasm differ diff --git a/massa-execution-worker/src/tests/wasm/execution_error.wasm b/massa-execution-worker/src/tests/wasm/execution_error.wasm index 12fd84d7d03..427c7bfe9be 100644 Binary files a/massa-execution-worker/src/tests/wasm/execution_error.wasm and b/massa-execution-worker/src/tests/wasm/execution_error.wasm differ diff --git a/massa-execution-worker/src/tests/wasm/init_sc.wasm b/massa-execution-worker/src/tests/wasm/init_sc.wasm index 15e22d7587d..b8635206a23 100644 Binary files a/massa-execution-worker/src/tests/wasm/init_sc.wasm and b/massa-execution-worker/src/tests/wasm/init_sc.wasm differ diff --git a/massa-execution-worker/src/tests/wasm/local.wasm b/massa-execution-worker/src/tests/wasm/local.wasm new file mode 100644 index 00000000000..3e0c560e6ba Binary files /dev/null and b/massa-execution-worker/src/tests/wasm/local.wasm differ diff --git a/massa-execution-worker/src/tests/wasm/local_call.wasm b/massa-execution-worker/src/tests/wasm/local_call.wasm index 
e7cbd30347c..1548e6aaf10 100644 Binary files a/massa-execution-worker/src/tests/wasm/local_call.wasm and b/massa-execution-worker/src/tests/wasm/local_call.wasm differ diff --git a/massa-execution-worker/src/tests/wasm/local_execution.wasm b/massa-execution-worker/src/tests/wasm/local_execution.wasm index d0bd976d3af..b4433fec193 100644 Binary files a/massa-execution-worker/src/tests/wasm/local_execution.wasm and b/massa-execution-worker/src/tests/wasm/local_execution.wasm differ diff --git a/massa-execution-worker/src/tests/wasm/local_function.wasm b/massa-execution-worker/src/tests/wasm/local_function.wasm index 51ccf225167..cc965cf45a2 100644 Binary files a/massa-execution-worker/src/tests/wasm/local_function.wasm and b/massa-execution-worker/src/tests/wasm/local_function.wasm differ diff --git a/massa-execution-worker/src/tests/wasm/nested_call.wasm b/massa-execution-worker/src/tests/wasm/nested_call.wasm index 291d1f7b4bb..6db9aa27342 100644 Binary files a/massa-execution-worker/src/tests/wasm/nested_call.wasm and b/massa-execution-worker/src/tests/wasm/nested_call.wasm differ diff --git a/massa-execution-worker/src/tests/wasm/receive_message.wasm b/massa-execution-worker/src/tests/wasm/receive_message.wasm index 48f1d4fcc6f..0d3304ad84e 100644 Binary files a/massa-execution-worker/src/tests/wasm/receive_message.wasm and b/massa-execution-worker/src/tests/wasm/receive_message.wasm differ diff --git a/massa-execution-worker/src/tests/wasm/send_message.wasm b/massa-execution-worker/src/tests/wasm/send_message.wasm index 2acdf4feff6..55f66fe4920 100644 Binary files a/massa-execution-worker/src/tests/wasm/send_message.wasm and b/massa-execution-worker/src/tests/wasm/send_message.wasm differ diff --git a/massa-execution-worker/src/tests/wasm/send_message_condition.wasm b/massa-execution-worker/src/tests/wasm/send_message_condition.wasm index 0a463af986f..4e455612815 100644 Binary files a/massa-execution-worker/src/tests/wasm/send_message_condition.wasm and 
b/massa-execution-worker/src/tests/wasm/send_message_condition.wasm differ diff --git a/massa-execution-worker/src/tests/wasm/send_message_condition_deployer.wasm b/massa-execution-worker/src/tests/wasm/send_message_condition_deployer.wasm index 42e22ea9b50..42514ce24d5 100644 Binary files a/massa-execution-worker/src/tests/wasm/send_message_condition_deployer.wasm and b/massa-execution-worker/src/tests/wasm/send_message_condition_deployer.wasm differ diff --git a/massa-execution-worker/src/tests/wasm/send_message_trigger.wasm b/massa-execution-worker/src/tests/wasm/send_message_trigger.wasm index 54b73674343..01a8b26b3f0 100644 Binary files a/massa-execution-worker/src/tests/wasm/send_message_trigger.wasm and b/massa-execution-worker/src/tests/wasm/send_message_trigger.wasm differ diff --git a/massa-execution-worker/src/tests/wasm/send_message_wrong_trigger.wasm b/massa-execution-worker/src/tests/wasm/send_message_wrong_trigger.wasm index 0eb06a893b9..0e07a54748c 100644 Binary files a/massa-execution-worker/src/tests/wasm/send_message_wrong_trigger.wasm and b/massa-execution-worker/src/tests/wasm/send_message_wrong_trigger.wasm differ diff --git a/massa-execution-worker/src/tests/wasm/set_bytecode_fail.wasm b/massa-execution-worker/src/tests/wasm/set_bytecode_fail.wasm index f425ff63861..dbc146e5520 100644 Binary files a/massa-execution-worker/src/tests/wasm/set_bytecode_fail.wasm and b/massa-execution-worker/src/tests/wasm/set_bytecode_fail.wasm differ diff --git a/massa-execution-worker/src/tests/wasm/smart-contract.wasm b/massa-execution-worker/src/tests/wasm/smart-contract.wasm index 7e26ded01f8..87e783fbf68 100644 Binary files a/massa-execution-worker/src/tests/wasm/smart-contract.wasm and b/massa-execution-worker/src/tests/wasm/smart-contract.wasm differ diff --git a/massa-execution-worker/src/tests/wasm/test.wasm b/massa-execution-worker/src/tests/wasm/test.wasm index b01f9f4d5b2..1a9571d9d9b 100644 Binary files 
a/massa-execution-worker/src/tests/wasm/test.wasm and b/massa-execution-worker/src/tests/wasm/test.wasm differ diff --git a/massa-execution-worker/src/tests/wasm/use_builtins.wasm b/massa-execution-worker/src/tests/wasm/use_builtins.wasm index e850657066c..d7518bc31be 100644 Binary files a/massa-execution-worker/src/tests/wasm/use_builtins.wasm and b/massa-execution-worker/src/tests/wasm/use_builtins.wasm differ diff --git a/massa-execution-worker/src/worker.rs b/massa-execution-worker/src/worker.rs index 667d2feaee8..6ca82570f7c 100644 --- a/massa-execution-worker/src/worker.rs +++ b/massa-execution-worker/src/worker.rs @@ -14,7 +14,7 @@ use massa_execution_exports::{ ReadOnlyExecutionOutput, ReadOnlyExecutionRequest, }; use massa_final_state::FinalState; -use massa_models::block::BlockId; +use massa_models::block_id::BlockId; use massa_models::slot::Slot; use massa_pos_exports::SelectorController; use massa_storage::Storage; @@ -85,8 +85,8 @@ impl ExecutionThread { if let Some(req_resp) = self.readonly_requests.pop() { let (req, resp_tx) = req_resp.into_request_sender_pair(); - // Acquire read access to the execution state and execute the read-only request - let outcome = self.execution_state.read().execute_readonly_request(req); + // Acquire write access to the execution state (for cache updates) and execute the read-only request + let outcome = self.execution_state.write().execute_readonly_request(req); // Send the execution output through resp_tx. 
// Ignore errors because they just mean that the request emitter dropped the received diff --git a/massa-factory-exports/src/test_exports/tools.rs b/massa-factory-exports/src/test_exports/tools.rs index 6634f703e7b..1427748a508 100644 --- a/massa-factory-exports/src/test_exports/tools.rs +++ b/massa-factory-exports/src/test_exports/tools.rs @@ -1,14 +1,15 @@ use massa_hash::Hash; use massa_models::{ - block::{Block, BlockHeader, BlockHeaderSerializer, BlockSerializer, WrappedBlock}, + block::{Block, BlockSerializer, SecureShareBlock}, + block_header::{BlockHeader, BlockHeaderSerializer}, + secure_share::SecureShareContent, slot::Slot, - wrapped::WrappedContent, }; use massa_signature::KeyPair; /// Create an empty block for testing. Can be used to generate genesis blocks. -pub fn create_empty_block(keypair: &KeyPair, slot: &Slot) -> WrappedBlock { - let header = BlockHeader::new_wrapped( +pub fn create_empty_block(keypair: &KeyPair, slot: &Slot) -> SecureShareBlock { + let header = BlockHeader::new_verifiable( BlockHeader { slot: *slot, parents: Vec::new(), @@ -20,7 +21,7 @@ pub fn create_empty_block(keypair: &KeyPair, slot: &Slot) -> WrappedBlock { ) .unwrap(); - Block::new_wrapped( + Block::new_verifiable( Block { header, operations: Default::default(), diff --git a/massa-factory-worker/src/block_factory.rs b/massa-factory-worker/src/block_factory.rs index 39cf3c9f9c6..8655aa7de7c 100644 --- a/massa-factory-worker/src/block_factory.rs +++ b/massa-factory-worker/src/block_factory.rs @@ -3,12 +3,14 @@ use massa_factory_exports::{FactoryChannels, FactoryConfig}; use massa_hash::Hash; use massa_models::{ - block::{Block, BlockHeader, BlockHeaderSerializer, BlockId, BlockSerializer, WrappedHeader}, - endorsement::WrappedEndorsement, + block::{Block, BlockSerializer}, + block_header::{BlockHeader, BlockHeaderSerializer, SecuredHeader}, + block_id::BlockId, + endorsement::SecureShareEndorsement, prehash::PreHashSet, + secure_share::SecureShareContent, slot::Slot, 
timeslots::{get_block_slot_timestamp, get_closest_slot_to_timestamp}, - wrapped::WrappedContent, }; use massa_time::MassaTime; use massa_wallet::Wallet; @@ -171,7 +173,7 @@ impl BlockFactoryWorker { .get_block_endorsements(&same_thread_parent_id, &slot); //TODO: Do we want ot populate only with endorsement id in the future ? - let endorsements: Vec = { + let endorsements: Vec = { let endo_read = endo_storage.read_endorsements(); endorsements_ids .into_iter() @@ -197,7 +199,7 @@ impl BlockFactoryWorker { ); // create header - let header: WrappedHeader = BlockHeader::new_wrapped::( + let header: SecuredHeader = BlockHeader::new_verifiable::( BlockHeader { slot, parents: parents.into_iter().map(|(id, _period)| id).collect(), @@ -210,7 +212,7 @@ impl BlockFactoryWorker { .expect("error while producing block header"); // create block - let block = Block::new_wrapped( + let block = Block::new_verifiable( Block { header, operations: op_ids.into_iter().collect(), diff --git a/massa-factory-worker/src/endorsement_factory.rs b/massa-factory-worker/src/endorsement_factory.rs index 797c10b797d..1e9f2dc8f0f 100644 --- a/massa-factory-worker/src/endorsement_factory.rs +++ b/massa-factory-worker/src/endorsement_factory.rs @@ -2,11 +2,11 @@ use massa_factory_exports::{FactoryChannels, FactoryConfig}; use massa_models::{ - block::BlockId, - endorsement::{Endorsement, EndorsementSerializer, WrappedEndorsement}, + block_id::BlockId, + endorsement::{Endorsement, EndorsementSerializer, SecureShareEndorsement}, + secure_share::SecureShareContent, slot::Slot, timeslots::{get_block_slot_timestamp, get_closest_slot_to_timestamp}, - wrapped::WrappedContent, }; use massa_signature::KeyPair; use massa_time::MassaTime; @@ -167,9 +167,10 @@ impl EndorsementFactoryWorker { .get_latest_blockclique_block_at_slot(slot); // produce endorsements - let mut endorsements: Vec = Vec::with_capacity(producers_indices.len()); + let mut endorsements: Vec = + Vec::with_capacity(producers_indices.len()); for 
(keypair, index) in producers_indices { - let endorsement = Endorsement::new_wrapped( + let endorsement = Endorsement::new_verifiable( Endorsement { slot, index: index as u32, @@ -183,7 +184,7 @@ impl EndorsementFactoryWorker { // log endorsement creation debug!( "endorsement {} created at slot {} by address {}", - endorsement.id, endorsement.content.slot, endorsement.creator_address + endorsement.id, endorsement.content.slot, endorsement.content_creator_address ); endorsements.push(endorsement); diff --git a/massa-factory-worker/src/tests/scenarios.rs b/massa-factory-worker/src/tests/scenarios.rs index fc3fef81c19..45267a5150e 100644 --- a/massa-factory-worker/src/tests/scenarios.rs +++ b/massa-factory-worker/src/tests/scenarios.rs @@ -2,7 +2,7 @@ use super::TestFactory; use massa_models::{ amount::Amount, operation::{Operation, OperationSerializer, OperationType}, - wrapped::WrappedContent, + secure_share::SecureShareContent, }; use massa_signature::KeyPair; use std::str::FromStr; @@ -29,7 +29,8 @@ fn basic_creation_with_operation() { expire_period: 2, op: OperationType::RollBuy { roll_count: 1 }, }; - let operation = Operation::new_wrapped(content, OperationSerializer::new(), &keypair).unwrap(); + let operation = + Operation::new_verifiable(content, OperationSerializer::new(), &keypair).unwrap(); let (block_id, storage) = test_factory.get_next_created_block(Some(vec![operation]), None); let block = storage.read_blocks().get(&block_id).unwrap().clone(); @@ -51,7 +52,8 @@ fn basic_creation_with_multiple_operations() { expire_period: 2, op: OperationType::RollBuy { roll_count: 1 }, }; - let operation = Operation::new_wrapped(content, OperationSerializer::new(), &keypair).unwrap(); + let operation = + Operation::new_verifiable(content, OperationSerializer::new(), &keypair).unwrap(); let (block_id, storage) = test_factory.get_next_created_block(Some(vec![operation.clone(), operation]), None); diff --git a/massa-factory-worker/src/tests/tools.rs 
b/massa-factory-worker/src/tests/tools.rs index c0f8acb0ca8..afb1336d9de 100644 --- a/massa-factory-worker/src/tests/tools.rs +++ b/massa-factory-worker/src/tests/tools.rs @@ -12,9 +12,9 @@ use massa_factory_exports::{ test_exports::create_empty_block, FactoryChannels, FactoryConfig, FactoryManager, }; use massa_models::{ - address::Address, block::BlockId, config::ENDORSEMENT_COUNT, endorsement::WrappedEndorsement, - operation::WrappedOperation, prehash::PreHashMap, slot::Slot, - test_exports::get_next_slot_instant, + address::Address, block_id::BlockId, config::ENDORSEMENT_COUNT, + endorsement::SecureShareEndorsement, operation::SecureShareOperation, prehash::PreHashMap, + slot::Slot, test_exports::get_next_slot_instant, }; use massa_pool_exports::test_exports::{ MockPoolController, MockPoolControllerMessage, PoolEventReceiver, @@ -110,8 +110,8 @@ impl TestFactory { /// - `endorsements`: Optional list of endorsements to include in the block pub fn get_next_created_block( &mut self, - operations: Option>, - endorsements: Option>, + operations: Option>, + endorsements: Option>, ) -> (BlockId, Storage) { let now = MassaTime::now().expect("could not get current time"); let next_slot_instant = get_next_slot_instant( diff --git a/massa-final-state/Cargo.toml b/massa-final-state/Cargo.toml index 29ae05d130b..d5acaed693d 100644 --- a/massa-final-state/Cargo.toml +++ b/massa-final-state/Cargo.toml @@ -6,6 +6,7 @@ edition = "2021" [dependencies] displaydoc = "0.2" +serde = { version = "1.0", features = ["derive"] } nom = "7.1" thiserror = "1.0" tracing = "0.1" diff --git a/massa-final-state/src/state_changes.rs b/massa-final-state/src/state_changes.rs index 69142cc17fc..06a0dc705bc 100644 --- a/massa-final-state/src/state_changes.rs +++ b/massa-final-state/src/state_changes.rs @@ -16,9 +16,10 @@ use nom::{ sequence::tuple, IResult, Parser, }; +use serde::{Deserialize, Serialize}; /// represents changes that can be applied to the execution state -#[derive(Default, Debug, 
Clone)] +#[derive(Default, Debug, Clone, Deserialize, Serialize)] pub struct StateChanges { /// ledger changes pub ledger_changes: LedgerChanges, diff --git a/massa-ledger-exports/src/controller.rs b/massa-ledger-exports/src/controller.rs index 016b08f07ed..e25f43e27bb 100644 --- a/massa-ledger-exports/src/controller.rs +++ b/massa-ledger-exports/src/controller.rs @@ -42,16 +42,6 @@ pub trait LedgerController: Send + Sync + Debug { /// A copy of the datastore value, or `None` if the ledger entry or datastore entry was not found fn get_data_entry(&self, addr: &Address, key: &[u8]) -> Option>; - /// Checks for the existence of a datastore entry for a given address. - /// - /// # Arguments - /// * `addr`: target address - /// * `key`: datastore key - /// - /// # Returns - /// true if the datastore entry was found, or false if the ledger entry or datastore entry was not found - fn has_data_entry(&self, addr: &Address, key: &[u8]) -> bool; - /// Get every key of the datastore for a given address. 
/// /// # Returns diff --git a/massa-ledger-exports/src/ledger_changes.rs b/massa-ledger-exports/src/ledger_changes.rs index 3b74466db01..4b7cd61cf1c 100644 --- a/massa-ledger-exports/src/ledger_changes.rs +++ b/massa-ledger-exports/src/ledger_changes.rs @@ -19,11 +19,12 @@ use nom::error::{context, ContextError, ParseError}; use nom::multi::length_count; use nom::sequence::tuple; use nom::{IResult, Parser}; +use serde::{Deserialize, Serialize}; use std::collections::{hash_map, BTreeMap}; use std::ops::Bound::Included; /// represents an update to one or more fields of a `LedgerEntry` -#[derive(Default, Debug, Clone, PartialEq, Eq)] +#[derive(Default, Debug, Clone, PartialEq, Eq, Deserialize, Serialize)] pub struct LedgerEntryUpdate { /// change the balance pub balance: SetOrKeep, @@ -324,7 +325,7 @@ impl Applicable for LedgerEntryUpdate { } /// represents a list of changes to multiple ledger entries -#[derive(Default, Debug, Clone, PartialEq, Eq)] +#[derive(Default, Debug, Clone, PartialEq, Eq, Deserialize, Serialize)] pub struct LedgerChanges( pub PreHashMap>, ); diff --git a/massa-ledger-exports/src/types.rs b/massa-ledger-exports/src/types.rs index bf1f11ed12e..9e595b51c4e 100644 --- a/massa-ledger-exports/src/types.rs +++ b/massa-ledger-exports/src/types.rs @@ -7,6 +7,7 @@ use nom::{ error::{ContextError, ParseError}, IResult, }; +use serde::{Deserialize, Serialize}; /// Trait marking a structure that supports another one (V) being applied to it pub trait Applicable { @@ -15,7 +16,7 @@ pub trait Applicable { } /// Enumeration representing set/update/delete change on a value T -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize)] pub enum SetUpdateOrDelete, V: Applicable + Clone> { /// Sets the value T a new absolute value T Set(T), @@ -184,7 +185,7 @@ where } /// `Enum` representing a set/delete change on a value T -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug, Clone, PartialEq, Eq, Deserialize, 
Serialize)] pub enum SetOrDelete { /// sets a new absolute value T Set(T), @@ -270,7 +271,7 @@ impl Applicable> for SetOrDelete { } /// represents a set/keep change -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize)] pub enum SetOrKeep { /// sets a new absolute value T Set(T), diff --git a/massa-ledger-worker/src/ledger.rs b/massa-ledger-worker/src/ledger.rs index fc168b31e1a..b21bb1413c6 100644 --- a/massa-ledger-worker/src/ledger.rs +++ b/massa-ledger-worker/src/ledger.rs @@ -134,29 +134,12 @@ impl LedgerController for FinalLedger { .get_sub_entry(addr, LedgerSubEntry::Datastore(key.to_owned())) } - /// Checks for the existence of a datastore entry for a given address. - /// - /// # Arguments - /// * `addr`: target address - /// * `key`: datastore key - /// - /// # Returns - /// true if the datastore entry was found, or false if the ledger entry or datastore entry was not found - fn has_data_entry(&self, addr: &Address, key: &[u8]) -> bool { - self.sorted_ledger - .get_sub_entry(addr, LedgerSubEntry::Datastore(key.to_owned())) - .is_some() - } - /// Get every key of the datastore for a given address. 
/// /// # Returns /// A `BTreeSet` of the datastore keys fn get_datastore_keys(&self, addr: &Address) -> Option>> { - match self.entry_exists(addr) { - true => Some(self.sorted_ledger.get_datastore_keys(addr)), - false => None, - } + self.sorted_ledger.get_datastore_keys(addr) } /// Get the current disk ledger hash diff --git a/massa-ledger-worker/src/ledger_db.rs b/massa-ledger-worker/src/ledger_db.rs index 4f01b8f4a61..4493c2c61e2 100644 --- a/massa-ledger-worker/src/ledger_db.rs +++ b/massa-ledger-worker/src/ledger_db.rs @@ -52,6 +52,16 @@ pub enum LedgerSubEntry { Datastore(Vec), } +impl LedgerSubEntry { + fn derive_key(&self, addr: &Address) -> Vec { + match self { + LedgerSubEntry::Balance => balance_key!(addr), + LedgerSubEntry::Bytecode => bytecode_key!(addr), + LedgerSubEntry::Datastore(hash) => data_key!(addr, hash), + } + } +} + /// Disk ledger DB module /// /// Contains a `RocksDB` DB instance @@ -73,29 +83,6 @@ impl Debug for LedgerDB { } } -/// For a given start prefix (inclusive), returns the correct end prefix (non-inclusive). -/// This assumes the key bytes are ordered in lexicographical order. -/// Since key length is not limited, for some case we return `None` because there is -/// no bounded limit (every keys in the series `[]`, `[255]`, `[255, 255]` ...). 
-fn end_prefix(prefix: &[u8]) -> Option> { - let mut end_range = prefix.to_vec(); - while let Some(0xff) = end_range.last() { - end_range.pop(); - } - if let Some(byte) = end_range.last_mut() { - *byte += 1; - Some(end_range) - } else { - None - } -} - -#[test] -fn test_end_prefix() { - assert_eq!(end_prefix(&[5, 6, 7]), Some(vec![5, 6, 8])); - assert_eq!(end_prefix(&[5, 6, 255]), Some(vec![5, 7])); -} - /// Batch containing write operations to perform on disk and cache for the ledger hash computing pub struct LedgerBatch { // Rocksdb write batch @@ -210,6 +197,164 @@ impl LedgerDB { self.write_batch(batch); } + /// Get the current disk ledger hash + pub fn get_ledger_hash(&self) -> Hash { + let handle = self.db.cf_handle(METADATA_CF).expect(CF_ERROR); + if let Some(ledger_hash_bytes) = self + .db + .get_pinned_cf(handle, LEDGER_HASH_KEY) + .expect(CRUD_ERROR) + .as_deref() + { + Hash::from_bytes(ledger_hash_bytes.try_into().expect(LEDGER_HASH_ERROR)) + } else { + // initial ledger_hash value to avoid matching an option in every XOR operation + // because of a one time case being an empty ledger + // also note that the if you XOR a hash with itself result is LEDGER_HASH_INITIAL_BYTES + Hash::from_bytes(LEDGER_HASH_INITIAL_BYTES) + } + } + + /// Get the given sub-entry of a given address. + /// + /// # Arguments + /// * `addr`: associated address + /// * `ty`: type of the queried sub-entry + /// + /// # Returns + /// An Option of the sub-entry value as bytes + pub fn get_sub_entry(&self, addr: &Address, ty: LedgerSubEntry) -> Option> { + let handle = self.db.cf_handle(LEDGER_CF).expect(CF_ERROR); + self.db + .get_cf(handle, ty.derive_key(addr)) + .expect(CRUD_ERROR) + } + + /// Get every key of the datastore for a given address. 
+ /// + /// # Returns + /// A `BTreeSet` of the datastore keys + pub fn get_datastore_keys(&self, addr: &Address) -> Option>> { + let handle = self.db.cf_handle(LEDGER_CF).expect(CF_ERROR); + + let mut opt = ReadOptions::default(); + opt.set_iterate_range(data_prefix!(addr).clone()..end_prefix(data_prefix!(addr)).unwrap()); + + let mut iter = self + .db + .iterator_cf_opt(handle, opt, IteratorMode::Start) + .flatten() + .map(|(key, _)| key.split_at(ADDRESS_SIZE_BYTES + 1).1.to_vec()) + .peekable(); + + // Return None if empty + // TODO: function should return None if complete entry does not exist + // and Some([]) if it does but datastore is empty + iter.peek()?; + Some(iter.collect()) + } + + /// Get a part of the disk Ledger. + /// Mainly used in the bootstrap process. + /// + /// # Arguments + /// * `last_key`: key where the part retrieving must start + /// + /// # Returns + /// A tuple containing: + /// * The ledger part as bytes + /// * The last taken key (this is an optimization to easily keep a reference to the last key) + pub fn get_ledger_part( + &self, + cursor: StreamingStep>, + ) -> Result<(Vec, StreamingStep>), ModelsError> { + let handle = self.db.cf_handle(LEDGER_CF).expect(CF_ERROR); + let opt = ReadOptions::default(); + let ser = VecU8Serializer::new(); + let key_serializer = KeySerializer::new(); + let mut ledger_part = Vec::new(); + + // Creates an iterator from the next element after the last if defined, otherwise initialize it at the first key of the ledger. 
+ let (db_iterator, mut new_cursor) = match cursor { + StreamingStep::Started => ( + self.db.iterator_cf_opt(handle, opt, IteratorMode::Start), + StreamingStep::Started, + ), + StreamingStep::Ongoing(last_key) => { + let mut iter = self.db.iterator_cf_opt( + handle, + opt, + IteratorMode::From(&last_key, Direction::Forward), + ); + iter.next(); + (iter, StreamingStep::Finished(None)) + } + StreamingStep::Finished(_) => return Ok((ledger_part, cursor)), + }; + + // Iterates over the whole database + for (key, entry) in db_iterator.flatten() { + if (ledger_part.len() as u64) < (self.ledger_part_size_message_bytes) { + key_serializer.serialize(&key.to_vec(), &mut ledger_part)?; + ser.serialize(&entry.to_vec(), &mut ledger_part)?; + new_cursor = StreamingStep::Ongoing(key.to_vec()); + } else { + break; + } + } + Ok((ledger_part, new_cursor)) + } + + /// Set a part of the ledger in the database. + /// We deserialize in this function because we insert in the ledger while deserializing. + /// Used for bootstrap. + /// + /// # Arguments + /// * data: must be the serialized version provided by `get_ledger_part` + /// + /// # Returns + /// The last key of the inserted entry (this is an optimization to easily keep a reference to the last key) + pub fn set_ledger_part<'a>( + &self, + data: &'a [u8], + ) -> Result>, ModelsError> { + let handle = self.db.cf_handle(LEDGER_CF).expect(CF_ERROR); + let vec_u8_deserializer = + VecU8Deserializer::new(Bound::Included(0), Bound::Excluded(u64::MAX)); + let key_deserializer = KeyDeserializer::new(self.max_datastore_key_length); + let mut last_key = Rc::new(Vec::new()); + let mut batch = LedgerBatch::new(self.get_ledger_hash()); + + // Since this data is coming from the network, deser to address and ser back to bytes for a security check. 
+ let (rest, _) = many0(|input: &'a [u8]| { + let (rest, (key, value)) = tuple(( + |input| key_deserializer.deserialize(input), + |input| vec_u8_deserializer.deserialize(input), + ))(input)?; + *Rc::get_mut(&mut last_key).ok_or_else(|| { + nom::Err::Error(nom::error::Error::new(input, nom::error::ErrorKind::Fail)) + })? = key.clone(); + self.put_entry_value(handle, &mut batch, &key, &value); + Ok((rest, ())) + })(data) + .map_err(|_| ModelsError::SerializeError("Error in deserialization".to_string()))?; + + // Every byte should have been read + if last_key.is_empty() { + Ok(StreamingStep::Finished(None)) + } else if rest.is_empty() { + self.write_batch(batch); + Ok(StreamingStep::Ongoing((*last_key).clone())) + } else { + Err(ModelsError::SerializeError( + "rest is not empty.".to_string(), + )) + } + } +} + +// Private helpers +impl LedgerDB { /// Apply the given operation batch to the disk ledger fn write_batch(&self, mut batch: LedgerBatch) { let handle = self.db.cf_handle(METADATA_CF).expect(CF_ERROR); @@ -235,26 +380,12 @@ impl LedgerDB { .write_batch .put_cf(handle, SLOT_KEY, slot_bytes.clone()); // XOR previous slot and new one - if let Some(prev_bytes) = self.db.get_cf(handle, SLOT_KEY).expect(CRUD_ERROR) { + if let Some(prev_bytes) = self.db.get_pinned_cf(handle, SLOT_KEY).expect(CRUD_ERROR) { batch.ledger_hash ^= Hash::compute_from(&prev_bytes); } batch.ledger_hash ^= Hash::compute_from(&slot_bytes); } - /// Get the current disk ledger hash - pub fn get_ledger_hash(&self) -> Hash { - let handle = self.db.cf_handle(METADATA_CF).expect(CF_ERROR); - if let Some(ledger_hash_bytes) = self.db.get_cf(handle, LEDGER_HASH_KEY).expect(CRUD_ERROR) - { - Hash::from_bytes(&ledger_hash_bytes.try_into().expect(LEDGER_HASH_ERROR)) - } else { - // initial ledger_hash value to avoid matching an option in every XOR operation - // because of a one time case being an empty ledger - // also note that the if you XOR a hash with itself result is LEDGER_HASH_INITIAL_BYTES - 
Hash::from_bytes(LEDGER_HASH_INITIAL_BYTES) - } - } - /// Internal function to put a key & value and perform the ledger hash XORs fn put_entry_value( &self, @@ -299,54 +430,6 @@ impl LedgerDB { } } - /// Get the given sub-entry of a given address. - /// - /// # Arguments - /// * `addr`: associated address - /// * `ty`: type of the queried sub-entry - /// - /// # Returns - /// An Option of the sub-entry value as bytes - pub fn get_sub_entry(&self, addr: &Address, ty: LedgerSubEntry) -> Option> { - let handle = self.db.cf_handle(LEDGER_CF).expect(CF_ERROR); - - match ty { - LedgerSubEntry::Balance => self - .db - .get_cf(handle, balance_key!(addr)) - .expect(CRUD_ERROR), - LedgerSubEntry::Bytecode => self - .db - .get_cf(handle, bytecode_key!(addr)) - .expect(CRUD_ERROR), - LedgerSubEntry::Datastore(hash) => self - .db - .get_cf(handle, data_key!(addr, hash)) - .expect(CRUD_ERROR), - } - } - - /// Get every key of the datastore for a given address. - /// - /// # Returns - /// A `BTreeSet` of the datastore keys - pub fn get_datastore_keys(&self, addr: &Address) -> BTreeSet> { - let handle = self.db.cf_handle(LEDGER_CF).expect(CF_ERROR); - - let mut opt = ReadOptions::default(); - opt.set_iterate_upper_bound(end_prefix(data_prefix!(addr)).unwrap()); - - self.db - .iterator_cf_opt( - handle, - opt, - IteratorMode::From(data_prefix!(addr), Direction::Forward), - ) - .flatten() - .map(|(key, _)| key.split_at(ADDRESS_SIZE_BYTES + 1).1.to_vec()) - .collect() - } - /// Internal function to update a key & value and perform the ledger hash XORs fn update_key_value( &self, @@ -361,7 +444,7 @@ impl LedgerDB { .expect(KEY_LEN_SER_ERROR); if let Some(added_hash) = batch.aeh_list.get(key) { batch.ledger_hash ^= *added_hash; - } else if let Some(prev_bytes) = self.db.get_cf(handle, key).expect(CRUD_ERROR) { + } else if let Some(prev_bytes) = self.db.get_pinned_cf(handle, key).expect(CRUD_ERROR) { batch.ledger_hash ^= Hash::compute_from(&[&len_bytes, key, &prev_bytes].concat()); } 
let hash = Hash::compute_from(&[&len_bytes, key, value].concat()); @@ -413,7 +496,7 @@ impl LedgerDB { fn delete_key(&self, handle: &ColumnFamily, batch: &mut LedgerBatch, key: &[u8]) { if let Some(added_hash) = batch.aeh_list.get(key) { batch.ledger_hash ^= *added_hash; - } else if let Some(prev_bytes) = self.db.get_cf(handle, key).expect(CRUD_ERROR) { + } else if let Some(prev_bytes) = self.db.get_pinned_cf(handle, key).expect(CRUD_ERROR) { let mut len_bytes = Vec::new(); self.len_serializer .serialize(&(key.len() as u64), &mut len_bytes) @@ -451,112 +534,16 @@ impl LedgerDB { self.delete_key(handle, batch, &key); } } - - /// Get a part of the disk Ledger. - /// Mainly used in the bootstrap process. - /// - /// # Arguments - /// * `last_key`: key where the part retrieving must start - /// - /// # Returns - /// A tuple containing: - /// * The ledger part as bytes - /// * The last taken key (this is an optimization to easily keep a reference to the last key) - pub fn get_ledger_part( - &self, - cursor: StreamingStep>, - ) -> Result<(Vec, StreamingStep>), ModelsError> { - let handle = self.db.cf_handle(LEDGER_CF).expect(CF_ERROR); - let opt = ReadOptions::default(); - let ser = VecU8Serializer::new(); - let key_serializer = KeySerializer::new(); - let mut ledger_part = Vec::new(); - - // Creates an iterator from the next element after the last if defined, otherwise initialize it at the first key of the ledger. 
- let (db_iterator, mut new_cursor) = match cursor { - StreamingStep::Started => ( - self.db.iterator_cf_opt(handle, opt, IteratorMode::Start), - StreamingStep::Started, - ), - StreamingStep::Ongoing(last_key) => { - let mut iter = self.db.iterator_cf_opt( - handle, - opt, - IteratorMode::From(&last_key, Direction::Forward), - ); - iter.next(); - (iter, StreamingStep::Finished(None)) - } - StreamingStep::Finished(_) => return Ok((ledger_part, cursor)), - }; - - // Iterates over the whole database - for (key, entry) in db_iterator.flatten() { - if (ledger_part.len() as u64) < (self.ledger_part_size_message_bytes) { - key_serializer.serialize(&key.to_vec(), &mut ledger_part)?; - ser.serialize(&entry.to_vec(), &mut ledger_part)?; - new_cursor = StreamingStep::Ongoing(key.to_vec()); - } else { - break; - } - } - Ok((ledger_part, new_cursor)) - } - - /// Set a part of the ledger in the database. - /// We deserialize in this function because we insert in the ledger while deserializing. - /// Used for bootstrap. - /// - /// # Arguments - /// * data: must be the serialized version provided by `get_ledger_part` - /// - /// # Returns - /// The last key of the inserted entry (this is an optimization to easily keep a reference to the last key) - pub fn set_ledger_part<'a>( - &self, - data: &'a [u8], - ) -> Result>, ModelsError> { - let handle = self.db.cf_handle(LEDGER_CF).expect(CF_ERROR); - let vec_u8_deserializer = - VecU8Deserializer::new(Bound::Included(0), Bound::Excluded(u64::MAX)); - let key_deserializer = KeyDeserializer::new(self.max_datastore_key_length); - let mut last_key = Rc::new(Vec::new()); - let mut batch = LedgerBatch::new(self.get_ledger_hash()); - - // Since this data is coming from the network, deser to address and ser back to bytes for a security check. 
- let (rest, _) = many0(|input: &'a [u8]| { - let (rest, (key, value)) = tuple(( - |input| key_deserializer.deserialize(input), - |input| vec_u8_deserializer.deserialize(input), - ))(input)?; - *Rc::get_mut(&mut last_key).ok_or_else(|| { - nom::Err::Error(nom::error::Error::new(input, nom::error::ErrorKind::Fail)) - })? = key.clone(); - self.put_entry_value(handle, &mut batch, &key, &value); - Ok((rest, ())) - })(data) - .map_err(|_| ModelsError::SerializeError("Error in deserialization".to_string()))?; - - // Every byte should have been read - if last_key.is_empty() { - Ok(StreamingStep::Finished(None)) - } else if rest.is_empty() { - self.write_batch(batch); - Ok(StreamingStep::Ongoing((*last_key).clone())) - } else { - Err(ModelsError::SerializeError( - "rest is not empty.".to_string(), - )) - } - } - +} +// test helpers +impl LedgerDB { /// Get every address and their corresponding balance. /// /// IMPORTANT: This should only be used for debug purposes. /// /// # Returns /// A `BTreeMap` with the address as key and the balance as value - #[cfg(feature = "testing")] + #[cfg(any(feature = "testing"))] pub fn get_every_address( &self, ) -> std::collections::BTreeMap { @@ -620,10 +607,26 @@ impl LedgerDB { } } +/// For a given start prefix (inclusive), returns the correct end prefix (non-inclusive). +/// This assumes the key bytes are ordered in lexicographical order. +/// Since key length is not limited, for some case we return `None` because there is +/// no bounded limit (every keys in the series `[]`, `[255]`, `[255, 255]` ...). 
+fn end_prefix(prefix: &[u8]) -> Option> { + let mut end_range = prefix.to_vec(); + while let Some(0xff) = end_range.last() { + end_range.pop(); + } + if let Some(byte) = end_range.last_mut() { + *byte += 1; + Some(end_range) + } else { + None + } +} + #[cfg(test)] mod tests { - use super::LedgerDB; - use crate::ledger_db::{LedgerBatch, LedgerSubEntry, LEDGER_HASH_INITIAL_BYTES}; + use super::*; use massa_hash::Hash; use massa_ledger_exports::{LedgerEntry, LedgerEntryUpdate, SetOrKeep}; use massa_models::{ @@ -716,4 +719,10 @@ mod tests { let res = db.get_ledger_part(StreamingStep::Started).unwrap(); db.set_ledger_part(&res.0[..]).unwrap(); } + + #[test] + fn test_end_prefix() { + assert_eq!(end_prefix(&[5, 6, 7]), Some(vec![5, 6, 8])); + assert_eq!(end_prefix(&[5, 6, 255]), Some(vec![5, 7])); + } } diff --git a/massa-models/Cargo.toml b/massa-models/Cargo.toml index 98f934f2b68..b397edb7311 100644 --- a/massa-models/Cargo.toml +++ b/massa-models/Cargo.toml @@ -11,7 +11,6 @@ num_enum = "0.5" rust_decimal = "1.26" serde = { version = "1.0", features = ["derive"] } serde_with = "2.1.0" -strum = { version = "0.24", features = ["derive"] } thiserror = "1.0" num = { version = "0.4", features = ["serde"] } directories = "4.0" @@ -25,6 +24,7 @@ massa_hash = { path = "../massa-hash" } massa_serialization = { path = "../massa-serialization" } massa_signature = { path = "../massa-signature" } massa_time = { path = "../massa-time" } +const_format = "0.2.30" [dev-dependencies] serial_test = "0.10" diff --git a/massa-models/src/active_block.rs b/massa-models/src/active_block.rs index 9f79c2c58dc..50a5f7293bc 100644 --- a/massa-models/src/active_block.rs +++ b/massa-models/src/active_block.rs @@ -1,6 +1,6 @@ use crate::{ address::Address, - block::BlockId, + block_id::BlockId, prehash::{PreHashMap, PreHashSet}, slot::Slot, }; diff --git a/massa-models/src/address.rs b/massa-models/src/address.rs index 63266f78a3c..384c01f0c6a 100644 --- a/massa-models/src/address.rs +++ 
b/massa-models/src/address.rs @@ -41,6 +41,8 @@ impl std::fmt::Display for Address { } } +// See https://github.com/massalabs/massa/pull/3479#issuecomment-1408694720 +// as to why more information is not provided impl std::fmt::Debug for Address { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { write!(f, "{}", self) diff --git a/massa-models/src/api.rs b/massa-models/src/api.rs deleted file mode 100644 index 931ab7f48ab..00000000000 --- a/massa-models/src/api.rs +++ /dev/null @@ -1,619 +0,0 @@ -// Copyright (c) 2022 MASSA LABS - -use crate::address::ExecutionAddressCycleInfo; -use crate::endorsement::{EndorsementId, WrappedEndorsement}; -use crate::ledger_models::LedgerData; -use crate::node::NodeId; -use crate::operation::{OperationId, WrappedOperation}; -use crate::stats::{ConsensusStats, ExecutionStats, NetworkStats}; -use crate::{ - address::Address, amount::Amount, block::Block, block::BlockId, config::CompactConfig, - slot::Slot, version::Version, -}; -use massa_signature::{PublicKey, Signature}; -use massa_time::MassaTime; -use serde::{Deserialize, Serialize}; -use std::collections::BTreeMap; -use std::net::IpAddr; -use strum::Display; - -/// operation input -#[derive(Serialize, Deserialize, Debug)] -pub struct OperationInput { - /// The public key of the creator of the TX - pub creator_public_key: PublicKey, - /// The signature of the operation - pub signature: Signature, - /// The serialized version of the content `base58` encoded - pub serialized_content: Vec, -} - -/// node status -#[derive(Debug, Deserialize, Serialize)] -pub struct NodeStatus { - /// our node id - pub node_id: NodeId, - /// optional node ip - pub node_ip: Option, - /// node version - pub version: Version, - /// now - pub current_time: MassaTime, - /// current cycle - pub current_cycle: u64, - /// connected nodes (node id, ip address, true if the connection is outgoing, false if incoming) - pub connected_nodes: BTreeMap, - /// latest slot, none if now is before genesis 
timestamp - pub last_slot: Option, - /// next slot - pub next_slot: Slot, - /// consensus stats - pub consensus_stats: ConsensusStats, - /// pool stats (operation count and endorsement count) - pub pool_stats: (usize, usize), - /// network stats - pub network_stats: NetworkStats, - /// execution stats - pub execution_stats: ExecutionStats, - /// compact configuration - pub config: CompactConfig, -} - -impl std::fmt::Display for NodeStatus { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - writeln!(f, "Node's ID: {}", self.node_id)?; - if self.node_ip.is_some() { - writeln!(f, "Node's IP: {}", self.node_ip.unwrap())?; - } else { - writeln!(f, "No routable IP set")?; - } - writeln!(f)?; - - writeln!(f, "Version: {}", self.version)?; - writeln!(f, "Config:\n{}", self.config)?; - writeln!(f)?; - - writeln!(f, "Current time: {}", self.current_time.to_utc_string())?; - writeln!(f, "Current cycle: {}", self.current_cycle)?; - if self.last_slot.is_some() { - writeln!(f, "Last slot: {}", self.last_slot.unwrap())?; - } - writeln!(f, "Next slot: {}", self.next_slot)?; - writeln!(f)?; - - writeln!(f, "{}", self.consensus_stats)?; - - writeln!(f, "Pool stats:")?; - writeln!(f, "\tOperations count: {}", self.pool_stats.0)?; - writeln!(f, "\tEndorsements count: {}", self.pool_stats.1)?; - writeln!(f)?; - - writeln!(f, "{}", self.network_stats)?; - - writeln!(f, "{}", self.execution_stats)?; - - writeln!(f, "Connected nodes:")?; - for (node_id, (ip_addr, is_outgoing)) in &self.connected_nodes { - writeln!( - f, - "Node's ID: {} / IP address: {} / {} connection", - node_id, - ip_addr, - if *is_outgoing { "Out" } else { "In" } - )? 
- } - Ok(()) - } -} - -/// Operation and contextual info about it -#[derive(Debug, Deserialize, Serialize)] -pub struct OperationInfo { - /// id - pub id: OperationId, - /// true if operation is still in pool - pub in_pool: bool, - /// the operation appears in `in_blocks` - /// if it appears in multiple blocks, these blocks are in different cliques - pub in_blocks: Vec, - /// true if the operation is final (for example in a final block) - pub is_final: bool, - /// the operation itself - pub operation: WrappedOperation, -} - -impl std::fmt::Display for OperationInfo { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - writeln!( - f, - "Operation {}{}{}", - self.id, - display_if_true(self.in_pool, " (in pool)"), - display_if_true(self.is_final, " (final)") - )?; - writeln!(f, "In blocks:")?; - for block_id in &self.in_blocks { - writeln!(f, "\t- {}", block_id)?; - } - writeln!(f, "{}", self.operation)?; - Ok(()) - } -} - -/// Block status within the graph -#[derive(Eq, PartialEq, Debug, Deserialize, Serialize)] -pub enum BlockGraphStatus { - /// received but not yet graph-processed - Incoming, - /// waiting for its slot - WaitingForSlot, - /// waiting for a missing dependency - WaitingForDependencies, - /// active in alternative cliques - ActiveInAlternativeCliques, - /// active in blockclique - ActiveInBlockclique, - /// forever applies - Final, - /// discarded for any reason - Discarded, - /// not found in graph - NotFound, -} - -/// Current balance ledger info -#[derive(Debug, Deserialize, Serialize, Clone, Copy)] -pub struct LedgerInfo { - /// final data - pub final_ledger_info: LedgerData, - /// latest data - pub candidate_ledger_info: LedgerData, - /// locked balance, for example balance due to a roll sell - pub locked_balance: Amount, -} - -impl std::fmt::Display for LedgerInfo { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - writeln!(f, "\tFinal balance: {}", self.final_ledger_info.balance)?; - writeln!( - f, - 
"\tCandidate balance: {}", - self.candidate_ledger_info.balance - )?; - writeln!(f, "\tLocked balance: {}", self.locked_balance)?; - Ok(()) - } -} - -/// Roll counts -#[derive(Debug, Deserialize, Serialize, Clone, Copy)] -pub struct RollsInfo { - /// count taken into account for the current cycle - pub active_rolls: u64, - /// at final blocks - pub final_rolls: u64, - /// at latest blocks - pub candidate_rolls: u64, -} - -impl std::fmt::Display for RollsInfo { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - writeln!(f, "\tActive rolls: {}", self.active_rolls)?; - writeln!(f, "\tFinal rolls: {}", self.final_rolls)?; - writeln!(f, "\tCandidate rolls: {}", self.candidate_rolls)?; - Ok(()) - } -} - -/// All you ever dream to know about an address -#[derive(Debug, Deserialize, Serialize)] -pub struct AddressInfo { - /// the address - pub address: Address, - /// the thread the address belongs to - pub thread: u8, - - /// final balance - pub final_balance: Amount, - /// final roll count - pub final_roll_count: u64, - /// final datastore keys - pub final_datastore_keys: Vec>, - - /// candidate balance - pub candidate_balance: Amount, - /// candidate roll count - pub candidate_roll_count: u64, - /// candidate datastore keys - pub candidate_datastore_keys: Vec>, - - /// deferred credits - pub deferred_credits: Vec, - - /// next block draws - pub next_block_draws: Vec, - /// next endorsement draws - pub next_endorsement_draws: Vec, - - /// created blocks - pub created_blocks: Vec, - /// created operations - pub created_operations: Vec, - /// created endorsements - pub created_endorsements: Vec, - - /// cycle information - pub cycle_infos: Vec, -} - -impl std::fmt::Display for AddressInfo { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - writeln!(f, "Address {} (thread {}):", self.address, self.thread)?; - writeln!( - f, - "\tBalance: final={}, candidate={}", - self.final_balance, self.candidate_balance - )?; - writeln!( - f, - 
"\tRolls: final={}, candidate={}", - self.final_roll_count, self.candidate_roll_count - )?; - write!(f, "\tLocked coins:")?; - if self.deferred_credits.is_empty() { - writeln!(f, "0")?; - } else { - for slot_amount in &self.deferred_credits { - writeln!( - f, - "\t\t{} locked coins will be unlocked at slot {}", - slot_amount.amount, slot_amount.slot - )?; - } - } - writeln!(f, "\tCycle infos:")?; - for cycle_info in &self.cycle_infos { - writeln!( - f, - "\t\tCycle {} ({}): produced {} and missed {} blocks{}", - cycle_info.cycle, - if cycle_info.is_final { - "final" - } else { - "candidate" - }, - cycle_info.ok_count, - cycle_info.nok_count, - match cycle_info.active_rolls { - Some(rolls) => format!(" with {} active rolls", rolls), - None => "".into(), - }, - )?; - } - //writeln!(f, "\tProduced blocks: {}", self.created_blocks.iter().map(|id| id.to_string()).intersperse(", ".into()).collect())?; - //writeln!(f, "\tProduced operations: {}", self.created_operations.iter().map(|id| id.to_string()).intersperse(", ".into()).collect())?; - //writeln!(f, "\tProduced endorsements: {}", self.created_endorsements.iter().map(|id| id.to_string()).intersperse(", ".into()).collect())?; - Ok(()) - } -} - -impl AddressInfo { - /// Only essential info about an address - pub fn compact(&self) -> CompactAddressInfo { - CompactAddressInfo { - address: self.address, - thread: self.thread, - active_rolls: self - .cycle_infos - .last() - .and_then(|c| c.active_rolls) - .unwrap_or_default(), - final_rolls: self.final_roll_count, - candidate_rolls: self.candidate_roll_count, - final_balance: self.final_balance, - candidate_balance: self.candidate_balance, - } - } -} - -/// When an address is drawn to create an endorsement it is selected for a specific index -#[derive(Debug, Deserialize, Serialize, Hash, PartialEq, Eq)] -pub struct IndexedSlot { - /// slot - pub slot: Slot, - /// endorsement index in the slot - pub index: usize, -} - -impl std::fmt::Display for IndexedSlot { - fn fmt(&self, 
f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - writeln!(f, "Slot: {}, Index: {}", self.slot, self.index) - } -} - -/// Less information about an address -#[derive(Debug, Serialize, Deserialize)] -pub struct CompactAddressInfo { - /// the address - pub address: Address, - /// the thread it is - pub thread: u8, - /// candidate rolls - pub candidate_rolls: u64, - /// final rolls - pub final_rolls: u64, - /// active rolls - pub active_rolls: u64, - /// final balance - pub final_balance: Amount, - /// candidate balance - pub candidate_balance: Amount, -} - -impl std::fmt::Display for CompactAddressInfo { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - writeln!(f, "Address: {} (thread {}):", self.address, self.thread)?; - writeln!( - f, - "\tBalance: final={}, candidate={}", - self.final_balance, self.candidate_balance - )?; - writeln!( - f, - "\tRolls: active={}, final={}, candidate={}", - self.active_rolls, self.final_rolls, self.candidate_rolls - )?; - Ok(()) - } -} - -/// All you wanna know about an endorsement -#[derive(Debug, Deserialize, Serialize, Clone)] -pub struct EndorsementInfo { - /// id - pub id: EndorsementId, - /// true if endorsement is still in pool - pub in_pool: bool, - /// the endorsement appears in `in_blocks` - /// if it appears in multiple blocks, these blocks are in different cliques - pub in_blocks: Vec, - /// true if the endorsement is final (for example in a final block) - pub is_final: bool, - /// the endorsement itself - pub endorsement: WrappedEndorsement, -} - -impl std::fmt::Display for EndorsementInfo { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - writeln!( - f, - "Endorsement {}{}{}", - self.id, - display_if_true(self.in_pool, " (in pool)"), - display_if_true(self.is_final, " (final)") - )?; - writeln!(f, "In blocks:")?; - for block_id in &self.in_blocks { - writeln!(f, "\t- {}", block_id)?; - } - writeln!(f, "{}", self.endorsement)?; - Ok(()) - } -} - -/// slot / amount 
pair -#[derive(Debug, Deserialize, Serialize)] -pub struct SlotAmount { - /// slot - pub slot: Slot, - /// amount - pub amount: Amount, -} - -/// refactor to delete -#[derive(Debug, Deserialize, Serialize)] -pub struct BlockInfo { - /// block id - pub id: BlockId, - /// optional block info content - pub content: Option, -} - -/// Block content -#[derive(Debug, Deserialize, Serialize)] -pub struct BlockInfoContent { - /// true if final - pub is_final: bool, - /// true if in the greatest clique (and not final) - pub is_in_blockclique: bool, - /// true if candidate (active any clique but not final) - pub is_candidate: bool, - /// true if discarded - pub is_discarded: bool, - /// block - pub block: Block, -} - -impl std::fmt::Display for BlockInfo { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - if let Some(content) = &self.content { - writeln!( - f, - "Block ID: {}{}{}{}{}", - self.id, - display_if_true(content.is_final, " (final)"), - display_if_true(content.is_candidate, " (candidate)"), - display_if_true(content.is_in_blockclique, " (blockclique)"), - display_if_true(content.is_discarded, " (discarded)"), - )?; - writeln!(f, "Block: {}", content.block)?; - } else { - writeln!(f, "Block {} not found", self.id)?; - } - Ok(()) - } -} - -/// A block resume (without the block itself) -#[derive(Debug, Deserialize, Serialize)] -pub struct BlockSummary { - /// id - pub id: BlockId, - /// true if in a final block - pub is_final: bool, - /// true if incompatible with a final block - pub is_stale: bool, - /// true if in the greatest block clique - pub is_in_blockclique: bool, - /// the slot the block is in - pub slot: Slot, - /// the block creator - pub creator: Address, - /// the block parents - pub parents: Vec, -} - -impl std::fmt::Display for BlockSummary { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - writeln!( - f, - "Block's ID: {}{}{}{}", - self.id, - display_if_true(self.is_final, "final"), - 
display_if_true(self.is_stale, "stale"), - display_if_true(self.is_in_blockclique, "in blockclique"), - )?; - writeln!(f, "Slot: {}", self.slot)?; - writeln!(f, "Creator: {}", self.creator)?; - writeln!(f, "Parents' IDs:")?; - for parent in &self.parents { - writeln!(f, "\t- {}", parent)?; - } - Ok(()) - } -} - -/// Dumb utils function to display nicely boolean value -fn display_if_true(value: bool, text: &str) -> String { - if value { - format!("[{}]", text) - } else { - String::from("") - } -} - -/// Just a wrapper with a optional beginning and end -#[derive(Debug, Deserialize, Clone, Copy, Serialize)] -pub struct TimeInterval { - /// optional start slot - pub start: Option, - /// optional end slot - pub end: Option, -} - -/// Datastore entry query input structure -#[derive(Debug, Deserialize, Clone, Serialize)] -pub struct DatastoreEntryInput { - /// associated address of the entry - pub address: Address, - /// datastore key - pub key: Vec, -} - -/// Datastore entry query output structure -#[derive(Debug, Deserialize, Clone, Serialize)] -pub struct DatastoreEntryOutput { - /// final datastore entry value - pub final_value: Option>, - /// candidate datastore entry value - pub candidate_value: Option>, -} - -impl std::fmt::Display for DatastoreEntryOutput { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - writeln!(f, "final value: {:?}", self.final_value)?; - writeln!(f, "candidate value: {:?}", self.candidate_value)?; - Ok(()) - } -} - -/// filter used when retrieving SC output events -#[derive(Default, Debug, Deserialize, Clone, Serialize)] -pub struct EventFilter { - /// optional start slot - pub start: Option, - /// optional end slot - pub end: Option, - /// optional emitter address - pub emitter_address: Option
, - /// optional caller address - pub original_caller_address: Option
, - /// optional operation id - pub original_operation_id: Option, - /// optional event status - /// - /// Some(true) means final - /// Some(false) means candidate - /// None means final _and_ candidate - pub is_final: Option, - /// optional execution status - /// - /// Some(true) means events coming from a failed sc execution - /// Some(false) means events coming from a succeeded sc execution - /// None means both - pub is_error: Option, -} - -/// read only bytecode execution request -#[derive(Debug, Deserialize, Clone, Serialize)] -pub struct ReadOnlyBytecodeExecution { - /// max available gas - pub max_gas: u64, - /// byte code - pub bytecode: Vec, - /// caller's address, optional - pub address: Option
, - /// Operation datastore, optional - pub operation_datastore: Option>, -} - -/// read SC call request -#[derive(Debug, Deserialize, Clone, Serialize)] -pub struct ReadOnlyCall { - /// max available gas - pub max_gas: u64, - /// target address - pub target_address: Address, - /// target function - pub target_function: String, - /// function parameter - pub parameter: Vec, - /// caller's address, optional - pub caller_address: Option
, -} - -/// SCRUD operations -#[derive(Display)] -#[strum(serialize_all = "snake_case")] -pub enum ScrudOperation { - /// search operation - Search, - /// create operation - Create, - /// read operation - Read, - /// update operation - Update, - /// delete operation - Delete, -} - -/// Bootsrap lists types -#[derive(Display)] -#[strum(serialize_all = "snake_case")] -pub enum ListType { - /// contains banned entry - Blacklist, - /// contains allowed entry - Whitelist, -} diff --git a/massa-models/src/block.rs b/massa-models/src/block.rs index f0e5bae6b1f..152fe94645a 100644 --- a/massa-models/src/block.rs +++ b/massa-models/src/block.rs @@ -1,193 +1,54 @@ //! Copyright (c) 2022 MASSA LABS -use crate::endorsement::{EndorsementId, EndorsementSerializer, EndorsementSerializerLW}; -use crate::prehash::PreHashed; -use crate::wrapped::{Id, Wrapped, WrappedContent, WrappedDeserializer, WrappedSerializer}; +// use crate::config::THREAD_COUNT; +// use crate::endorsement::{EndorsementId, EndorsementSerializer, EndorsementSerializerLW}; +// use crate::prehash::PreHashed; +use crate::secure_share::{ + Id, SecureShare, SecureShareContent, SecureShareDeserializer, SecureShareSerializer, +}; use crate::{ - endorsement::{Endorsement, EndorsementDeserializerLW, WrappedEndorsement}, + // endorsement::{Endorsement, EndorsementDeserializerLW, SecureShareEndorsement}, error::ModelsError, - operation::{OperationId, OperationIdsDeserializer, OperationIdsSerializer, WrappedOperation}, - slot::{Slot, SlotDeserializer, SlotSerializer}, + operation::{ + OperationId, OperationIdsDeserializer, OperationIdsSerializer, SecureShareOperation, + }, + // slot::{Slot, SlotDeserializer, SlotSerializer}, }; -use massa_hash::{Hash, HashDeserializer}; +// use massa_hash::{Hash, HashDeserializer}; use massa_serialization::{ - DeserializeError, Deserializer, SerializeError, Serializer, U32VarIntDeserializer, - U32VarIntSerializer, U64VarIntDeserializer, U64VarIntSerializer, + // DeserializeError, + 
Deserializer, + SerializeError, + Serializer, + // U32VarIntDeserializer, + // U32VarIntSerializer, U64VarIntDeserializer, U64VarIntSerializer, }; use massa_signature::{KeyPair, PublicKey, Signature}; -use nom::branch::alt; -use nom::bytes::complete::tag; +// use nom::branch::alt; +// use nom::bytes::complete::tag; use nom::error::context; -use nom::multi::{count, length_count}; -use nom::sequence::{preceded, tuple}; +// use nom::multi::{count, length_count}; +use nom::sequence::tuple; use nom::Parser; use nom::{ error::{ContextError, ParseError}, IResult, }; use serde::{Deserialize, Serialize}; -use serde_with::{DeserializeFromStr, SerializeDisplay}; -use std::convert::TryInto; +// use serde_with::{DeserializeFromStr, SerializeDisplay}; +// use std::collections::HashSet; +// use std::convert::TryInto; use std::fmt::Formatter; -use std::ops::Bound::{Excluded, Included}; -use std::str::FromStr; - -/// Size in bytes of a serialized block ID -const BLOCK_ID_SIZE_BYTES: usize = massa_hash::HASH_SIZE_BYTES; - -/// block id -#[derive( - Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash, SerializeDisplay, DeserializeFromStr, -)] -pub struct BlockId(pub Hash); - -impl PreHashed for BlockId {} - -impl Id for BlockId { - fn new(hash: Hash) -> Self { - BlockId(hash) - } - - fn get_hash(&self) -> &Hash { - &self.0 - } -} - -const BLOCKID_PREFIX: char = 'B'; -const BLOCKID_VERSION: u64 = 0; - -impl std::fmt::Display for BlockId { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - let u64_serializer = U64VarIntSerializer::new(); - // might want to allocate the vector with capacity in order to avoid re-allocation - let mut bytes: Vec = Vec::new(); - u64_serializer - .serialize(&BLOCKID_VERSION, &mut bytes) - .map_err(|_| std::fmt::Error)?; - bytes.extend(self.0.to_bytes()); - write!( - f, - "{}{}", - BLOCKID_PREFIX, - bs58::encode(bytes).with_check().into_string() - ) - } -} - -impl std::fmt::Debug for BlockId { - fn fmt(&self, f: &mut std::fmt::Formatter) -> 
std::fmt::Result { - write!(f, "{}", self) - } -} - -impl FromStr for BlockId { - type Err = ModelsError; - /// ## Example - /// ```rust - /// # use massa_hash::Hash; - /// # use std::str::FromStr; - /// # use massa_models::block::BlockId; - /// # let hash = Hash::compute_from(b"test"); - /// # let block_id = BlockId(hash); - /// let ser = block_id.to_string(); - /// let res_block_id = BlockId::from_str(&ser).unwrap(); - /// assert_eq!(block_id, res_block_id); - /// ``` - fn from_str(s: &str) -> Result { - let mut chars = s.chars(); - match chars.next() { - Some(prefix) if prefix == BLOCKID_PREFIX => { - let data = chars.collect::(); - let decoded_bs58_check = bs58::decode(data) - .with_check(None) - .into_vec() - .map_err(|_| ModelsError::BlockIdParseError)?; - let u64_deserializer = U64VarIntDeserializer::new(Included(0), Included(u64::MAX)); - let (rest, _version) = u64_deserializer - .deserialize::(&decoded_bs58_check[..]) - .map_err(|_| ModelsError::BlockIdParseError)?; - Ok(BlockId(Hash::from_bytes( - rest.try_into() - .map_err(|_| ModelsError::BlockIdParseError)?, - ))) - } - _ => Err(ModelsError::BlockIdParseError), - } - } -} - -impl BlockId { - /// block id to bytes - pub fn to_bytes(&self) -> &[u8; BLOCK_ID_SIZE_BYTES] { - self.0.to_bytes() - } - - /// block id into bytes - pub fn into_bytes(self) -> [u8; BLOCK_ID_SIZE_BYTES] { - self.0.into_bytes() - } - - /// block id from bytes - pub fn from_bytes(data: &[u8; BLOCK_ID_SIZE_BYTES]) -> BlockId { - BlockId(Hash::from_bytes(data)) - } - - /// first bit of the hashed block id - pub fn get_first_bit(&self) -> bool { - self.to_bytes()[0] >> 7 == 1 - } -} - -/// Serializer for `BlockId` -#[derive(Default, Clone)] -pub struct BlockIdSerializer; - -impl BlockIdSerializer { - /// Creates a new serializer for `BlockId` - pub fn new() -> Self { - Self - } -} - -impl Serializer for BlockIdSerializer { - fn serialize(&self, value: &BlockId, buffer: &mut Vec) -> Result<(), SerializeError> { - 
buffer.extend(value.to_bytes()); - Ok(()) - } -} - -/// Deserializer for `BlockId` -#[derive(Default, Clone)] -pub struct BlockIdDeserializer { - hash_deserializer: HashDeserializer, -} - -impl BlockIdDeserializer { - /// Creates a new deserializer for `BlockId` - pub fn new() -> Self { - Self { - hash_deserializer: HashDeserializer::new(), - } - } -} - -impl Deserializer for BlockIdDeserializer { - fn deserialize<'a, E: ParseError<&'a [u8]> + ContextError<&'a [u8]>>( - &self, - buffer: &'a [u8], - ) -> IResult<&'a [u8], BlockId, E> { - context("Failed BlockId deserialization", |input| { - let (rest, hash) = self.hash_deserializer.deserialize(input)?; - Ok((rest, BlockId(hash))) - })(buffer) - } -} +// use std::ops::Bound::{Excluded, Included}; +// use std::str::FromStr; +use crate::block_header::{BlockHeader, BlockHeaderDeserializer, SecuredHeader}; +use crate::block_id::BlockId; /// block #[derive(Debug, Clone, Serialize, Deserialize)] pub struct Block { /// signed header - pub header: WrappedHeader, + pub header: SecuredHeader, /// operations ids pub operations: Vec, } @@ -196,26 +57,26 @@ pub struct Block { #[derive(Debug, Clone, Serialize, Deserialize)] pub struct FilledBlock { /// signed header - pub header: WrappedHeader, + pub header: SecuredHeader, /// operations - pub operations: Vec<(OperationId, Option)>, + pub operations: Vec<(OperationId, Option)>, } -/// Wrapped Block -pub type WrappedBlock = Wrapped; +/// Block with assosciated meta-data and interfaces allowing trust of data in untrusted network +pub type SecureShareBlock = SecureShare; -impl WrappedContent for Block { - fn new_wrapped, U: Id>( +impl SecureShareContent for Block { + fn new_verifiable, U: Id>( content: Self, content_serializer: SC, _keypair: &KeyPair, - ) -> Result, ModelsError> { + ) -> Result, ModelsError> { let mut content_serialized = Vec::new(); content_serializer.serialize(&content, &mut content_serialized)?; - Ok(Wrapped { + Ok(SecureShare { signature: 
content.header.signature, - creator_public_key: content.header.creator_public_key, - creator_address: content.header.creator_address, + content_creator_pub_key: content.header.content_creator_pub_key, + content_creator_address: content.header.content_creator_address, id: U::new(*content.header.id.get_hash()), content, serialized_data: content_serialized, @@ -243,14 +104,14 @@ impl WrappedContent for Block { _creator_public_key_deserializer: &massa_signature::PublicKeyDeserializer, content_deserializer: &DC, buffer: &'a [u8], - ) -> IResult<&'a [u8], Wrapped, E> { + ) -> IResult<&'a [u8], SecureShare, E> { let (rest, content) = content_deserializer.deserialize(buffer)?; Ok(( rest, - Wrapped { + SecureShare { signature: content.header.signature, - creator_public_key: content.header.creator_public_key, - creator_address: content.header.creator_address, + content_creator_pub_key: content.header.content_creator_pub_key, + content_creator_address: content.header.content_creator_address, id: U::new(*content.header.id.get_hash()), content, serialized_data: buffer[..buffer.len() - rest.len()].to_vec(), @@ -260,7 +121,7 @@ impl WrappedContent for Block { } /// Serializer for `Block` pub struct BlockSerializer { - header_serializer: WrappedSerializer, + header_serializer: SecureShareSerializer, op_ids_serializer: OperationIdsSerializer, } @@ -268,7 +129,7 @@ impl BlockSerializer { /// Creates a new `BlockSerializer` pub fn new() -> Self { BlockSerializer { - header_serializer: WrappedSerializer::new(), + header_serializer: SecureShareSerializer::new(), op_ids_serializer: OperationIdsSerializer::new(), } } @@ -283,7 +144,9 @@ impl Default for BlockSerializer { impl Serializer for BlockSerializer { /// ## Example: /// ```rust - /// use massa_models::{block::{Block, BlockSerializer, BlockId, BlockHeader, BlockHeaderSerializer}, config::THREAD_COUNT, slot::Slot, endorsement::{Endorsement, EndorsementSerializer}, wrapped::WrappedContent, prehash::PreHashSet}; + /// use 
massa_models::{block::{Block, BlockSerializer}, config::THREAD_COUNT, slot::Slot, endorsement::{Endorsement, EndorsementSerializer}, secure_share::SecureShareContent, prehash::PreHashSet}; + /// use massa_models::block_header::{BlockHeader, BlockHeaderSerializer}; + /// use massa_models::block_id::{BlockId}; /// use massa_hash::Hash; /// use massa_signature::KeyPair; /// use massa_serialization::{Serializer, Deserializer, DeserializeError}; @@ -293,13 +156,13 @@ impl Serializer for BlockSerializer { /// .collect(); /// /// // create block header - /// let orig_header = BlockHeader::new_wrapped( + /// let orig_header = BlockHeader::new_verifiable( /// BlockHeader { /// slot: Slot::new(1, 1), /// parents, /// operation_merkle_root: Hash::compute_from("mno".as_bytes()), /// endorsements: vec![ - /// Endorsement::new_wrapped( + /// Endorsement::new_verifiable( /// Endorsement { /// slot: Slot::new(1, 1), /// index: 1, @@ -309,7 +172,7 @@ impl Serializer for BlockSerializer { /// &keypair, /// ) /// .unwrap(), - /// Endorsement::new_wrapped( + /// Endorsement::new_verifiable( /// Endorsement { /// slot: Slot::new(4, 0), /// index: 3, @@ -345,7 +208,7 @@ impl Serializer for BlockSerializer { /// Deserializer for `Block` pub struct BlockDeserializer { - header_deserializer: WrappedDeserializer, + header_deserializer: SecureShareDeserializer, op_ids_deserializer: OperationIdsDeserializer, } @@ -353,7 +216,7 @@ impl BlockDeserializer { /// Creates a new `BlockDeserializer` pub fn new(thread_count: u8, max_operations_per_block: u32, endorsement_count: u32) -> Self { BlockDeserializer { - header_deserializer: WrappedDeserializer::new(BlockHeaderDeserializer::new( + header_deserializer: SecureShareDeserializer::new(BlockHeaderDeserializer::new( thread_count, endorsement_count, )), @@ -365,37 +228,39 @@ impl BlockDeserializer { impl Deserializer for BlockDeserializer { /// ## Example: /// ```rust - /// use massa_models::{block::{Block, BlockSerializer, BlockDeserializer, 
BlockId,BlockHeader, BlockHeaderSerializer}, config::THREAD_COUNT, slot::Slot, endorsement::{Endorsement, EndorsementSerializer}, wrapped::WrappedContent, prehash::PreHashSet}; + /// use massa_models::{block::{Block, BlockSerializer, BlockDeserializer}, config::THREAD_COUNT, slot::Slot, endorsement::{Endorsement, EndorsementSerializer}, secure_share::SecureShareContent, prehash::PreHashSet}; + /// use massa_models::block_id::BlockId; + /// use massa_models::block_header::{BlockHeader, BlockHeaderSerializer}; /// use massa_hash::Hash; /// use massa_signature::KeyPair; /// use massa_serialization::{Serializer, Deserializer, DeserializeError}; /// let keypair = KeyPair::generate(); - /// let parents = (0..THREAD_COUNT) + /// let parents: Vec = (0..THREAD_COUNT) /// .map(|i| BlockId(Hash::compute_from(&[i]))) /// .collect(); /// /// // create block header - /// let orig_header = BlockHeader::new_wrapped( + /// let orig_header = BlockHeader::new_verifiable( /// BlockHeader { /// slot: Slot::new(1, 1), - /// parents, + /// parents: parents.clone(), /// operation_merkle_root: Hash::compute_from("mno".as_bytes()), /// endorsements: vec![ - /// Endorsement::new_wrapped( + /// Endorsement::new_verifiable( /// Endorsement { /// slot: Slot::new(1, 1), - /// index: 1, - /// endorsed_block: BlockId(Hash::compute_from("blk1".as_bytes())), + /// index: 0, + /// endorsed_block: parents[1].clone(), /// }, /// EndorsementSerializer::new(), /// &keypair, /// ) /// .unwrap(), - /// Endorsement::new_wrapped( + /// Endorsement::new_verifiable( /// Endorsement { - /// slot: Slot::new(4, 0), - /// index: 3, - /// endorsed_block: BlockId(Hash::compute_from("blk2".as_bytes())), + /// slot: Slot::new(1, 1), + /// index: 1, + /// endorsed_block: parents[1].clone(), /// }, /// EndorsementSerializer::new(), /// &keypair, @@ -454,14 +319,14 @@ impl Deserializer for BlockDeserializer { } } -impl WrappedBlock { +impl SecureShareBlock { /// size in bytes of the whole block pub fn bytes_count(&self) 
-> u64 { self.serialized_data.len() as u64 } /// true if given operation is included in the block - pub fn contains_operation(&self, op: WrappedOperation) -> bool { + pub fn contains_operation(&self, op: SecureShareOperation) -> bool { self.content.operations.contains(&op.id) } @@ -487,19 +352,6 @@ impl std::fmt::Display for Block { } } -/// block header -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct BlockHeader { - /// slot - pub slot: Slot, - /// parents - pub parents: Vec, - /// all operations hash - pub operation_merkle_root: Hash, - /// endorsements - pub endorsements: Vec, -} - // NOTE: TODO // impl Signable for BlockHeader { // fn get_signature_message(&self) -> Result { @@ -512,322 +364,42 @@ pub struct BlockHeader { // } // } -/// wrapped header -pub type WrappedHeader = Wrapped; - -impl WrappedHeader { - /// gets the header fitness - pub fn get_fitness(&self) -> u64 { - (self.content.endorsements.len() as u64) + 1 - } -} - -impl WrappedContent for BlockHeader {} - -/// Serializer for `BlockHeader` -pub struct BlockHeaderSerializer { - slot_serializer: SlotSerializer, - endorsement_serializer: WrappedSerializer, - endorsement_content_serializer: EndorsementSerializerLW, - u32_serializer: U32VarIntSerializer, -} - -impl BlockHeaderSerializer { - /// Creates a new `BlockHeaderSerializer` - pub fn new() -> Self { - Self { - slot_serializer: SlotSerializer::new(), - endorsement_serializer: WrappedSerializer::new(), - u32_serializer: U32VarIntSerializer::new(), - endorsement_content_serializer: EndorsementSerializerLW::new(), - } - } -} - -impl Default for BlockHeaderSerializer { - fn default() -> Self { - Self::new() - } -} - -impl Serializer for BlockHeaderSerializer { - /// ## Example: - /// ```rust - /// use massa_models::block::{BlockId, BlockHeader, BlockHeaderSerializer}; - /// use massa_models::endorsement::{Endorsement, EndorsementSerializer}; - /// use massa_models::wrapped::WrappedContent; - /// use massa_models::{config::THREAD_COUNT, 
slot::Slot}; - /// use massa_hash::Hash; - /// use massa_signature::KeyPair; - /// use massa_serialization::Serializer; - /// - /// let keypair = KeyPair::generate(); - /// let parents = (0..THREAD_COUNT) - /// .map(|i| BlockId(Hash::compute_from(&[i]))) - /// .collect(); - /// let header = BlockHeader { - /// slot: Slot::new(1, 1), - /// parents, - /// operation_merkle_root: Hash::compute_from("mno".as_bytes()), - /// endorsements: vec![ - /// Endorsement::new_wrapped( - /// Endorsement { - /// slot: Slot::new(1, 1), - /// index: 1, - /// endorsed_block: BlockId(Hash::compute_from("blk1".as_bytes())), - /// }, - /// EndorsementSerializer::new(), - /// &keypair, - /// ) - /// .unwrap(), - /// Endorsement::new_wrapped( - /// Endorsement { - /// slot: Slot::new(4, 0), - /// index: 3, - /// endorsed_block: BlockId(Hash::compute_from("blk2".as_bytes())), - /// }, - /// EndorsementSerializer::new(), - /// &keypair, - /// ) - /// .unwrap(), - /// ], - /// }; - /// let mut buffer = vec![]; - /// BlockHeaderSerializer::new().serialize(&header, &mut buffer).unwrap(); - /// ``` - fn serialize(&self, value: &BlockHeader, buffer: &mut Vec) -> Result<(), SerializeError> { - self.slot_serializer.serialize(&value.slot, buffer)?; - // parents (note: there should be none if slot period=0) - if value.parents.is_empty() { - buffer.push(0); - } else { - buffer.push(1); - } - for parent_h in value.parents.iter() { - buffer.extend(parent_h.0.to_bytes()); - } - - // operations merkle root - buffer.extend(value.operation_merkle_root.to_bytes()); - - self.u32_serializer.serialize( - &value.endorsements.len().try_into().map_err(|err| { - SerializeError::GeneralError(format!("too many endorsements: {}", err)) - })?, - buffer, - )?; - for endorsement in value.endorsements.iter() { - self.endorsement_serializer.serialize_with( - &self.endorsement_content_serializer, - endorsement, - buffer, - )?; - } - Ok(()) - } -} - -/// Deserializer for `BlockHeader` -pub struct BlockHeaderDeserializer { - 
slot_deserializer: SlotDeserializer, - endorsement_serializer: EndorsementSerializer, - length_endorsements_deserializer: U32VarIntDeserializer, - hash_deserializer: HashDeserializer, - thread_count: u8, - endorsement_count: u32, -} - -impl BlockHeaderDeserializer { - /// Creates a new `BlockHeaderDeserializerLW` - pub const fn new(thread_count: u8, endorsement_count: u32) -> Self { - Self { - slot_deserializer: SlotDeserializer::new( - (Included(0), Included(u64::MAX)), - (Included(0), Excluded(thread_count)), - ), - endorsement_serializer: EndorsementSerializer::new(), - length_endorsements_deserializer: U32VarIntDeserializer::new( - Included(0), - Included(endorsement_count), - ), - hash_deserializer: HashDeserializer::new(), - thread_count, - endorsement_count, - } - } -} - -impl Deserializer for BlockHeaderDeserializer { - /// ## Example: - /// ```rust - /// use massa_models::block::{BlockId, BlockHeader, BlockHeaderDeserializer, BlockHeaderSerializer}; - /// use massa_models::{config::THREAD_COUNT, slot::Slot, wrapped::WrappedContent}; - /// use massa_models::endorsement::{Endorsement, EndorsementSerializerLW}; - /// use massa_hash::Hash; - /// use massa_signature::KeyPair; - /// use massa_serialization::{Serializer, Deserializer, DeserializeError}; - /// - /// let keypair = KeyPair::generate(); - /// let parents = (0..THREAD_COUNT) - /// .map(|i| BlockId(Hash::compute_from(&[i]))) - /// .collect(); - /// let header = BlockHeader { - /// slot: Slot::new(1, 1), - /// parents, - /// operation_merkle_root: Hash::compute_from("mno".as_bytes()), - /// endorsements: vec![ - /// Endorsement::new_wrapped( - /// Endorsement { - /// slot: Slot::new(1, 1), - /// index: 1, - /// endorsed_block: BlockId(Hash::compute_from("blk1".as_bytes())), - /// }, - /// EndorsementSerializerLW::new(), - /// &keypair, - /// ) - /// .unwrap(), - /// Endorsement::new_wrapped( - /// Endorsement { - /// slot: Slot::new(4, 0), - /// index: 3, - /// endorsed_block: 
BlockId(Hash::compute_from("blk2".as_bytes())), - /// }, - /// EndorsementSerializerLW::new(), - /// &keypair, - /// ) - /// .unwrap(), - /// ], - /// }; - /// let mut buffer = vec![]; - /// BlockHeaderSerializer::new().serialize(&header, &mut buffer).unwrap(); - /// let (rest, deserialized_header) = BlockHeaderDeserializer::new(32, 9).deserialize::(&buffer).unwrap(); - /// assert_eq!(rest.len(), 0); - /// let mut buffer2 = Vec::new(); - /// BlockHeaderSerializer::new().serialize(&deserialized_header, &mut buffer2).unwrap(); - /// assert_eq!(buffer, buffer2); - /// ``` - fn deserialize<'a, E: ParseError<&'a [u8]> + ContextError<&'a [u8]>>( - &self, - buffer: &'a [u8], - ) -> IResult<&'a [u8], BlockHeader, E> { - let (rest, (slot, parents, operation_merkle_root)): (&[u8], (Slot, Vec, Hash)) = - context( - "Failed BlockHeader deserialization", - tuple(( - context("Failed slot deserialization", |input| { - self.slot_deserializer.deserialize(input) - }), - context( - "Failed parents deserialization", - alt(( - preceded(tag(&[0]), |input| Ok((input, Vec::new()))), - preceded( - tag(&[1]), - count( - context("Failed block_id deserialization", |input| { - self.hash_deserializer - .deserialize(input) - .map(|(rest, hash)| (rest, BlockId(hash))) - }), - self.thread_count as usize, - ), - ), - )), - ), - context("Failed operation_merkle_root", |input| { - self.hash_deserializer.deserialize(input) - }), - )), - ) - .parse(buffer)?; - - if parents.is_empty() { - return Ok(( - &rest[1..], // Because there is 0 endorsements, we have a remaining 0 in rest and we don't need it - BlockHeader { - slot, - parents, - operation_merkle_root, - endorsements: Vec::new(), - }, - )); - } - // Now deser the endorsements (which were: lw serialized) - let endorsement_deserializer = WrappedDeserializer::new(EndorsementDeserializerLW::new( - self.endorsement_count, - slot, - parents[slot.thread as usize], - )); - - let (rest, endorsements) = context( - "Failed endorsements deserialization", - 
length_count::<&[u8], Wrapped, u32, E, _, _>( - context("Failed length deserialization", |input| { - self.length_endorsements_deserializer.deserialize(input) - }), - context("Failed endorsement deserialization", |input| { - endorsement_deserializer.deserialize_with(&self.endorsement_serializer, input) - }), - ), - ) - .parse(rest)?; - - Ok(( - rest, - BlockHeader { - slot, - parents, - operation_merkle_root, - endorsements, - }, - )) - } -} - -impl std::fmt::Display for BlockHeader { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - writeln!( - f, - "\t(period: {}, thread: {})", - self.slot.period, self.slot.thread, - )?; - writeln!(f, "\tMerkle root: {}", self.operation_merkle_root,)?; - writeln!(f, "\tParents: ")?; - for id in self.parents.iter() { - let str_id = id.to_string(); - writeln!(f, "\t\t{}", str_id)?; - } - if self.parents.is_empty() { - writeln!(f, "No parents found: This is a genesis header")?; - } - writeln!(f, "\tEndorsements:")?; - for ed in self.endorsements.iter() { - writeln!(f, "\t\t-----")?; - writeln!(f, "\t\tId: {}", ed.id)?; - writeln!(f, "\t\tIndex: {}", ed.content.index)?; - writeln!(f, "\t\tEndorsed slot: {}", ed.content.slot)?; - writeln!(f, "\t\tEndorser's public key: {}", ed.creator_public_key)?; - writeln!(f, "\t\tEndorsed block: {}", ed.content.endorsed_block)?; - writeln!(f, "\t\tSignature: {}", ed.signature)?; - } - if self.endorsements.is_empty() { - writeln!(f, "\tNo endorsements found")?; - } - Ok(()) - } +/// Block status within the graph +#[derive(Eq, PartialEq, Debug, Deserialize, Serialize)] +pub enum BlockGraphStatus { + /// received but not yet graph-processed + Incoming, + /// waiting for its slot + WaitingForSlot, + /// waiting for a missing dependency + WaitingForDependencies, + /// active in alternative cliques + ActiveInAlternativeCliques, + /// active in blockclique + ActiveInBlockclique, + /// forever applies + Final, + /// discarded for any reason + Discarded, + /// not found in graph + NotFound, } 
#[cfg(test)] mod test { use super::*; use crate::{ + block_header::BlockHeaderSerializer, config::{ENDORSEMENT_COUNT, MAX_OPERATIONS_PER_BLOCK, THREAD_COUNT}, endorsement::Endorsement, endorsement::EndorsementSerializer, + slot::Slot, }; + use massa_hash::Hash; use massa_serialization::DeserializeError; use massa_signature::KeyPair; use serial_test::serial; + use std::str::FromStr; #[test] #[serial] @@ -843,10 +415,23 @@ mod test { }) .collect(); - let endo = Endorsement::new_wrapped( + let endo1 = Endorsement::new_verifiable( + Endorsement { + slot: Slot::new(1, 0), + index: 0, + endorsed_block: BlockId( + Hash::from_bs58_check("bq1NsaCBAfseMKSjNBYLhpK7M5eeef2m277MYS2P2k424GaDf") + .unwrap(), + ), + }, + EndorsementSerializer::new(), + &keypair, + ) + .unwrap(); + let endo2 = Endorsement::new_verifiable( Endorsement { slot: Slot::new(1, 0), - index: 1, + index: ENDORSEMENT_COUNT - 1, endorsed_block: BlockId( Hash::from_bs58_check("bq1NsaCBAfseMKSjNBYLhpK7M5eeef2m277MYS2P2k424GaDf") .unwrap(), @@ -858,12 +443,12 @@ mod test { .unwrap(); // create block header - let orig_header = BlockHeader::new_wrapped( + let orig_header = BlockHeader::new_verifiable( BlockHeader { slot: Slot::new(1, 0), parents, operation_merkle_root: Hash::compute_from("mno".as_bytes()), - endorsements: vec![endo], + endorsements: vec![endo1, endo2], }, BlockHeaderSerializer::new(), &keypair, @@ -877,15 +462,15 @@ mod test { }; // serialize block - let wrapped_block: WrappedBlock = - Block::new_wrapped(orig_block.clone(), BlockSerializer::new(), &keypair).unwrap(); + let secured_block: SecureShareBlock = + Block::new_verifiable(orig_block.clone(), BlockSerializer::new(), &keypair).unwrap(); let mut ser_block = Vec::new(); - WrappedSerializer::new() - .serialize(&wrapped_block, &mut ser_block) + SecureShareSerializer::new() + .serialize(&secured_block, &mut ser_block) .unwrap(); // deserialize - let (rest, res_block): (&[u8], WrappedBlock) = WrappedDeserializer::new( + let (rest, res_block): 
(&[u8], SecureShareBlock) = SecureShareDeserializer::new( BlockDeserializer::new(THREAD_COUNT, MAX_OPERATIONS_PER_BLOCK, ENDORSEMENT_COUNT), ) .deserialize::(&ser_block) @@ -918,10 +503,8 @@ mod test { for ed in orig_block.header.content.endorsements.iter() { ed.verify_signature().unwrap(); } - res_block.content.header.verify_signature().unwrap(); - for ed in res_block.content.header.content.endorsements.iter() { - ed.verify_signature().unwrap(); - } + + res_block.content.header.assert_invariants().unwrap(); } #[test] @@ -931,9 +514,9 @@ mod test { let parents: Vec = vec![]; // create block header - let orig_header = BlockHeader::new_wrapped( + let orig_header = BlockHeader::new_verifiable( BlockHeader { - slot: Slot::new(1, 1), + slot: Slot::new(0, 1), parents, operation_merkle_root: Hash::compute_from("mno".as_bytes()), endorsements: vec![], @@ -950,20 +533,22 @@ mod test { }; // serialize block - let wrapped_block: WrappedBlock = - Block::new_wrapped(orig_block.clone(), BlockSerializer::new(), &keypair).unwrap(); + let secured_block: SecureShareBlock = + Block::new_verifiable(orig_block.clone(), BlockSerializer::new(), &keypair).unwrap(); let mut ser_block = Vec::new(); - WrappedSerializer::new() - .serialize(&wrapped_block, &mut ser_block) + SecureShareSerializer::new() + .serialize(&secured_block, &mut ser_block) .unwrap(); // deserialize - let (rest, res_block): (&[u8], WrappedBlock) = WrappedDeserializer::new( + let (rest, res_block): (&[u8], SecureShareBlock) = SecureShareDeserializer::new( BlockDeserializer::new(THREAD_COUNT, MAX_OPERATIONS_PER_BLOCK, ENDORSEMENT_COUNT), ) .deserialize::(&ser_block) .unwrap(); + res_block.content.header.assert_invariants().unwrap(); + // check equality assert!(rest.is_empty()); @@ -992,24 +577,24 @@ mod test { #[test] #[serial] - fn test_invalid_genesis_block_serialization() { + fn test_invalid_genesis_block_serialization_with_endorsements() { let keypair = KeyPair::generate(); let parents: Vec = vec![]; // Genesis block 
do not have any parents and thus cannot embed endorsements let endorsement = Endorsement { - slot: Slot::new(1, 1), + slot: Slot::new(0, 1), index: 1, endorsed_block: BlockId(Hash::compute_from(&[1])), }; // create block header - let orig_header = BlockHeader::new_wrapped( + let orig_header = BlockHeader::new_verifiable( BlockHeader { - slot: Slot::new(1, 1), + slot: Slot::new(0, 1), parents, operation_merkle_root: Hash::compute_from("mno".as_bytes()), - endorsements: vec![Endorsement::new_wrapped( + endorsements: vec![Endorsement::new_verifiable( endorsement, EndorsementSerializer::new(), &keypair, @@ -1028,19 +613,473 @@ mod test { }; // serialize block - let wrapped_block: WrappedBlock = - Block::new_wrapped(orig_block, BlockSerializer::new(), &keypair).unwrap(); + let secured_block: SecureShareBlock = + Block::new_verifiable(orig_block, BlockSerializer::new(), &keypair).unwrap(); + let mut ser_block = Vec::new(); + SecureShareSerializer::new() + .serialize(&secured_block, &mut ser_block) + .unwrap(); + + // deserialize + let res: Result<(&[u8], SecureShareBlock), _> = SecureShareDeserializer::new( + BlockDeserializer::new(THREAD_COUNT, MAX_OPERATIONS_PER_BLOCK, ENDORSEMENT_COUNT), + ) + .deserialize::(&ser_block); + + // TODO: Catch an failed deser being a fail, instead of a recoverable error + // TODO: assert that the error variant/context/etc. 
matches the expected failure + assert!(res.is_err()); + // let nom::Err::Failure(_) = res.unwrap_err() else { + // panic!("Deserialisation with invalid endorsements should be total fail"); + // }; + } + #[test] + #[serial] + fn test_invalid_genesis_block_serialization_with_parents() { + let keypair = KeyPair::generate(); + let parents = (0..THREAD_COUNT) + .map(|i| BlockId(Hash::compute_from(&[i]))) + .collect(); + + // create block header + let orig_header = BlockHeader::new_verifiable( + BlockHeader { + slot: Slot::new(0, 1), + parents, + operation_merkle_root: Hash::compute_from("mno".as_bytes()), + endorsements: vec![], + }, + BlockHeaderSerializer::new(), + &keypair, + ) + .unwrap(); + + // create block + let orig_block = Block { + header: orig_header, + operations: Default::default(), + }; + + // serialize block + let secured_block: SecureShareBlock = + Block::new_verifiable(orig_block, BlockSerializer::new(), &keypair).unwrap(); + let mut ser_block = Vec::new(); + SecureShareSerializer::new() + .serialize(&secured_block, &mut ser_block) + .unwrap(); + + // deserialize + let res: Result<(&[u8], SecureShareBlock), _> = SecureShareDeserializer::new( + BlockDeserializer::new(THREAD_COUNT, MAX_OPERATIONS_PER_BLOCK, ENDORSEMENT_COUNT), + ) + .deserialize::(&ser_block); + + // TODO: Catch an failed deser being a fail, instead of a recoverable error + // TODO: assert that the error variant/context/etc. 
matches the expected failure + assert!(res.is_err()); + } + #[test] + #[serial] + fn test_invalid_block_serialization_no_parents() { + let keypair = KeyPair::generate(); + // Non genesis block must have THREAD_COUNT parents + + // create block header + let orig_header = BlockHeader::new_verifiable( + BlockHeader { + slot: Slot::new(1, 1), + parents: vec![], + operation_merkle_root: Hash::compute_from("mno".as_bytes()), + endorsements: vec![], + }, + BlockHeaderSerializer::new(), + &keypair, + ) + .unwrap(); + + // create block + let orig_block = Block { + header: orig_header, + operations: Default::default(), + }; + + // serialize block + let secured_block: SecureShareBlock = + Block::new_verifiable(orig_block, BlockSerializer::new(), &keypair).unwrap(); + let mut ser_block = Vec::new(); + SecureShareSerializer::new() + .serialize(&secured_block, &mut ser_block) + .unwrap(); + + // deserialize + let res: Result<(&[u8], SecureShareBlock), _> = SecureShareDeserializer::new( + BlockDeserializer::new(THREAD_COUNT, MAX_OPERATIONS_PER_BLOCK, ENDORSEMENT_COUNT), + ) + .deserialize::(&ser_block); + + // TODO: Catch an failed deser being a fail, instead of a recoverable error + // TODO: assert that the error variant/context/etc. 
matches the expected failure + assert!(res.is_err()); + } + #[test] + #[serial] + fn test_invalid_block_serialization_obo_high_parent_count() { + let keypair = KeyPair::generate(); + // Non genesis block must have THREAD_COUNT parents + let parents = (0..=THREAD_COUNT) + .map(|i| BlockId(Hash::compute_from(&[i]))) + .collect(); + + // create block header + let orig_header = BlockHeader::new_verifiable( + BlockHeader { + slot: Slot::new(1, 1), + parents, + operation_merkle_root: Hash::compute_from("mno".as_bytes()), + endorsements: vec![], + }, + BlockHeaderSerializer::new(), + &keypair, + ) + .unwrap(); + + // create block + let orig_block = Block { + header: orig_header, + operations: Default::default(), + }; + + // serialize block + let secured_block: SecureShareBlock = + Block::new_verifiable(orig_block, BlockSerializer::new(), &keypair).unwrap(); + let mut ser_block = Vec::new(); + SecureShareSerializer::new() + .serialize(&secured_block, &mut ser_block) + .unwrap(); + + // deserialize + let res: Result<(&[u8], SecureShareBlock), _> = SecureShareDeserializer::new( + BlockDeserializer::new(THREAD_COUNT, MAX_OPERATIONS_PER_BLOCK, ENDORSEMENT_COUNT), + ) + .deserialize::(&ser_block); + + // TODO: Catch an failed deser being a fail, instead of a recoverable error + // TODO: assert that the error variant/context/etc. 
matches the expected failure + assert!(res.is_err()); + } + + #[test] + #[serial] + fn test_block_serialization_max_endo_count() { + let keypair = + KeyPair::from_str("S1bXjyPwrssNmG4oUG5SEqaUhQkVArQi7rzQDWpCprTSmEgZDGG").unwrap(); + let endorsed = BlockId( + Hash::from_bs58_check("bq1NsaCBAfseMKSjNBYLhpK7M5eeef2m277MYS2P2k424GaDf").unwrap(), + ); + let fillers = (1..THREAD_COUNT).map(|i| BlockId(Hash::compute_from(&[i]))); + let parents = std::iter::once(endorsed).chain(fillers).collect(); + + let endorsements = (0..ENDORSEMENT_COUNT) + .map(|i| { + Endorsement::new_verifiable( + Endorsement { + slot: Slot::new(1, 0), + index: i, + endorsed_block: BlockId( + Hash::from_bs58_check( + "bq1NsaCBAfseMKSjNBYLhpK7M5eeef2m277MYS2P2k424GaDf", + ) + .unwrap(), + ), + }, + EndorsementSerializer::new(), + &keypair, + ) + .unwrap() + }) + .collect(); + // create block header + let orig_header = BlockHeader::new_verifiable( + BlockHeader { + slot: Slot::new(1, 0), + parents, + operation_merkle_root: Hash::compute_from("mno".as_bytes()), + endorsements, + }, + BlockHeaderSerializer::new(), + &keypair, + ) + .unwrap(); + + // create block + let orig_block = Block { + header: orig_header, + operations: Default::default(), + }; + + // serialize block + let secured_block: SecureShareBlock = + Block::new_verifiable(orig_block, BlockSerializer::new(), &keypair).unwrap(); + let mut ser_block = Vec::new(); + SecureShareSerializer::new() + .serialize(&secured_block, &mut ser_block) + .unwrap(); + + // deserialize + let (_, res): (&[u8], SecureShareBlock) = SecureShareDeserializer::new( + BlockDeserializer::new(THREAD_COUNT, MAX_OPERATIONS_PER_BLOCK, ENDORSEMENT_COUNT), + ) + .deserialize::(&ser_block) + .unwrap(); + + res.content.header.assert_invariants().unwrap(); + } + #[test] + #[serial] + fn test_invalid_block_serialization_obo_low_parent_count() { + let keypair = KeyPair::generate(); + // Non genesis block must have THREAD_COUNT parents + let parents = (1..THREAD_COUNT) + .map(|i| 
BlockId(Hash::compute_from(&[i]))) + .collect(); + + // create block header + let orig_header = BlockHeader::new_verifiable( + BlockHeader { + slot: Slot::new(1, 1), + parents, + operation_merkle_root: Hash::compute_from("mno".as_bytes()), + endorsements: vec![], + }, + BlockHeaderSerializer::new(), + &keypair, + ) + .unwrap(); + + // create block + let orig_block = Block { + header: orig_header, + operations: Default::default(), + }; + + // serialize block + let secured_block: SecureShareBlock = + Block::new_verifiable(orig_block, BlockSerializer::new(), &keypair).unwrap(); + let mut ser_block = Vec::new(); + SecureShareSerializer::new() + .serialize(&secured_block, &mut ser_block) + .unwrap(); + + // deserialize + let res: Result<(&[u8], SecureShareBlock), _> = SecureShareDeserializer::new( + BlockDeserializer::new(THREAD_COUNT, MAX_OPERATIONS_PER_BLOCK, ENDORSEMENT_COUNT), + ) + .deserialize::(&ser_block); + + // TODO: Catch an failed deser being a fail, instead of a recoverable error + // TODO: assert that the error variant/context/etc. 
matches the expected failure + assert!(res.is_err()); + } + #[test] + #[serial] + fn test_invalid_block_serialization_obo_high_endo_count() { + let keypair = KeyPair::generate(); + // Non genesis block must have THREAD_COUNT parents + let parents = (0..THREAD_COUNT) + .map(|i| BlockId(Hash::compute_from(&[i]))) + .collect(); + + let endorsements = (0..=ENDORSEMENT_COUNT) + .map(|i| { + Endorsement::new_verifiable( + Endorsement { + slot: Slot::new(0, 1), + index: i, + endorsed_block: BlockId(Hash::compute_from(&[i as u8])), + }, + EndorsementSerializer::new(), + &keypair, + ) + .unwrap() + }) + .collect(); + // create block header + let orig_header = BlockHeader::new_verifiable( + BlockHeader { + slot: Slot::new(1, 1), + parents, + operation_merkle_root: Hash::compute_from("mno".as_bytes()), + endorsements, + }, + BlockHeaderSerializer::new(), + &keypair, + ) + .unwrap(); + + // create block + let orig_block = Block { + header: orig_header, + operations: Default::default(), + }; + + // serialize block + let secured_block: SecureShareBlock = + Block::new_verifiable(orig_block, BlockSerializer::new(), &keypair).unwrap(); + let mut ser_block = Vec::new(); + SecureShareSerializer::new() + .serialize(&secured_block, &mut ser_block) + .unwrap(); + + // deserialize + let res: Result<(&[u8], SecureShareBlock), _> = SecureShareDeserializer::new( + BlockDeserializer::new(THREAD_COUNT, MAX_OPERATIONS_PER_BLOCK, ENDORSEMENT_COUNT), + ) + .deserialize::(&ser_block); + + // TODO: Catch an failed deser being a fail, instead of a recoverable error + // TODO: see issue #3400 + assert!(res.is_err()); + } + #[test] + #[serial] + fn test_invalid_endorsement_idx() { + let keypair = + KeyPair::from_str("S1bXjyPwrssNmG4oUG5SEqaUhQkVArQi7rzQDWpCprTSmEgZDGG").unwrap(); + let parents = (0..THREAD_COUNT) + .map(|_i| { + BlockId( + Hash::from_bs58_check("bq1NsaCBAfseMKSjNBYLhpK7M5eeef2m277MYS2P2k424GaDf") + .unwrap(), + ) + }) + .collect(); + + let endo1 = Endorsement::new_verifiable( + 
Endorsement { + slot: Slot::new(1, 0), + index: ENDORSEMENT_COUNT, + endorsed_block: BlockId( + Hash::from_bs58_check("bq1NsaCBAfseMKSjNBYLhpK7M5eeef2m277MYS2P2k424GaDf") + .unwrap(), + ), + }, + EndorsementSerializer::new(), + &keypair, + ) + .unwrap(); + + // create block header + let orig_header = BlockHeader::new_verifiable( + BlockHeader { + slot: Slot::new(1, 0), + parents, + operation_merkle_root: Hash::compute_from("mno".as_bytes()), + endorsements: vec![endo1], + }, + BlockHeaderSerializer::new(), + &keypair, + ) + .unwrap(); + + // create block + let orig_block = Block { + header: orig_header.clone(), + operations: Default::default(), + }; + + // serialize block + let secured_block: SecureShareBlock = + Block::new_verifiable(orig_block.clone(), BlockSerializer::new(), &keypair).unwrap(); + let mut ser_block = Vec::new(); + SecureShareSerializer::new() + .serialize(&secured_block, &mut ser_block) + .unwrap(); + + // deserialize + let res: Result<(&[u8], SecureShareBlock), _> = SecureShareDeserializer::new( + BlockDeserializer::new(THREAD_COUNT, MAX_OPERATIONS_PER_BLOCK, ENDORSEMENT_COUNT), + ) + .deserialize::(&ser_block); + // TODO: Catch an failed deser being a fail, instead of a recoverable error + // TODO: assert that the error variant/context/etc. 
matches the expected failure + assert!(res.is_err()); + } + #[test] + #[serial] + fn test_invalid_dupe_endo_idx() { + let keypair = + KeyPair::from_str("S1bXjyPwrssNmG4oUG5SEqaUhQkVArQi7rzQDWpCprTSmEgZDGG").unwrap(); + let parents = (0..THREAD_COUNT) + .map(|_i| { + BlockId( + Hash::from_bs58_check("bq1NsaCBAfseMKSjNBYLhpK7M5eeef2m277MYS2P2k424GaDf") + .unwrap(), + ) + }) + .collect(); + + let endo1 = Endorsement::new_verifiable( + Endorsement { + slot: Slot::new(1, 0), + index: 0, + endorsed_block: BlockId( + Hash::from_bs58_check("bq1NsaCBAfseMKSjNBYLhpK7M5eeef2m277MYS2P2k424GaDf") + .unwrap(), + ), + }, + EndorsementSerializer::new(), + &keypair, + ) + .unwrap(); + let endo2 = Endorsement::new_verifiable( + Endorsement { + slot: Slot::new(1, 0), + index: 0, + endorsed_block: BlockId( + Hash::from_bs58_check("bq1NsaCBAfseMKSjNBYLhpK7M5eeef2m277MYS2P2k424GaDf") + .unwrap(), + ), + }, + EndorsementSerializer::new(), + &keypair, + ) + .unwrap(); + + // create block header + let orig_header = BlockHeader::new_verifiable( + BlockHeader { + slot: Slot::new(1, 0), + parents, + operation_merkle_root: Hash::compute_from("mno".as_bytes()), + endorsements: vec![endo1, endo2], + }, + BlockHeaderSerializer::new(), + &keypair, + ) + .unwrap(); + + // create block + let orig_block = Block { + header: orig_header.clone(), + operations: Default::default(), + }; + + // serialize block + let secured_block: SecureShareBlock = + Block::new_verifiable(orig_block.clone(), BlockSerializer::new(), &keypair).unwrap(); let mut ser_block = Vec::new(); - WrappedSerializer::new() - .serialize(&wrapped_block, &mut ser_block) + SecureShareSerializer::new() + .serialize(&secured_block, &mut ser_block) .unwrap(); // deserialize - let res: Result<(&[u8], WrappedBlock), _> = WrappedDeserializer::new( + let res: Result<(&[u8], SecureShareBlock), _> = SecureShareDeserializer::new( BlockDeserializer::new(THREAD_COUNT, MAX_OPERATIONS_PER_BLOCK, ENDORSEMENT_COUNT), ) .deserialize::(&ser_block); + // 
TODO: Catch an failed deser being a fail, instead of a recoverable error + // TODO: assert that the error variant/context/etc. matches the expected failure assert!(res.is_err()); } } diff --git a/massa-models/src/block_header.rs b/massa-models/src/block_header.rs new file mode 100644 index 00000000000..58721fd2af3 --- /dev/null +++ b/massa-models/src/block_header.rs @@ -0,0 +1,453 @@ +use crate::block_id::BlockId; +use crate::config::THREAD_COUNT; +use crate::endorsement::{ + Endorsement, EndorsementDeserializerLW, EndorsementId, EndorsementSerializer, + EndorsementSerializerLW, SecureShareEndorsement, +}; +use crate::secure_share::{ + SecureShare, SecureShareContent, SecureShareDeserializer, SecureShareSerializer, +}; +use crate::slot::{Slot, SlotDeserializer, SlotSerializer}; +use massa_hash::{Hash, HashDeserializer}; +use massa_serialization::{ + Deserializer, SerializeError, Serializer, U32VarIntDeserializer, U32VarIntSerializer, +}; +use nom::branch::alt; +use nom::bytes::complete::tag; +use nom::error::{context, ContextError, ParseError}; +use nom::multi::{count, length_count}; +use nom::sequence::{preceded, tuple}; +use nom::{IResult, Parser}; +use serde::{Deserialize, Serialize}; +use std::collections::Bound::{Excluded, Included}; +use std::collections::HashSet; +use std::fmt::Formatter; + +/// block header +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct BlockHeader { + /// slot + pub slot: Slot, + /// parents + pub parents: Vec, + /// all operations hash + pub operation_merkle_root: Hash, + /// endorsements + pub endorsements: Vec, +} + +// TODO: gh-issue #3398 +#[cfg(any(test, feature = "testing"))] +impl BlockHeader { + fn assert_invariants(&self) -> Result<(), Box> { + if self.slot.period == 0 { + if !self.parents.is_empty() { + return Err("Invariant broken: genesis block with parent(s)".into()); + } + if !self.endorsements.is_empty() { + return Err("Invariant broken: genesis block with endorsement(s)".into()); + } + } else { + if 
self.parents.len() != crate::config::THREAD_COUNT as usize { + return Err( + "Invariant broken: non-genesis block with incorrect number of parents".into(), + ); + } + if self.endorsements.len() > crate::config::ENDORSEMENT_COUNT as usize { + return Err("Invariant broken: endorsement count too high".into()); + } + + let parent_id = self.parents[self.slot.thread as usize]; + for endo in self.endorsements.iter() { + if endo.content.endorsed_block != parent_id { + return Err("Invariant broken: endorsement doesn't match parent".into()); + } + } + } + + // assert that the endorsement indexes are all unique... + let mut set = HashSet::new(); + for endo in self.endorsements.iter() { + // ...and check signatures + invariants while at it + endo.check_invariants()?; + + if !set.insert(endo.content.index) { + return Err("Endorsement duplicate index found".into()); + } + } + Ok(()) + } +} + +/// BlockHeader wrapped up alongside verification data +pub type SecuredHeader = SecureShare; + +impl SecuredHeader { + /// gets the header fitness + pub fn get_fitness(&self) -> u64 { + (self.content.endorsements.len() as u64) + 1 + } + // TODO: gh-issue #3398 + #[allow(dead_code)] + #[cfg(any(test, feature = "testing"))] + pub(crate) fn assert_invariants(&self) -> Result<(), Box> { + self.content.assert_invariants()?; + self.verify_signature() + .map_err(|er| format!("{}", er).into()) + } +} + +impl SecureShareContent for BlockHeader {} + +/// Serializer for `BlockHeader` +pub struct BlockHeaderSerializer { + slot_serializer: SlotSerializer, + endorsement_serializer: SecureShareSerializer, + endorsement_content_serializer: EndorsementSerializerLW, + u32_serializer: U32VarIntSerializer, +} + +impl BlockHeaderSerializer { + /// Creates a new `BlockHeaderSerializer` + pub fn new() -> Self { + Self { + slot_serializer: SlotSerializer::new(), + endorsement_serializer: SecureShareSerializer::new(), + u32_serializer: U32VarIntSerializer::new(), + endorsement_content_serializer: 
EndorsementSerializerLW::new(), + } + } +} + +impl Default for BlockHeaderSerializer { + fn default() -> Self { + Self::new() + } +} + +impl Serializer for BlockHeaderSerializer { + /// ## Example: + /// ```rust + /// use massa_models::{block_id::BlockId, block_header::BlockHeader, block_header::BlockHeaderSerializer}; + /// use massa_models::endorsement::{Endorsement, EndorsementSerializer}; + /// use massa_models::secure_share::SecureShareContent; + /// use massa_models::{config::THREAD_COUNT, slot::Slot}; + /// use massa_hash::Hash; + /// use massa_signature::KeyPair; + /// use massa_serialization::Serializer; + /// + /// let keypair = KeyPair::generate(); + /// let parents = (0..THREAD_COUNT) + /// .map(|i| BlockId(Hash::compute_from(&[i]))) + /// .collect(); + /// let header = BlockHeader { + /// slot: Slot::new(1, 1), + /// parents, + /// operation_merkle_root: Hash::compute_from("mno".as_bytes()), + /// endorsements: vec![ + /// Endorsement::new_verifiable( + /// Endorsement { + /// slot: Slot::new(1, 1), + /// index: 1, + /// endorsed_block: BlockId(Hash::compute_from("blk1".as_bytes())), + /// }, + /// EndorsementSerializer::new(), + /// &keypair, + /// ) + /// .unwrap(), + /// Endorsement::new_verifiable( + /// Endorsement { + /// slot: Slot::new(4, 0), + /// index: 3, + /// endorsed_block: BlockId(Hash::compute_from("blk2".as_bytes())), + /// }, + /// EndorsementSerializer::new(), + /// &keypair, + /// ) + /// .unwrap(), + /// ], + /// }; + /// let mut buffer = vec![]; + /// BlockHeaderSerializer::new().serialize(&header, &mut buffer).unwrap(); + /// ``` + fn serialize(&self, value: &BlockHeader, buffer: &mut Vec) -> Result<(), SerializeError> { + self.slot_serializer.serialize(&value.slot, buffer)?; + // parents (note: there should be none if slot period=0) + if value.parents.is_empty() { + buffer.push(0); + } else { + buffer.push(1); + } + for parent_h in value.parents.iter() { + buffer.extend(parent_h.0.to_bytes()); + } + + // operations merkle root + 
buffer.extend(value.operation_merkle_root.to_bytes()); + + self.u32_serializer.serialize( + &value.endorsements.len().try_into().map_err(|err| { + SerializeError::GeneralError(format!("too many endorsements: {}", err)) + })?, + buffer, + )?; + for endorsement in value.endorsements.iter() { + self.endorsement_serializer.serialize_with( + &self.endorsement_content_serializer, + endorsement, + buffer, + )?; + } + Ok(()) + } +} + +/// Deserializer for `BlockHeader` +pub struct BlockHeaderDeserializer { + slot_deserializer: SlotDeserializer, + endorsement_serializer: EndorsementSerializer, + length_endorsements_deserializer: U32VarIntDeserializer, + hash_deserializer: HashDeserializer, + thread_count: u8, + endorsement_count: u32, +} + +impl BlockHeaderDeserializer { + /// Creates a new `BlockHeaderDeserializerLW` + pub const fn new(thread_count: u8, endorsement_count: u32) -> Self { + Self { + slot_deserializer: SlotDeserializer::new( + (Included(0), Included(u64::MAX)), + (Included(0), Excluded(thread_count)), + ), + endorsement_serializer: EndorsementSerializer::new(), + length_endorsements_deserializer: U32VarIntDeserializer::new( + Included(0), + Included(endorsement_count), + ), + hash_deserializer: HashDeserializer::new(), + thread_count, + endorsement_count, + } + } +} + +impl Deserializer for BlockHeaderDeserializer { + /// ## Example: + /// ```rust + /// use massa_models::block_header::{BlockHeader, BlockHeaderDeserializer, BlockHeaderSerializer}; + /// use massa_models::block_id::{BlockId}; + /// use massa_models::{config::THREAD_COUNT, slot::Slot, secure_share::SecureShareContent}; + /// use massa_models::endorsement::{Endorsement, EndorsementSerializer}; + /// use massa_hash::Hash; + /// use massa_signature::KeyPair; + /// use massa_serialization::{Serializer, Deserializer, DeserializeError}; + /// + /// let keypair = KeyPair::generate(); + /// let parents: Vec = (0..THREAD_COUNT) + /// .map(|i| BlockId(Hash::compute_from(&[i]))) + /// .collect(); + /// let 
header = BlockHeader { + /// slot: Slot::new(1, 1), + /// parents: parents.clone(), + /// operation_merkle_root: Hash::compute_from("mno".as_bytes()), + /// endorsements: vec![ + /// Endorsement::new_verifiable( + /// Endorsement { + /// slot: Slot::new(1, 1), + /// index: 0, + /// endorsed_block: parents[1].clone(), + /// }, + /// EndorsementSerializer::new(), + /// &keypair, + /// ) + /// .unwrap(), + /// Endorsement::new_verifiable( + /// Endorsement { + /// slot: Slot::new(1, 1), + /// index: 1, + /// endorsed_block: parents[1].clone(), + /// }, + /// EndorsementSerializer::new(), + /// &keypair, + /// ) + /// .unwrap(), + /// ], + /// }; + /// let mut buffer = vec![]; + /// BlockHeaderSerializer::new().serialize(&header, &mut buffer).unwrap(); + /// let (rest, deserialized_header) = BlockHeaderDeserializer::new(32, 9).deserialize::(&buffer).unwrap(); + /// assert_eq!(rest.len(), 0); + /// let mut buffer2 = Vec::new(); + /// BlockHeaderSerializer::new().serialize(&deserialized_header, &mut buffer2).unwrap(); + /// assert_eq!(buffer, buffer2); + /// ``` + fn deserialize<'a, E: ParseError<&'a [u8]> + ContextError<&'a [u8]>>( + &self, + buffer: &'a [u8], + ) -> IResult<&'a [u8], BlockHeader, E> { + let (rest, (slot, parents, operation_merkle_root)): (&[u8], (Slot, Vec, Hash)) = + context("Failed BlockHeader deserialization", |input| { + let (rest, (slot, parents)) = tuple(( + context("Failed slot deserialization", |input| { + self.slot_deserializer.deserialize(input) + }), + context( + "Failed parents deserialization", + alt(( + preceded(tag(&[0]), |input| Ok((input, Vec::new()))), + preceded( + tag(&[1]), + count( + context("Failed block_id deserialization", |input| { + self.hash_deserializer + .deserialize(input) + .map(|(rest, hash)| (rest, BlockId(hash))) + }), + self.thread_count as usize, + ), + ), + )), + ), + )) + .parse(input)?; + + // validate the parent/slot invariants before moving on to other fields + if slot.period == 0 && !parents.is_empty() { + 
return Err(nom::Err::Failure(ContextError::add_context( + rest, + "Genesis block cannot contain parents", + ParseError::from_error_kind(rest, nom::error::ErrorKind::Fail), + ))); + } else if slot.period != 0 && parents.len() != THREAD_COUNT as usize { + return Err(nom::Err::Failure(ContextError::add_context( + rest, + const_format::formatcp!( + "Non-genesis block must have {} parents", + THREAD_COUNT + ), + ParseError::from_error_kind(rest, nom::error::ErrorKind::Fail), + ))); + } + + let (rest, merkle) = context("Failed operation_merkle_root", |input| { + self.hash_deserializer.deserialize(input) + }) + .parse(rest)?; + Ok((rest, (slot, parents, merkle))) + }) + .parse(buffer)?; + + if parents.is_empty() { + let res = BlockHeader { + slot, + parents, + operation_merkle_root, + endorsements: Vec::new(), + }; + + // TODO: gh-issue #3398 + #[cfg(any(test, feature = "testing"))] + res.assert_invariants().unwrap(); + + return Ok(( + &rest[1..], // Because there is 0 endorsements, we have a remaining 0 in rest and we don't need it + res, + )); + } + // Now deser the endorsements (which were light-weight serialized) + let endorsement_deserializer = + SecureShareDeserializer::new(EndorsementDeserializerLW::new( + self.endorsement_count, + slot, + parents[slot.thread as usize], + )); + + let parent_id = parents[slot.thread as usize]; + let (rest, endorsements): (&[u8], Vec>) = context( + "Failed endorsements deserialization", + length_count::<&[u8], SecureShare, u32, E, _, _>( + context("Failed length deserialization", |input| { + self.length_endorsements_deserializer.deserialize(input) + }), + context("Failed endorsement deserialization", |input| { + let (rest, endo) = endorsement_deserializer + .deserialize_with(&self.endorsement_serializer, input)?; + + if endo.content.endorsed_block != parent_id { + return Err(nom::Err::Failure(ContextError::add_context( + rest, + "Endorsement does not match block parents", + ParseError::from_error_kind(rest, 
nom::error::ErrorKind::Fail), + ))); + } + + Ok((rest, endo)) + }), + ), + ) + .parse(rest)?; + + let mut set = HashSet::new(); + + for end in endorsements.iter() { + if !set.insert(end.content.index) { + return Err(nom::Err::Failure(ContextError::add_context( + rest, + "Duplicate endorsement index found", + ParseError::from_error_kind(rest, nom::error::ErrorKind::Fail), + ))); + } + } + + let header = BlockHeader { + slot, + parents, + operation_merkle_root, + endorsements, + }; + + // TODO: gh-issue #3398 + #[cfg(any(test, feature = "testing"))] + header.assert_invariants().unwrap(); + + Ok((rest, header)) + } +} + +impl std::fmt::Display for BlockHeader { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + writeln!( + f, + "\t(period: {}, thread: {})", + self.slot.period, self.slot.thread, + )?; + writeln!(f, "\tMerkle root: {}", self.operation_merkle_root,)?; + writeln!(f, "\tParents: ")?; + for id in self.parents.iter() { + let str_id = id.to_string(); + writeln!(f, "\t\t{}", str_id)?; + } + if self.parents.is_empty() { + writeln!(f, "No parents found: This is a genesis header")?; + } + writeln!(f, "\tEndorsements:")?; + for ed in self.endorsements.iter() { + writeln!(f, "\t\t-----")?; + writeln!(f, "\t\tId: {}", ed.id)?; + writeln!(f, "\t\tIndex: {}", ed.content.index)?; + writeln!(f, "\t\tEndorsed slot: {}", ed.content.slot)?; + writeln!( + f, + "\t\tEndorser's public key: {}", + ed.content_creator_pub_key + )?; + writeln!(f, "\t\tEndorsed block: {}", ed.content.endorsed_block)?; + writeln!(f, "\t\tSignature: {}", ed.signature)?; + } + if self.endorsements.is_empty() { + writeln!(f, "\tNo endorsements found")?; + } + Ok(()) + } +} diff --git a/massa-models/src/block_id.rs b/massa-models/src/block_id.rs new file mode 100644 index 00000000000..77640407d4d --- /dev/null +++ b/massa-models/src/block_id.rs @@ -0,0 +1,165 @@ +use crate::error::ModelsError; +use crate::prehash::PreHashed; +use crate::secure_share::Id; +use massa_hash::{Hash, 
HashDeserializer}; +use massa_serialization::{ + DeserializeError, Deserializer, SerializeError, Serializer, U64VarIntDeserializer, + U64VarIntSerializer, +}; +use nom::error::{context, ContextError, ParseError}; +use nom::IResult; +use serde_with::{DeserializeFromStr, SerializeDisplay}; +use std::collections::Bound::Included; +use std::convert::TryInto; +use std::str::FromStr; + +/// Size in bytes of a serialized block ID +const BLOCK_ID_SIZE_BYTES: usize = massa_hash::HASH_SIZE_BYTES; + +/// block id +#[derive( + Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash, SerializeDisplay, DeserializeFromStr, +)] +pub struct BlockId(pub Hash); + +impl PreHashed for BlockId {} + +impl Id for BlockId { + fn new(hash: Hash) -> Self { + BlockId(hash) + } + + fn get_hash(&self) -> &Hash { + &self.0 + } +} + +const BLOCKID_PREFIX: char = 'B'; +const BLOCKID_VERSION: u64 = 0; + +impl std::fmt::Display for BlockId { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + let u64_serializer = U64VarIntSerializer::new(); + // might want to allocate the vector with capacity in order to avoid re-allocation + let mut bytes: Vec = Vec::new(); + u64_serializer + .serialize(&BLOCKID_VERSION, &mut bytes) + .map_err(|_| std::fmt::Error)?; + bytes.extend(self.0.to_bytes()); + write!( + f, + "{}{}", + BLOCKID_PREFIX, + bs58::encode(bytes).with_check().into_string() + ) + } +} + +impl std::fmt::Debug for BlockId { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "{}", self) + } +} + +impl FromStr for BlockId { + type Err = ModelsError; + /// ## Example + /// ```rust + /// # use massa_hash::Hash; + /// # use std::str::FromStr; + /// # use massa_models::block_id::BlockId; + /// # let hash = Hash::compute_from(b"test"); + /// # let block_id = BlockId(hash); + /// let ser = block_id.to_string(); + /// let res_block_id = BlockId::from_str(&ser).unwrap(); + /// assert_eq!(block_id, res_block_id); + /// ``` + fn from_str(s: &str) -> Result { + let mut chars 
= s.chars(); + match chars.next() { + Some(prefix) if prefix == BLOCKID_PREFIX => { + let data = chars.collect::(); + let decoded_bs58_check = bs58::decode(data) + .with_check(None) + .into_vec() + .map_err(|_| ModelsError::BlockIdParseError)?; + let u64_deserializer = U64VarIntDeserializer::new(Included(0), Included(u64::MAX)); + let (rest, _version) = u64_deserializer + .deserialize::(&decoded_bs58_check[..]) + .map_err(|_| ModelsError::BlockIdParseError)?; + Ok(BlockId(Hash::from_bytes( + rest.try_into() + .map_err(|_| ModelsError::BlockIdParseError)?, + ))) + } + _ => Err(ModelsError::BlockIdParseError), + } + } +} + +impl BlockId { + /// block id to bytes + pub fn to_bytes(&self) -> &[u8; BLOCK_ID_SIZE_BYTES] { + self.0.to_bytes() + } + + /// block id into bytes + pub fn into_bytes(self) -> [u8; BLOCK_ID_SIZE_BYTES] { + self.0.into_bytes() + } + + /// block id from bytes + pub fn from_bytes(data: &[u8; BLOCK_ID_SIZE_BYTES]) -> BlockId { + BlockId(Hash::from_bytes(data)) + } + + /// first bit of the hashed block id + pub fn get_first_bit(&self) -> bool { + self.to_bytes()[0] >> 7 == 1 + } +} + +/// Serializer for `BlockId` +#[derive(Default, Clone)] +pub struct BlockIdSerializer; + +impl BlockIdSerializer { + /// Creates a new serializer for `BlockId` + pub fn new() -> Self { + Self + } +} + +impl Serializer for BlockIdSerializer { + fn serialize(&self, value: &BlockId, buffer: &mut Vec) -> Result<(), SerializeError> { + buffer.extend(value.to_bytes()); + Ok(()) + } +} + +/// Deserializer for `BlockId` +#[derive(Default, Clone)] +pub struct BlockIdDeserializer { + hash_deserializer: HashDeserializer, +} + +impl BlockIdDeserializer { + /// Creates a new deserializer for `BlockId` + pub fn new() -> Self { + Self { + hash_deserializer: HashDeserializer::new(), + } + } +} + +impl Deserializer for BlockIdDeserializer { + fn deserialize<'a, E: ParseError<&'a [u8]> + ContextError<&'a [u8]>>( + &self, + buffer: &'a [u8], + ) -> IResult<&'a [u8], BlockId, E> { + 
context("Failed BlockId deserialization", |input| { + let (rest, hash) = self.hash_deserializer.deserialize(input)?; + Ok((rest, BlockId(hash))) + })(buffer) + } +} diff --git a/massa-models/src/clique.rs b/massa-models/src/clique.rs index 0280827ca49..946084c766b 100644 --- a/massa-models/src/clique.rs +++ b/massa-models/src/clique.rs @@ -17,7 +17,7 @@ use nom::sequence::tuple; use nom::{IResult, Parser}; use serde::{Deserialize, Serialize}; -use crate::block::BlockId; +use crate::block_id::BlockId; use crate::prehash::PreHashSet; use std::ops::Bound::{Excluded, Included}; @@ -64,7 +64,7 @@ impl Serializer for CliqueSerializer { /// ## Example /// ```rust /// # use massa_models::clique::{Clique, CliqueSerializer}; - /// # use massa_models::block::BlockId; + /// # use massa_models::block_id::BlockId; /// # use massa_hash::Hash; /// # use std::str::FromStr; /// # use massa_serialization::Serializer; @@ -117,7 +117,7 @@ impl Deserializer for CliqueDeserializer { /// ## Example /// ```rust /// # use massa_models::clique::{Clique, CliqueDeserializer, CliqueSerializer}; - /// # use massa_models::block::BlockId; + /// # use massa_models::block_id::BlockId; /// # use massa_hash::Hash; /// # use std::str::FromStr; /// # use massa_serialization::{Serializer, Deserializer, DeserializeError}; diff --git a/massa-models/src/composite.rs b/massa-models/src/composite.rs index 618911a65b3..c0497e79c17 100644 --- a/massa-models/src/composite.rs +++ b/massa-models/src/composite.rs @@ -1,7 +1,7 @@ // Copyright (c) 2022 MASSA LABS use crate::prehash::PreHashMap; -use crate::{address::Address, block::BlockId, operation::WrappedOperation}; +use crate::{address::Address, block_id::BlockId, operation::SecureShareOperation}; use massa_signature::{PublicKey, Signature}; use serde::{Deserialize, Serialize}; use std::fmt::Display; @@ -36,7 +36,7 @@ pub enum OperationSearchResultStatus { #[derive(Clone, Debug, Serialize, Deserialize)] pub struct OperationSearchResult { /// the operation - pub 
op: WrappedOperation, + pub op: SecureShareOperation, /// true if in pool pub in_pool: bool, /// maps block id to index on the operation in the block and if it's final diff --git a/massa-models/src/config/compact_config.rs b/massa-models/src/config/compact_config.rs index fb5c47de1e8..97ae34dbe44 100644 --- a/massa-models/src/config/compact_config.rs +++ b/massa-models/src/config/compact_config.rs @@ -50,11 +50,11 @@ impl Display for CompactConfig { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { writeln!( f, - " Genesis timestamp: {}", + " Genesis time: {}", self.genesis_timestamp.to_utc_string() )?; if let Some(end) = self.end_timestamp { - writeln!(f, " End timestamp: {}", end.to_utc_string())?; + writeln!(f, " End time: {}", end.to_utc_string())?; } writeln!(f, " Thread count: {}", self.thread_count)?; writeln!(f, " t0: {}", self.t0)?; diff --git a/massa-models/src/config/constants.rs b/massa-models/src/config/constants.rs index bb974e5fc23..f24c1eed1c6 100644 --- a/massa-models/src/config/constants.rs +++ b/massa-models/src/config/constants.rs @@ -45,14 +45,14 @@ lazy_static::lazy_static! { .saturating_add(MassaTime::from_millis(1000 * 10)) ) } else { - 1672790401000.into() // Wednesday, January 04, 2022 00:00:01 AM UTC + 1675296001000.into() // Thursday, February 02, 2023 00:00:01 AM UTC }; /// TESTNET: time when the blockclique is ended. pub static ref END_TIMESTAMP: Option = if cfg!(feature = "sandbox") { None } else { - Some(1675105200000.into()) // Monday, January 30, 2022 19:00:00 PM UTC + Some(1677596400000.into()) // Tuesday, February 28, 2023 15:00:00 UTC }; /// `KeyPair` to sign genesis blocks. pub static ref GENESIS_KEY: KeyPair = KeyPair::from_str("S1UxdCJv5ckDK8z87E5Jq5fEfSVLi2cTHgtpfZy7iURs3KpPns8") @@ -64,7 +64,7 @@ lazy_static::lazy_static!
{ if cfg!(feature = "sandbox") { "SAND.0.0" } else { - "TEST.18.0" + "TEST.19.0" } .parse() .unwrap() @@ -94,7 +94,7 @@ pub const DELTA_F0: u64 = 64 * (ENDORSEMENT_COUNT as u64 + 1); /// Maximum number of operations per block pub const MAX_OPERATIONS_PER_BLOCK: u32 = 5000; /// Maximum block size in bytes -pub const MAX_BLOCK_SIZE: u32 = 500_000; +pub const MAX_BLOCK_SIZE: u32 = 1_000_000; /// Maximum capacity of the asynchronous messages pool pub const MAX_ASYNC_POOL_LENGTH: u64 = 10_000; /// Maximum data size in async message diff --git a/massa-models/src/endorsement.rs b/massa-models/src/endorsement.rs index 52692d067c2..d64a248aeba 100644 --- a/massa-models/src/endorsement.rs +++ b/massa-models/src/endorsement.rs @@ -1,9 +1,9 @@ // Copyright (c) 2022 MASSA LABS use crate::prehash::PreHashed; +use crate::secure_share::{Id, SecureShare, SecureShareContent}; use crate::slot::{Slot, SlotDeserializer, SlotSerializer}; -use crate::wrapped::{Id, Wrapped, WrappedContent}; -use crate::{block::BlockId, error::ModelsError}; +use crate::{block_id::BlockId, error::ModelsError}; use massa_hash::{Hash, HashDeserializer}; use massa_serialization::{ DeserializeError, Deserializer, SerializeError, Serializer, U32VarIntDeserializer, @@ -145,10 +145,28 @@ pub struct Endorsement { pub endorsed_block: BlockId, } +#[cfg(any(test, feature = "testing"))] +impl SecureShareEndorsement { + // TODO: gh-issue #3398 + /// Used under testing conditions to validate an instance of Self + pub fn check_invariants(&self) -> Result<(), Box> { + if let Err(e) = self.verify_signature() { + return Err(e.into()); + } + if self.content.slot.thread >= crate::config::THREAD_COUNT { + Err("Endorsement slot on non-existant thread".into()) + } else if self.content.index >= crate::config::ENDORSEMENT_COUNT { + Err("Endorsement index out of range".into()) + } else { + Ok(()) + } + } +} + /// Wrapped endorsement -pub type WrappedEndorsement = Wrapped; +pub type SecureShareEndorsement = SecureShare; -impl 
WrappedContent for Endorsement {} +impl SecureShareContent for Endorsement {} /// Serializer for `Endorsement` #[derive(Clone)] @@ -176,7 +194,7 @@ impl Default for EndorsementSerializer { impl Serializer for EndorsementSerializer { /// ## Example: /// ```rust - /// use massa_models::{slot::Slot, block::BlockId, endorsement::{Endorsement, EndorsementSerializer}}; + /// use massa_models::{slot::Slot, block_id::BlockId, endorsement::{Endorsement, EndorsementSerializer}}; /// use massa_serialization::Serializer; /// use massa_hash::Hash; /// @@ -223,7 +241,7 @@ impl EndorsementDeserializer { impl Deserializer for EndorsementDeserializer { /// ## Example: /// ```rust - /// use massa_models::{slot::Slot, block::BlockId, endorsement::{Endorsement, EndorsementSerializer, EndorsementDeserializer}}; + /// use massa_models::{slot::Slot, block_id::BlockId, endorsement::{Endorsement, EndorsementSerializer, EndorsementDeserializer}}; /// use massa_serialization::{Serializer, Deserializer, DeserializeError}; /// use massa_hash::Hash; /// @@ -292,7 +310,7 @@ impl Default for EndorsementSerializerLW { impl Serializer for EndorsementSerializerLW { /// ## Example: /// ```rust - /// use massa_models::{slot::Slot, block::BlockId, endorsement::{Endorsement, EndorsementSerializerLW}}; + /// use massa_models::{slot::Slot, block_id::BlockId, endorsement::{Endorsement, EndorsementSerializerLW}}; /// use massa_serialization::Serializer; /// use massa_hash::Hash; /// @@ -334,7 +352,7 @@ impl EndorsementDeserializerLW { impl Deserializer for EndorsementDeserializerLW { /// ## Example: /// ```rust - /// use massa_models::{slot::Slot, block::BlockId, endorsement::{Endorsement, EndorsementSerializerLW, EndorsementDeserializerLW}}; + /// use massa_models::{slot::Slot, block_id::BlockId, endorsement::{Endorsement, EndorsementSerializerLW, EndorsementDeserializerLW}}; /// use massa_serialization::{Serializer, Deserializer, DeserializeError}; /// use massa_hash::Hash; /// @@ -369,7 +387,7 @@ impl 
Deserializer for EndorsementDeserializerLW { #[cfg(test)] mod tests { - use crate::wrapped::{WrappedDeserializer, WrappedSerializer}; + use crate::secure_share::{SecureShareDeserializer, SecureShareSerializer}; use super::*; use massa_serialization::DeserializeError; @@ -385,17 +403,17 @@ mod tests { index: 0, endorsed_block: BlockId(Hash::compute_from("blk".as_bytes())), }; - let endorsement: WrappedEndorsement = - Endorsement::new_wrapped(content, EndorsementSerializer::new(), &sender_keypair) + let endorsement: SecureShareEndorsement = + Endorsement::new_verifiable(content, EndorsementSerializer::new(), &sender_keypair) .unwrap(); let mut ser_endorsement: Vec = Vec::new(); - let serializer = WrappedSerializer::new(); + let serializer = SecureShareSerializer::new(); serializer .serialize(&endorsement, &mut ser_endorsement) .unwrap(); - let (_, res_endorsement): (&[u8], WrappedEndorsement) = - WrappedDeserializer::new(EndorsementDeserializer::new(32, 1)) + let (_, res_endorsement): (&[u8], SecureShareEndorsement) = + SecureShareDeserializer::new(EndorsementDeserializer::new(32, 1)) .deserialize::(&ser_endorsement) .unwrap(); assert_eq!(res_endorsement, endorsement); @@ -410,22 +428,23 @@ mod tests { index: 0, endorsed_block: BlockId(Hash::compute_from("blk".as_bytes())), }; - let endorsement: WrappedEndorsement = - Endorsement::new_wrapped(content, EndorsementSerializerLW::new(), &sender_keypair) + let endorsement: SecureShareEndorsement = + Endorsement::new_verifiable(content, EndorsementSerializerLW::new(), &sender_keypair) .unwrap(); let mut ser_endorsement: Vec = Vec::new(); - let serializer = WrappedSerializer::new(); + let serializer = SecureShareSerializer::new(); serializer .serialize(&endorsement, &mut ser_endorsement) .unwrap(); let parent = BlockId(Hash::compute_from("blk".as_bytes())); - let (_, res_endorsement): (&[u8], WrappedEndorsement) = - WrappedDeserializer::new(EndorsementDeserializerLW::new(1, Slot::new(10, 1), parent)) - 
.deserialize::(&ser_endorsement) - .unwrap(); + let (_, res_endorsement): (&[u8], SecureShareEndorsement) = SecureShareDeserializer::new( + EndorsementDeserializerLW::new(1, Slot::new(10, 1), parent), + ) + .deserialize::(&ser_endorsement) + .unwrap(); // Test only endorsement index as with the lw ser. we only process this field assert_eq!(res_endorsement.content.index, endorsement.content.index); } diff --git a/massa-models/src/execution.rs b/massa-models/src/execution.rs index c49d87c3f49..2ca8ca807fe 100644 --- a/massa-models/src/execution.rs +++ b/massa-models/src/execution.rs @@ -1,48 +1,31 @@ -use std::{collections::VecDeque, fmt::Display}; +// Copyright (c) 2022 MASSA LABS -use crate::{output_event::SCOutputEvent, slot::Slot}; +use crate::{address::Address, operation::OperationId, slot::Slot}; use serde::{Deserialize, Serialize}; -/// The result of the read-only execution. -#[derive(Clone, Debug, Deserialize, Serialize)] -pub enum ReadOnlyResult { - /// An error occurred during execution. - Error(String), - /// The result of a successful execution. - Ok(Vec), -} - -/// The response to a request for a read-only execution. -#[derive(Clone, Debug, Deserialize, Serialize)] -pub struct ExecuteReadOnlyResponse { - /// The slot at which the read-only execution occurred. - pub executed_at: Slot, - /// The result of the read-only execution. - pub result: ReadOnlyResult, - /// The output events generated by the read-only execution. 
- pub output_events: VecDeque, - /// The gas cost for the execution - pub gas_cost: u64, -} - -impl Display for ExecuteReadOnlyResponse { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - writeln!(f, "Executed at slot: {}", self.executed_at)?; - writeln!( - f, - "Result: {}", - match &self.result { - ReadOnlyResult::Error(e) => - format!("an error occurred during the execution: {}", e), - ReadOnlyResult::Ok(ret) => format!("success, returned value: {:?}", ret), - } - )?; - if !self.output_events.is_empty() { - writeln!(f, "Generated events:",)?; - for event in self.output_events.iter() { - writeln!(f, "{}", event)?; // id already displayed in event - } - } - Ok(()) - } +/// filter used when retrieving SC output events +#[derive(Default, Debug, Deserialize, Clone, Serialize)] +pub struct EventFilter { + /// optional start slot + pub start: Option, + /// optional end slot + pub end: Option, + /// optional emitter address + pub emitter_address: Option
, + /// optional caller address + pub original_caller_address: Option
, + /// optional operation id + pub original_operation_id: Option, + /// optional event status + /// + /// Some(true) means final + /// Some(false) means candidate + /// None means final _and_ candidate + pub is_final: Option, + /// optional execution status + /// + /// Some(true) means events coming from a failed sc execution + /// Some(false) means events coming from a succeeded sc execution + /// None means both + pub is_error: Option, } diff --git a/massa-models/src/ledger_models.rs b/massa-models/src/ledger.rs similarity index 95% rename from massa-models/src/ledger_models.rs rename to massa-models/src/ledger.rs index 1f93b7c93af..115abdbd856 100644 --- a/massa-models/src/ledger_models.rs +++ b/massa-models/src/ledger.rs @@ -49,10 +49,10 @@ impl LedgerDataSerializer { impl Serializer for LedgerDataSerializer { /// ## Example: /// ```rust - /// use massa_models::ledger_models::{LedgerData, LedgerDataSerializer}; /// use massa_models::amount::Amount; /// use massa_serialization::Serializer; /// use std::str::FromStr; + /// use massa_models::ledger::{LedgerData, LedgerDataSerializer}; /// /// let ledger_data = LedgerData { /// balance: Amount::from_str("1349").unwrap(), @@ -92,10 +92,10 @@ impl Default for LedgerDataDeserializer { impl Deserializer for LedgerDataDeserializer { /// ## Example: /// ```rust - /// use massa_models::ledger_models::{LedgerData, LedgerDataDeserializer, LedgerDataSerializer}; /// use massa_models::amount::Amount; /// use massa_serialization::{Serializer, Deserializer, DeserializeError}; /// use std::str::FromStr; + /// use massa_models::ledger::{LedgerData, LedgerDataDeserializer, LedgerDataSerializer}; /// /// let ledger_data = LedgerData { /// balance: Amount::from_str("1349").unwrap(), @@ -193,9 +193,9 @@ impl LedgerChangeSerializer { impl Serializer for LedgerChangeSerializer { /// ## Example /// ```rust - /// use massa_models::{address::Address, amount::Amount, ledger_models::LedgerChangeSerializer}; + /// use 
massa_models::{address::Address, amount::Amount}; /// use std::str::FromStr; - /// use massa_models::ledger_models::LedgerChange; + /// use massa_models::ledger::{LedgerChange, LedgerChangeSerializer}; /// use massa_serialization::Serializer; /// let ledger_change = LedgerChange { /// balance_delta: Amount::from_str("1149").unwrap(), @@ -238,9 +238,9 @@ impl Default for LedgerChangeDeserializer { impl Deserializer for LedgerChangeDeserializer { /// ## Example /// ```rust - /// use massa_models::{address::Address, amount::Amount, ledger_models::{LedgerChangeSerializer, LedgerChangeDeserializer}}; + /// use massa_models::{address::Address, amount::Amount}; /// use std::str::FromStr; - /// use massa_models::ledger_models::LedgerChange; + /// use massa_models::ledger::{LedgerChange, LedgerChangeDeserializer, LedgerChangeSerializer}; /// use massa_serialization::{Serializer, Deserializer, DeserializeError}; /// let ledger_change = LedgerChange { /// balance_delta: Amount::from_str("1149").unwrap(), @@ -377,9 +377,9 @@ impl LedgerChangesDeserializer { impl Deserializer for LedgerChangesDeserializer { /// ## Example /// ```rust - /// # use massa_models::{address::Address, amount::Amount, ledger_models::{LedgerChangesSerializer, LedgerChangesDeserializer, LedgerChangeSerializer, LedgerChangeDeserializer}}; + /// # use massa_models::{address::Address, amount::Amount}; /// # use std::str::FromStr; - /// # use massa_models::ledger_models::{LedgerChanges, LedgerChange}; + /// use massa_models::ledger::{LedgerChange, LedgerChanges, LedgerChangesDeserializer, LedgerChangeSerializer, LedgerChangesSerializer}; /// # use massa_serialization::{Serializer, Deserializer, DeserializeError}; /// # let ledger_changes = LedgerChanges(vec![ /// # ( diff --git a/massa-models/src/lib.rs b/massa-models/src/lib.rs index 30cd8478710..9298e92ba47 100644 --- a/massa-models/src/lib.rs +++ b/massa-models/src/lib.rs @@ -15,10 +15,12 @@ pub mod active_block; pub mod address; /// amount related 
structures pub mod amount; -/// structure use by the API -pub mod api; -/// block-related structures +/// block structure pub mod block; +/// block-related structure: block_header +pub mod block_header; +/// block-related structure: block_id +pub mod block_id; /// clique pub mod clique; /// various structures @@ -34,7 +36,7 @@ pub mod error; /// execution related structures pub mod execution; /// ledger related structures -pub mod ledger_models; +pub mod ledger; /// node related structure pub mod node; /// operations @@ -45,6 +47,8 @@ pub mod output_event; pub mod prehash; /// rolls pub mod rolls; +/// trait for [Signature] secured data-structs +pub mod secure_share; /// serialization pub mod serialization; /// slots @@ -57,8 +61,6 @@ pub mod streaming_step; pub mod timeslots; /// versions pub mod version; -/// trait for signed structure -pub mod wrapped; /// Test utils #[cfg(feature = "testing")] diff --git a/massa-models/src/operation.rs b/massa-models/src/operation.rs index 5f960f51a5e..3fa874074b1 100644 --- a/massa-models/src/operation.rs +++ b/massa-models/src/operation.rs @@ -2,7 +2,9 @@ use crate::datastore::{Datastore, DatastoreDeserializer, DatastoreSerializer}; use crate::prehash::{PreHashSet, PreHashed}; -use crate::wrapped::{Id, Wrapped, WrappedContent, WrappedDeserializer, WrappedSerializer}; +use crate::secure_share::{ + Id, SecureShare, SecureShareContent, SecureShareDeserializer, SecureShareSerializer, +}; use crate::{ address::{Address, AddressDeserializer}, amount::{Amount, AmountDeserializer, AmountSerializer}, @@ -26,7 +28,7 @@ use nom::{ }; use num_enum::{IntoPrimitive, TryFromPrimitive}; use serde::{Deserialize, Serialize}; -use serde_with::{DeserializeFromStr, SerializeDisplay}; +use serde_with::{serde_as, DeserializeFromStr, SerializeDisplay}; use std::convert::TryInto; use std::fmt::Formatter; use std::{ops::Bound::Included, ops::RangeInclusive, str::FromStr}; @@ -262,9 +264,9 @@ impl std::fmt::Display for Operation { } /// signed 
operation -pub type WrappedOperation = Wrapped; +pub type SecureShareOperation = SecureShare; -impl WrappedContent for Operation {} +impl SecureShareContent for Operation {} /// Serializer for `Operation` pub struct OperationSerializer { @@ -419,6 +421,7 @@ impl Deserializer for OperationDeserializer { } /// Type specific operation content +#[serde_as] #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] pub enum OperationType { /// transfer coins from sender to recipient @@ -445,7 +448,7 @@ pub enum OperationType { /// The maximum amount of gas that the execution of the contract is allowed to cost. max_gas: u64, /// A key-value store associating a hash to arbitrary bytes - #[serde(skip)] + #[serde_as(as = "Vec<(_, _)>")] datastore: Datastore, }, /// Calls an exported function from a stored smart contract @@ -790,7 +793,7 @@ impl Deserializer for OperationTypeDeserializer { } } -impl WrappedOperation { +impl SecureShareOperation { /// get the range of periods during which an operation is valid /// Range: `(op.expire_period - cfg.operation_validity_period) -> op.expire_period` (included) pub fn get_validity_range(&self, operation_validity_period: u64) -> RangeInclusive { @@ -815,7 +818,7 @@ impl WrappedOperation { /// get the addresses that are involved in this operation from a ledger point of view pub fn get_ledger_involved_addresses(&self) -> PreHashSet
{ let mut res = PreHashSet::
::default(); - let emitter_address = Address::from_public_key(&self.creator_public_key); + let emitter_address = Address::from_public_key(&self.content_creator_pub_key); res.insert(emitter_address); match &self.content.op { OperationType::Transaction { @@ -854,10 +857,10 @@ impl WrappedOperation { match self.content.op { OperationType::Transaction { .. } => {} OperationType::RollBuy { .. } => { - res.insert(Address::from_public_key(&self.creator_public_key)); + res.insert(Address::from_public_key(&self.content_creator_pub_key)); } OperationType::RollSell { .. } => { - res.insert(Address::from_public_key(&self.creator_public_key)); + res.insert(Address::from_public_key(&self.content_creator_pub_key)); } OperationType::ExecuteSC { .. } => {} OperationType::CallSC { .. } => {} @@ -1125,7 +1128,7 @@ impl Serializer for OperationPrefixIdsSerializer { /// Serializer for `Operations` pub struct OperationsSerializer { u32_serializer: U32VarIntSerializer, - signed_op_serializer: WrappedSerializer, + signed_op_serializer: SecureShareSerializer, } impl OperationsSerializer { @@ -1133,7 +1136,7 @@ impl OperationsSerializer { pub const fn new() -> Self { Self { u32_serializer: U32VarIntSerializer::new(), - signed_op_serializer: WrappedSerializer::new(), + signed_op_serializer: SecureShareSerializer::new(), } } } @@ -1144,10 +1147,10 @@ impl Default for OperationsSerializer { } } -impl Serializer> for OperationsSerializer { +impl Serializer> for OperationsSerializer { /// ## Example: /// ```rust - /// use massa_models::{operation::{WrappedOperation, Operation, OperationType, OperationsSerializer, OperationSerializer}, wrapped::WrappedContent, address::Address, amount::Amount}; + /// use massa_models::{operation::{SecureShareOperation, Operation, OperationType, OperationsSerializer, OperationSerializer}, secure_share::SecureShareContent, address::Address, amount::Amount}; /// use massa_signature::KeyPair; /// use massa_serialization::Serializer; /// use std::str::FromStr; @@ 
-1162,14 +1165,14 @@ impl Serializer> for OperationsSerializer { /// op, /// expire_period: 50, /// }; - /// let op_wrapped = Operation::new_wrapped(content, OperationSerializer::new(), &keypair).unwrap(); - /// let operations = vec![op_wrapped.clone(), op_wrapped.clone()]; + /// let op_secured = Operation::new_verifiable(content, OperationSerializer::new(), &keypair).unwrap(); + /// let operations = vec![op_secured.clone(), op_secured.clone()]; /// let mut buffer = Vec::new(); /// OperationsSerializer::new().serialize(&operations, &mut buffer).unwrap(); /// ``` fn serialize( &self, - value: &Vec, + value: &Vec, buffer: &mut Vec, ) -> Result<(), SerializeError> { let list_len: u32 = value.len().try_into().map_err(|_| { @@ -1186,7 +1189,7 @@ impl Serializer> for OperationsSerializer { /// Deserializer for `Operations` pub struct OperationsDeserializer { length_deserializer: U32VarIntDeserializer, - signed_op_deserializer: WrappedDeserializer, + signed_op_deserializer: SecureShareDeserializer, } impl OperationsDeserializer { @@ -1205,7 +1208,7 @@ impl OperationsDeserializer { Included(0), Included(max_operations_per_message), ), - signed_op_deserializer: WrappedDeserializer::new(OperationDeserializer::new( + signed_op_deserializer: SecureShareDeserializer::new(OperationDeserializer::new( max_datastore_value_length, max_function_name_length, max_parameters_size, @@ -1217,10 +1220,10 @@ impl OperationsDeserializer { } } -impl Deserializer> for OperationsDeserializer { +impl Deserializer> for OperationsDeserializer { /// ## Example: /// ```rust - /// use massa_models::{operation::{WrappedOperation, Operation, OperationType, OperationsSerializer, OperationsDeserializer, OperationSerializer}, wrapped::WrappedContent, address::Address, amount::Amount}; + /// use massa_models::{operation::{SecureShareOperation, Operation, OperationType, OperationsSerializer, OperationsDeserializer, OperationSerializer}, secure_share::SecureShareContent, address::Address, amount::Amount}; 
/// use massa_signature::KeyPair; /// use massa_serialization::{Serializer, Deserializer, DeserializeError}; /// use std::str::FromStr; @@ -1235,22 +1238,22 @@ impl Deserializer> for OperationsDeserializer { /// op, /// expire_period: 50, /// }; - /// let op_wrapped = Operation::new_wrapped(content, OperationSerializer::new(), &keypair).unwrap(); - /// let operations = vec![op_wrapped.clone(), op_wrapped.clone()]; + /// let op_secured = Operation::new_verifiable(content, OperationSerializer::new(), &keypair).unwrap(); + /// let operations = vec![op_secured.clone(), op_secured.clone()]; /// let mut buffer = Vec::new(); /// OperationsSerializer::new().serialize(&operations, &mut buffer).unwrap(); /// let (rest, deserialized_operations) = OperationsDeserializer::new(10000, 10000, 10000, 10000, 10, 255, 10_000).deserialize::(&buffer).unwrap(); /// for (operation1, operation2) in deserialized_operations.iter().zip(operations.iter()) { /// assert_eq!(operation1.id, operation2.id); /// assert_eq!(operation1.signature, operation2.signature); - /// assert_eq!(operation1.creator_public_key, operation2.creator_public_key); + /// assert_eq!(operation1.content_creator_pub_key, operation2.content_creator_pub_key); /// assert_eq!(operation1.content.fee, operation2.content.fee); /// } /// ``` fn deserialize<'a, E: ParseError<&'a [u8]> + ContextError<&'a [u8]>>( &self, buffer: &'a [u8], - ) -> IResult<&'a [u8], Vec, E> { + ) -> IResult<&'a [u8], Vec, E> { context( "Failed Operations deserialization", length_count( @@ -1331,14 +1334,14 @@ mod tests { let op_serializer = OperationSerializer::new(); - let op = Operation::new_wrapped(content, op_serializer, &sender_keypair).unwrap(); + let op = Operation::new_verifiable(content, op_serializer, &sender_keypair).unwrap(); let mut ser_op = Vec::new(); - WrappedSerializer::new() + SecureShareSerializer::new() .serialize(&op, &mut ser_op) .unwrap(); - let (_, res_op): (&[u8], WrappedOperation) = - 
WrappedDeserializer::new(OperationDeserializer::new( + let (_, res_op): (&[u8], SecureShareOperation) = + SecureShareDeserializer::new(OperationDeserializer::new( MAX_DATASTORE_VALUE_LENGTH, MAX_FUNCTION_NAME_LENGTH, MAX_PARAMETERS_SIZE, @@ -1405,14 +1408,14 @@ mod tests { assert_eq!(res_content, content); let op_serializer = OperationSerializer::new(); - let op = Operation::new_wrapped(content, op_serializer, &sender_keypair).unwrap(); + let op = Operation::new_verifiable(content, op_serializer, &sender_keypair).unwrap(); let mut ser_op = Vec::new(); - WrappedSerializer::new() + SecureShareSerializer::new() .serialize(&op, &mut ser_op) .unwrap(); - let (_, res_op): (&[u8], WrappedOperation) = - WrappedDeserializer::new(OperationDeserializer::new( + let (_, res_op): (&[u8], SecureShareOperation) = + SecureShareDeserializer::new(OperationDeserializer::new( MAX_DATASTORE_VALUE_LENGTH, MAX_FUNCTION_NAME_LENGTH, MAX_PARAMETERS_SIZE, @@ -1481,14 +1484,14 @@ mod tests { assert_eq!(res_content, content); let op_serializer = OperationSerializer::new(); - let op = Operation::new_wrapped(content, op_serializer, &sender_keypair).unwrap(); + let op = Operation::new_verifiable(content, op_serializer, &sender_keypair).unwrap(); let mut ser_op = Vec::new(); - WrappedSerializer::new() + SecureShareSerializer::new() .serialize(&op, &mut ser_op) .unwrap(); - let (_, res_op): (&[u8], WrappedOperation) = - WrappedDeserializer::new(OperationDeserializer::new( + let (_, res_op): (&[u8], SecureShareOperation) = + SecureShareDeserializer::new(OperationDeserializer::new( MAX_DATASTORE_VALUE_LENGTH, MAX_FUNCTION_NAME_LENGTH, MAX_PARAMETERS_SIZE, diff --git a/massa-models/src/output_event.rs b/massa-models/src/output_event.rs index 3a2dc851ee0..924b3927bdb 100644 --- a/massa-models/src/output_event.rs +++ b/massa-models/src/output_event.rs @@ -1,4 +1,4 @@ -use crate::{address::Address, block::BlockId, operation::OperationId, slot::Slot}; +use crate::{address::Address, block_id::BlockId, 
operation::OperationId, slot::Slot}; use serde::{Deserialize, Serialize}; use std::{collections::VecDeque, fmt::Display}; diff --git a/massa-models/src/wrapped.rs b/massa-models/src/secure_share.rs similarity index 60% rename from massa-models/src/wrapped.rs rename to massa-models/src/secure_share.rs index f5980aadc5d..40ca8475c37 100644 --- a/massa-models/src/wrapped.rs +++ b/massa-models/src/secure_share.rs @@ -14,26 +14,33 @@ use nom::{ }; use serde::{Deserialize, Serialize}; -/// Wrapped structure T where U is the associated id +/// Packages type T such that it can be securely sent and received in a trust-free network +/// +/// If the internal content is mutated, then it must be re-wrapped, as the associated +/// signature, serialized data, etc. would no longer be in sync #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] -pub struct Wrapped +pub struct SecureShare where - T: Display + WrappedContent, - U: Id, + T: Display + SecureShareContent, + ID: Id, { - /// content + /// Reference contents. Not required for the security protocols. + /// + /// Use the Lightweight equivalent structures when you need verifiable + /// serialized data, but do not need to read the values directly (such as when sending) pub content: T, - /// signature - pub signature: Signature, - /// the content creator public key - pub creator_public_key: PublicKey, - /// the content creator address - pub creator_address: Address, - /// Id - pub id: U, #[serde(skip)] - /// Content serialized + /// Content in sharable, deserializable form. Is used in the secure verification protocols. pub serialized_data: Vec, + + /// A cryptographically generated value using `serialized_data` and a public key. + pub signature: Signature, + /// The public-key component used in the generation of the signature + pub content_creator_pub_key: PublicKey, + /// Derived from the same public key used to generate the signature + pub content_creator_address: Address, + /// A secure hash of the data.
See also [massa_hash::Hash] + pub id: ID, } /// Used by signed structure @@ -44,17 +51,18 @@ pub trait Id { fn get_hash(&self) -> &Hash; } -/// Trait that define a structure that can be wrapped. -pub trait WrappedContent +/// Trait that defines a structure that can be signed for secure sharing. +pub trait SecureShareContent where Self: Sized + Display, { - /// Creates a wrapped version of the object - fn new_wrapped, U: Id>( + /// Using the provided key-pair, applies a cryptographic signature, and packages + /// the data required to share and verify the data in a trust-free network of peers. + fn new_verifiable, ID: Id>( content: Self, - content_serializer: SC, + content_serializer: Ser, keypair: &KeyPair, - ) -> Result, ModelsError> { + ) -> Result, ModelsError> { let mut content_serialized = Vec::new(); content_serializer.serialize(&content, &mut content_serialized)?; let mut hash_data = Vec::new(); @@ -63,17 +71,17 @@ where hash_data.extend(content_serialized.clone()); let hash = Hash::compute_from(&hash_data); let creator_address = Address::from_public_key(&public_key); - Ok(Wrapped { + Ok(SecureShare { signature: keypair.sign(&hash)?, - creator_public_key: public_key, - creator_address, + content_creator_pub_key: public_key, + content_creator_address: creator_address, content, serialized_data: content_serialized, - id: U::new(hash), + id: ID::new(hash), }) } - /// Serialize the wrapped structure + /// Serialize the secured structure fn serialize( signature: &Signature, creator_public_key: &PublicKey, @@ -86,21 +94,21 @@ where Ok(()) } - /// Deserialize the wrapped structure + /// Deserialize the secured structure fn deserialize< 'a, E: ParseError<&'a [u8]> + ContextError<&'a [u8]>, - DC: Deserializer, - U: Id, + Deser: Deserializer, + ID: Id, >( content_serializer: Option<&dyn Serializer>, signature_deserializer: &SignatureDeserializer, creator_public_key_deserializer: &PublicKeyDeserializer, - content_deserializer: &DC, + content_deserializer: &Deser, buffer:
&'a [u8], - ) -> IResult<&'a [u8], Wrapped, E> { + ) -> IResult<&'a [u8], SecureShare, E> { let (serialized_data, (signature, creator_public_key)) = context( - "Failed wrapped deserialization", + "Failed SecureShare deserialization", tuple(( context("Failed signature deserialization", |input| { signature_deserializer.deserialize(input) @@ -131,42 +139,42 @@ where serialized_full_data.extend(&content_serialized); Ok(( rest, - Wrapped { + SecureShare { content, signature, - creator_public_key, - creator_address, + content_creator_pub_key: creator_public_key, + content_creator_address: creator_address, serialized_data: content_serialized.to_vec(), - id: U::new(Hash::compute_from(&serialized_full_data)), + id: ID::new(Hash::compute_from(&serialized_full_data)), }, )) } } -impl Display for Wrapped +impl Display for SecureShare where - T: Display + WrappedContent, - U: Id, + T: Display + SecureShareContent, + ID: Id, { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { writeln!(f, "Signature: {}", self.signature)?; - writeln!(f, "Creator pubkey: {}", self.creator_public_key)?; - writeln!(f, "Creator address: {}", self.creator_address)?; + writeln!(f, "Creator pubkey: {}", self.content_creator_pub_key)?; + writeln!(f, "Creator address: {}", self.content_creator_address)?; writeln!(f, "Id: {}", self.id.get_hash())?; writeln!(f, "{}", self.content)?; Ok(()) } } -impl Wrapped +impl SecureShare where - T: Display + WrappedContent, - U: Id, + T: Display + SecureShareContent, + ID: Id, { /// check if self has been signed by public key pub fn verify_signature(&self) -> Result<(), ModelsError> { Ok(self - .creator_public_key + .content_creator_pub_key .verify_signature(self.id.get_hash(), &self.signature)?) } @@ -180,83 +188,87 @@ where } // NOTE FOR EXPLICATION: No content serializer because serialized data is already here. 
-/// Serializer for `Wrapped` structure +/// Serializer for `SecureShare` structure #[derive(Default)] -pub struct WrappedSerializer; +pub struct SecureShareSerializer; -impl WrappedSerializer { - /// Creates a new `WrappedSerializer` +impl SecureShareSerializer { + /// Creates a new `SecureShareSerializer` pub const fn new() -> Self { Self } - /// This method is used to serialize a `Wrapped` structure and use a custom serializer instead of + /// This method is used to serialize a `SecureShare` structure and use a custom serializer instead of /// using the serialized form of the content stored in `serialized_data`. /// This is useful when the content need to be serialized in a lighter form in specific cases. /// /// # Arguments: - /// * `serializer_content`: Custom serializer to be used instead of the data in `serialized_data` - /// * `value`: Wrapped structure to be serialized + /// * `content_serializer`: Custom serializer to be used instead of the data in `serialized_data` + /// * `value`: SecureShare structure to be serialized /// * `buffer`: buffer of serialized data to be extend - pub fn serialize_with( + pub fn serialize_with( &self, - serializer_content: &SC, - value: &Wrapped, + content_serializer: &Ser, + value: &SecureShare, buffer: &mut Vec, ) -> Result<(), SerializeError> where - SC: Serializer, - T: Display + WrappedContent, - U: Id, + Ser: Serializer, + T: Display + SecureShareContent, + ID: Id, { let mut content_buffer = Vec::new(); - serializer_content.serialize(&value.content, &mut content_buffer)?; + content_serializer.serialize(&value.content, &mut content_buffer)?; T::serialize( &value.signature, - &value.creator_public_key, + &value.content_creator_pub_key, &content_buffer, buffer, ) } } -impl Serializer> for WrappedSerializer +impl Serializer> for SecureShareSerializer where - T: Display + WrappedContent, - U: Id, + T: Display + SecureShareContent, + ID: Id, { - fn serialize(&self, value: &Wrapped, buffer: &mut Vec) -> Result<(), 
SerializeError> { + fn serialize( + &self, + value: &SecureShare, + buffer: &mut Vec, + ) -> Result<(), SerializeError> { T::serialize( &value.signature, - &value.creator_public_key, + &value.content_creator_pub_key, &value.serialized_data, buffer, ) } } -/// Deserializer for Wrapped structure -pub struct WrappedDeserializer +/// Deserializer for SecureShare structure +pub struct SecureShareDeserializer where - T: Display + WrappedContent, - DT: Deserializer, + T: Display + SecureShareContent, + Deser: Deserializer, { signature_deserializer: SignatureDeserializer, public_key_deserializer: PublicKeyDeserializer, - content_deserializer: DT, + content_deserializer: Deser, marker_t: std::marker::PhantomData, } -impl WrappedDeserializer +impl SecureShareDeserializer where - T: Display + WrappedContent, - DT: Deserializer, + T: Display + SecureShareContent, + Deser: Deserializer, { - /// Creates a new `WrappedDeserializer` + /// Creates a new `SecureShareDeserializer` /// /// # Arguments /// * `content_deserializer` - Deserializer for the content - pub const fn new(content_deserializer: DT) -> Self { + pub const fn new(content_deserializer: Deser) -> Self { Self { signature_deserializer: SignatureDeserializer::new(), public_key_deserializer: PublicKeyDeserializer::new(), @@ -276,17 +288,17 @@ where /// * `buffer`: buffer of serialized data to be deserialized /// /// # Returns: - /// A rest and the wrapped structure with coherent fields. 
+ /// A rest (data left over from deserialization), an instance of `T`, and the data enabling signature verification pub fn deserialize_with< 'a, E: ParseError<&'a [u8]> + ContextError<&'a [u8]>, - U: Id, - ST: Serializer, + ID: Id, + Ser: Serializer, >( &self, - content_serializer: &ST, + content_serializer: &Ser, buffer: &'a [u8], - ) -> IResult<&'a [u8], Wrapped, E> { + ) -> IResult<&'a [u8], SecureShare, E> { T::deserialize( Some(content_serializer), &self.signature_deserializer, @@ -297,14 +309,15 @@ where } } -impl Deserializer> for WrappedDeserializer +impl Deserializer> for SecureShareDeserializer where - T: Display + WrappedContent, - U: Id, - DT: Deserializer, + T: Display + SecureShareContent, + ID: Id, + Deser: Deserializer, { /// ``` - /// # use massa_models::{block::BlockId, endorsement::{Endorsement, EndorsementSerializer, EndorsementDeserializer}, slot::Slot, wrapped::{Wrapped, WrappedSerializer, WrappedDeserializer, WrappedContent}}; + /// # use massa_models::{endorsement::{Endorsement, EndorsementSerializer, EndorsementDeserializer}, slot::Slot, secure_share::{SecureShare, SecureShareSerializer, SecureShareDeserializer, SecureShareContent}}; + /// use massa_models::block_id::BlockId; /// # use massa_serialization::{Deserializer, Serializer, DeserializeError, U16VarIntSerializer, U16VarIntDeserializer}; /// # use massa_signature::KeyPair; /// # use std::ops::Bound::Included; @@ -316,22 +329,22 @@ where /// endorsed_block: BlockId(Hash::compute_from("blk".as_bytes())), /// }; /// let keypair = KeyPair::generate(); - /// let wrapped: Wrapped = Endorsement::new_wrapped( + /// let secured: SecureShare = Endorsement::new_verifiable( /// content, /// EndorsementSerializer::new(), /// &keypair /// ).unwrap(); /// let mut serialized_data = Vec::new(); - /// let serialized = WrappedSerializer::new().serialize(&wrapped, &mut serialized_data).unwrap(); - /// let deserializer = WrappedDeserializer::new(EndorsementDeserializer::new(32, 1)); - /// let (rest, 
deserialized): (&[u8], Wrapped) = deserializer.deserialize::(&serialized_data).unwrap(); + /// let serialized = SecureShareSerializer::new().serialize(&secured, &mut serialized_data).unwrap(); + /// let deserializer = SecureShareDeserializer::new(EndorsementDeserializer::new(32, 1)); + /// let (rest, deserialized): (&[u8], SecureShare) = deserializer.deserialize::(&serialized_data).unwrap(); /// assert!(rest.is_empty()); - /// assert_eq!(wrapped.id, deserialized.id); + /// assert_eq!(secured.id, deserialized.id); /// ``` fn deserialize<'a, E: ParseError<&'a [u8]> + ContextError<&'a [u8]>>( &self, buffer: &'a [u8], - ) -> IResult<&'a [u8], Wrapped, E> { + ) -> IResult<&'a [u8], SecureShare, E> { T::deserialize( None, &self.signature_deserializer, diff --git a/massa-models/src/slot.rs b/massa-models/src/slot.rs index 86b8c7c177f..b4bd7b11784 100644 --- a/massa-models/src/slot.rs +++ b/massa-models/src/slot.rs @@ -329,3 +329,18 @@ impl Slot { .saturating_sub(s.thread as u64)) } } + +/// When an address is drawn to create an endorsement it is selected for a specific index +#[derive(Debug, Deserialize, Serialize, Hash, PartialEq, Eq)] +pub struct IndexedSlot { + /// slot + pub slot: Slot, + /// endorsement index in the slot + pub index: usize, +} + +impl std::fmt::Display for IndexedSlot { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + writeln!(f, "Slot: {}, Index: {}", self.slot, self.index) + } +} diff --git a/massa-models/src/timeslots.rs b/massa-models/src/timeslots.rs index 0a9883f1c06..c898de3b037 100644 --- a/massa-models/src/timeslots.rs +++ b/massa-models/src/timeslots.rs @@ -39,17 +39,17 @@ pub fn get_block_slot_timestamp( ) -> Result { let base: MassaTime = t0 .checked_div_u64(thread_count as u64) - .or(Err(ModelsError::TimeOverflowError))? + .map_err(|_| ModelsError::TimeOverflowError)? 
.checked_mul(slot.thread as u64) - .or(Err(ModelsError::TimeOverflowError))?; + .map_err(|_| ModelsError::TimeOverflowError)?; let shift: MassaTime = t0 .checked_mul(slot.period) - .or(Err(ModelsError::TimeOverflowError))?; + .map_err(|_| ModelsError::TimeOverflowError)?; genesis_timestamp .checked_add(base) - .or(Err(ModelsError::TimeOverflowError))? + .map_err(|_| ModelsError::TimeOverflowError)? .checked_add(shift) - .or(Err(ModelsError::TimeOverflowError)) + .map_err(|_| ModelsError::TimeOverflowError) } /// Returns the thread and block period index of the latest block slot at a given timestamp (inclusive), if any happened diff --git a/massa-network-exports/Cargo.toml b/massa-network-exports/Cargo.toml index 9009a3a1eb1..5b597f24253 100644 --- a/massa-network-exports/Cargo.toml +++ b/massa-network-exports/Cargo.toml @@ -10,7 +10,7 @@ displaydoc = "0.2" nom = "7.1" serde = { version = "1.0", features = ["derive"] } thiserror = "1.0" -tokio = { version = "1.21", features = ["full"] } +tokio = { version = "1.23", features = ["full"] } enum-map = { version = "2.4", features = ["serde"] } # custom modules massa_hash = { path = "../massa-hash" } diff --git a/massa-network-exports/src/commands.rs b/massa-network-exports/src/commands.rs index d2547f8b288..7187fb4aad6 100644 --- a/massa-network-exports/src/commands.rs +++ b/massa-network-exports/src/commands.rs @@ -71,11 +71,12 @@ use crate::{BootstrapPeers, ConnectionClosureReason, Peers}; use massa_models::{ - block::{BlockId, WrappedHeader}, + block_header::SecuredHeader, + block_id::BlockId, composite::PubkeySig, - endorsement::WrappedEndorsement, + endorsement::SecureShareEndorsement, node::NodeId, - operation::{OperationId, OperationPrefixIds, WrappedOperation}, + operation::{OperationId, OperationPrefixIds, SecureShareOperation}, stats::NetworkStats, }; use serde::{Deserialize, Serialize}; @@ -89,7 +90,7 @@ pub enum NodeCommand { /// Send given peer list to node. 
SendPeerList(Vec), /// Send the header of a block to a node. - SendBlockHeader(WrappedHeader), + SendBlockHeader(SecuredHeader), /// Ask for info on a list of blocks. AskForBlocks(Vec<(BlockId, AskForBlocksInfo)>), /// Reply with info on a list of blocks. @@ -97,13 +98,13 @@ pub enum NodeCommand { /// Close the node worker. Close(ConnectionClosureReason), /// Send full Operations (send to a node that previously asked for) - SendOperations(Vec), + SendOperations(Vec), /// Send a batch of operation ids SendOperationAnnouncements(OperationPrefixIds), /// Ask for a set of operations AskForOperations(OperationPrefixIds), /// Endorsements - SendEndorsements(Vec), + SendEndorsements(Vec), /// Ask peer list AskPeerList, } @@ -119,19 +120,19 @@ pub enum NodeEventType { /// Node we are connected to sent peer list ReceivedPeerList(Vec), /// Node we are connected to sent block header - ReceivedBlockHeader(WrappedHeader), + ReceivedBlockHeader(SecuredHeader), /// Node we are connected asked for info on a list of blocks. ReceivedAskForBlocks(Vec<(BlockId, AskForBlocksInfo)>), /// Node we are connected sent info on a list of blocks. ReceivedReplyForBlocks(Vec<(BlockId, BlockInfoReply)>), /// Received full operations. - ReceivedOperations(Vec), + ReceivedOperations(Vec), /// Received an operation id batch announcing new operations ReceivedOperationAnnouncements(OperationPrefixIds), /// Receive a list of wanted operations ReceivedAskForOperations(OperationPrefixIds), /// Receive a set of endorsement - ReceivedEndorsements(Vec), + ReceivedEndorsements(Vec), } /// Events node worker can emit. 
@@ -172,7 +173,7 @@ pub enum NetworkCommand { /// to node id node: NodeId, /// block id - header: WrappedHeader, + header: SecuredHeader, }, /// `(PeerInfo, Vec <(NodeId, bool)>) peer info + list` of associated Id nodes in connection out (true) GetPeers(oneshot::Sender), @@ -191,7 +192,7 @@ pub enum NetworkCommand { /// to node id node: NodeId, /// endorsements - endorsements: Vec, + endorsements: Vec, }, /// sign message with our node keypair (associated to node id) /// != staking key @@ -211,7 +212,7 @@ pub enum NetworkCommand { /// to node id node: NodeId, /// operations - operations: Vec, + operations: Vec, }, /// Send operation ids batch to a node SendOperationAnnouncements { @@ -238,11 +239,11 @@ pub enum NetworkCommand { #[allow(clippy::large_enum_variant)] pub enum BlockInfoReply { /// Header - Header(WrappedHeader), + Header(SecuredHeader), /// The info about the block is required(list of operations ids). Info(Vec), /// The actual operations required. - Operations(Vec), + Operations(Vec), /// Block not found NotFound, } @@ -267,7 +268,7 @@ pub enum NetworkEvent { /// from node id source_node_id: NodeId, /// header - header: WrappedHeader, + header: SecuredHeader, }, /// Someone ask for block with given header hash. 
AskedForBlocks { @@ -281,7 +282,7 @@ pub enum NetworkEvent { /// node id node: NodeId, /// operations - operations: Vec, + operations: Vec, }, /// Receive a list of `OperationId` ReceivedOperationAnnouncements { @@ -302,7 +303,7 @@ pub enum NetworkEvent { /// node id node: NodeId, /// Endorsements - endorsements: Vec, + endorsements: Vec, }, } diff --git a/massa-network-exports/src/network_controller.rs b/massa-network-exports/src/network_controller.rs index d8dca55d79a..188018df383 100644 --- a/massa-network-exports/src/network_controller.rs +++ b/massa-network-exports/src/network_controller.rs @@ -6,11 +6,12 @@ use crate::{ BlockInfoReply, BootstrapPeers, NetworkCommand, NetworkEvent, Peers, }; use massa_models::{ - block::{BlockId, WrappedHeader}, + block_header::SecuredHeader, + block_id::BlockId, composite::PubkeySig, - endorsement::WrappedEndorsement, + endorsement::SecureShareEndorsement, node::NodeId, - operation::{OperationPrefixIds, WrappedOperation}, + operation::{OperationPrefixIds, SecureShareOperation}, stats::NetworkStats, }; use std::{ @@ -123,7 +124,7 @@ impl NetworkCommandSender { pub async fn send_block_header( &self, node: NodeId, - header: WrappedHeader, + header: SecuredHeader, ) -> Result<(), NetworkError> { self.0 .send(NetworkCommand::SendBlockHeader { node, header }) @@ -178,7 +179,7 @@ impl NetworkCommandSender { pub async fn send_operations( &self, node: NodeId, - operations: Vec, + operations: Vec, ) -> Result<(), NetworkError> { self.0 .send(NetworkCommand::SendOperations { node, operations }) @@ -241,7 +242,7 @@ impl NetworkCommandSender { pub async fn send_endorsements( &self, node: NodeId, - endorsements: Vec, + endorsements: Vec, ) -> Result<(), NetworkError> { self.0 .send(NetworkCommand::SendEndorsements { node, endorsements }) diff --git a/massa-network-worker/Cargo.toml b/massa-network-worker/Cargo.toml index 617c744a724..836bf749293 100644 --- a/massa-network-worker/Cargo.toml +++ b/massa-network-worker/Cargo.toml @@ -15,7 
+15,7 @@ nom = "7.1" rand = "0.8" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" -tokio = { version = "1.21", features = ["full"] } +tokio = { version = "1.23", features = ["full"] } tracing = "0.1" # custom modules massa_hash = { path = "../massa-hash" } diff --git a/massa-network-worker/src/messages.rs b/massa-network-worker/src/messages.rs index cc5ab4855a4..31fef1ddcfc 100644 --- a/massa-network-worker/src/messages.rs +++ b/massa-network-worker/src/messages.rs @@ -2,18 +2,19 @@ use massa_hash::HashDeserializer; use massa_models::{ - block::{BlockHeader, BlockHeaderDeserializer, BlockId, WrappedHeader}, + block_header::{BlockHeader, BlockHeaderDeserializer, SecuredHeader}, + block_id::BlockId, config::HANDSHAKE_RANDOMNESS_SIZE_BYTES, - endorsement::{Endorsement, EndorsementDeserializer, WrappedEndorsement}, + endorsement::{Endorsement, EndorsementDeserializer, SecureShareEndorsement}, operation::{ OperationIdsDeserializer, OperationIdsSerializer, OperationPrefixIds, OperationPrefixIdsDeserializer, OperationPrefixIdsSerializer, OperationsDeserializer, - OperationsSerializer, WrappedOperation, + OperationsSerializer, SecureShareOperation, }, + secure_share::{SecureShareDeserializer, SecureShareSerializer}, serialization::array_from_slice, serialization::{IpAddrDeserializer, IpAddrSerializer}, version::{Version, VersionDeserializer, VersionSerializer}, - wrapped::{WrappedDeserializer, WrappedSerializer}, }; use massa_network_exports::{AskForBlocksInfo, BlockInfoReply}; use massa_serialization::{ @@ -52,7 +53,7 @@ pub enum Message { signature: Signature, }, /// Block header - BlockHeader(WrappedHeader), + BlockHeader(SecuredHeader), /// Message asking the peer for info on a list of blocks. AskForBlocks(Vec<(BlockId, AskForBlocksInfo)>), /// Message replying with info on a list of blocks. @@ -69,9 +70,9 @@ pub enum Message { /// Someone ask for operations. 
AskForOperations(OperationPrefixIds), /// A list of operations - Operations(Vec), + Operations(Vec), /// Endorsements - Endorsements(Vec), + Endorsements(Vec), } #[derive(IntoPrimitive, Debug, Eq, PartialEq, TryFromPrimitive)] @@ -103,7 +104,7 @@ pub(crate) enum BlockInfoType { pub struct MessageSerializer { version_serializer: VersionSerializer, u32_serializer: U32VarIntSerializer, - wrapped_serializer: WrappedSerializer, + secure_serializer: SecureShareSerializer, operation_prefix_ids_serializer: OperationPrefixIdsSerializer, operations_ids_serializer: OperationIdsSerializer, operations_serializer: OperationsSerializer, @@ -116,7 +117,7 @@ impl MessageSerializer { MessageSerializer { version_serializer: VersionSerializer::new(), u32_serializer: U32VarIntSerializer::new(), - wrapped_serializer: WrappedSerializer::new(), + secure_serializer: SecureShareSerializer::new(), operation_prefix_ids_serializer: OperationPrefixIdsSerializer::new(), operations_ids_serializer: OperationIdsSerializer::new(), operations_serializer: OperationsSerializer::new(), @@ -155,7 +156,7 @@ impl Serializer for MessageSerializer { Message::BlockHeader(header) => { self.u32_serializer .serialize(&(MessageTypeId::BlockHeader as u32), buffer)?; - self.wrapped_serializer.serialize(header, buffer)?; + self.secure_serializer.serialize(header, buffer)?; } Message::AskForBlocks(list) => { self.u32_serializer @@ -192,7 +193,7 @@ impl Serializer for MessageSerializer { self.u32_serializer .serialize(&u32::from(info_type), buffer)?; if let BlockInfoReply::Header(header) = info { - self.wrapped_serializer.serialize(header, buffer)?; + self.secure_serializer.serialize(header, buffer)?; } if let BlockInfoReply::Operations(ops) = info { self.operations_serializer.serialize(ops, buffer)?; @@ -238,7 +239,7 @@ impl Serializer for MessageSerializer { self.u32_serializer .serialize(&(endorsements.len() as u32), buffer)?; for endorsement in endorsements { - self.wrapped_serializer.serialize(endorsement, 
buffer)?; + self.secure_serializer.serialize(endorsement, buffer)?; } } } @@ -256,9 +257,9 @@ pub struct MessageDeserializer { peer_list_length_deserializer: U32VarIntDeserializer, operations_deserializer: OperationsDeserializer, hash_deserializer: HashDeserializer, - block_header_deserializer: WrappedDeserializer, + block_header_deserializer: SecureShareDeserializer, endorsements_length_deserializer: U32VarIntDeserializer, - endorsement_deserializer: WrappedDeserializer, + endorsement_deserializer: SecureShareDeserializer, operation_prefix_ids_deserializer: OperationPrefixIdsDeserializer, infos_deserializer: OperationIdsDeserializer, ip_addr_deserializer: IpAddrDeserializer, @@ -305,7 +306,7 @@ impl MessageDeserializer { max_op_datastore_value_length, ), hash_deserializer: HashDeserializer::new(), - block_header_deserializer: WrappedDeserializer::new(BlockHeaderDeserializer::new( + block_header_deserializer: SecureShareDeserializer::new(BlockHeaderDeserializer::new( thread_count, endorsement_count, )), @@ -313,7 +314,7 @@ impl MessageDeserializer { Included(0), Excluded(max_endorsements_per_message), ), - endorsement_deserializer: WrappedDeserializer::new(EndorsementDeserializer::new( + endorsement_deserializer: SecureShareDeserializer::new(EndorsementDeserializer::new( thread_count, endorsement_count, )), diff --git a/massa-network-worker/src/network_cmd_impl.rs b/massa-network-worker/src/network_cmd_impl.rs index 35d6fc05c5f..2c937f0a4ad 100644 --- a/massa-network-worker/src/network_cmd_impl.rs +++ b/massa-network-worker/src/network_cmd_impl.rs @@ -24,11 +24,12 @@ use futures::{stream::FuturesUnordered, StreamExt}; use massa_hash::Hash; use massa_logging::massa_trace; use massa_models::{ - block::{BlockId, WrappedHeader}, + block_header::SecuredHeader, + block_id::BlockId, composite::PubkeySig, - endorsement::WrappedEndorsement, + endorsement::SecureShareEndorsement, node::NodeId, - operation::{OperationPrefixIds, WrappedOperation}, + 
operation::{OperationPrefixIds, SecureShareOperation}, stats::NetworkStats, }; use massa_network_exports::{ @@ -175,7 +176,7 @@ pub async fn on_node_ban_by_ids_cmd( pub async fn on_send_block_header_cmd( worker: &mut NetworkWorker, node: NodeId, - header: WrappedHeader, + header: SecuredHeader, ) -> Result<(), NetworkError> { massa_trace!("network_worker.manage_network_command send NodeCommand::SendBlockHeader", {"block_id": header.id, "node": node}); worker @@ -254,7 +255,7 @@ pub async fn on_get_bootstrap_peers_cmd( pub async fn on_send_endorsements_cmd( worker: &mut NetworkWorker, node: NodeId, - endorsements: Vec, + endorsements: Vec, ) { massa_trace!( "network_worker.manage_network_command receive NetworkCommand::SendEndorsements", @@ -355,7 +356,7 @@ pub async fn on_get_stats_cmd( pub async fn on_send_operations_cmd( worker: &mut NetworkWorker, to_node: NodeId, - operations: Vec, + operations: Vec, ) { massa_trace!( "network_worker.manage_network_command receive NetworkCommand::SendOperations", diff --git a/massa-network-worker/src/network_event.rs b/massa-network-worker/src/network_event.rs index a939301a1dc..61e95d9dcff 100644 --- a/massa-network-worker/src/network_event.rs +++ b/massa-network-worker/src/network_event.rs @@ -80,11 +80,12 @@ pub mod event_impl { use crate::network_worker::NetworkWorker; use massa_logging::massa_trace; use massa_models::{ - block::{BlockId, WrappedHeader}, - endorsement::WrappedEndorsement, + block_header::SecuredHeader, + block_id::BlockId, + endorsement::SecureShareEndorsement, node::NodeId, - operation::{OperationPrefixIds, WrappedOperation}, - wrapped::Id, + operation::{OperationPrefixIds, SecureShareOperation}, + secure_share::Id, }; use massa_network_exports::{AskForBlocksInfo, BlockInfoReply, NodeCommand}; use massa_network_exports::{NetworkError, NetworkEvent}; @@ -128,7 +129,7 @@ pub mod event_impl { pub async fn on_received_block_header( worker: &mut NetworkWorker, from: NodeId, - header: WrappedHeader, + header: 
SecuredHeader, ) -> Result<(), NetworkError> { massa_trace!( "network_worker.on_node_event receive NetworkEvent::ReceivedBlockHeader", @@ -195,7 +196,7 @@ pub mod event_impl { pub async fn on_received_operations( worker: &mut NetworkWorker, from: NodeId, - operations: Vec, + operations: Vec, ) { massa_trace!( "network_worker.on_node_event receive NetworkEvent::ReceivedOperations", @@ -262,7 +263,7 @@ pub mod event_impl { pub async fn on_received_endorsements( worker: &mut NetworkWorker, from: NodeId, - endorsements: Vec, + endorsements: Vec, ) { massa_trace!( "network_worker.on_node_event receive NetworkEvent::ReceivedEndorsements", diff --git a/massa-network-worker/src/node_worker.rs b/massa-network-worker/src/node_worker.rs index a93218c4ef7..1174a66471b 100644 --- a/massa-network-worker/src/node_worker.rs +++ b/massa-network-worker/src/node_worker.rs @@ -6,7 +6,7 @@ use super::{ }; use itertools::Itertools; use massa_logging::massa_trace; -use massa_models::{node::NodeId, wrapped::Id}; +use massa_models::{node::NodeId, secure_share::Id}; use massa_network_exports::{ ConnectionClosureReason, NetworkConfig, NetworkError, NodeCommand, NodeEvent, NodeEventType, }; diff --git a/massa-network-worker/src/tests/scenarios.rs b/massa-network-worker/src/tests/scenarios.rs index d0155abab7e..bf9f03b37e7 100644 --- a/massa-network-worker/src/tests/scenarios.rs +++ b/massa-network-worker/src/tests/scenarios.rs @@ -22,11 +22,11 @@ use massa_models::config::{ MAX_OPERATION_DATASTORE_VALUE_LENGTH, MAX_PARAMETERS_SIZE, THREAD_COUNT, }; use massa_models::{ - block::BlockId, + block_id::BlockId, endorsement::{Endorsement, EndorsementSerializer}, node::NodeId, + secure_share::SecureShareContent, slot::Slot, - wrapped::WrappedContent, }; use massa_network_exports::{settings::PeerTypeConnectionConfig, NodeCommand, NodeEvent}; use massa_network_exports::{ @@ -1197,7 +1197,7 @@ async fn test_endorsements_messages() { index: 0, endorsed_block: BlockId(Hash::compute_from(&[])), }; - let 
endorsement = Endorsement::new_wrapped( + let endorsement = Endorsement::new_verifiable( content.clone(), EndorsementSerializer::new(), &sender_keypair, @@ -1233,7 +1233,7 @@ async fn test_endorsements_messages() { index: 0, endorsed_block: BlockId(Hash::compute_from(&[])), }; - let endorsement = Endorsement::new_wrapped( + let endorsement = Endorsement::new_verifiable( content.clone(), EndorsementSerializer::new(), &sender_keypair, diff --git a/massa-network-worker/src/tests/tools.rs b/massa-network-worker/src/tests/tools.rs index 321e9bd4f97..b8537922cca 100644 --- a/massa-network-worker/src/tests/tools.rs +++ b/massa-network-worker/src/tests/tools.rs @@ -11,12 +11,12 @@ use crate::NetworkEvent; use massa_hash::Hash; use massa_models::node::NodeId; -use massa_models::wrapped::WrappedContent; +use massa_models::secure_share::SecureShareContent; use massa_models::{ address::Address, amount::Amount, - block::BlockId, - operation::{Operation, OperationSerializer, OperationType, WrappedOperation}, + block_id::BlockId, + operation::{Operation, OperationSerializer, OperationType, SecureShareOperation}, version::Version, }; use massa_network_exports::test_exports::mock_establisher::{self, MockEstablisherInterface}; @@ -324,7 +324,7 @@ pub async fn incoming_message_drain_stop( join_handle.await.expect("could not join message drain") } -pub fn get_transaction(expire_period: u64, fee: u64) -> WrappedOperation { +pub fn get_transaction(expire_period: u64, fee: u64) -> SecureShareOperation { let sender_keypair = KeyPair::generate(); let recv_keypair = KeyPair::generate(); @@ -339,7 +339,7 @@ pub fn get_transaction(expire_period: u64, fee: u64) -> WrappedOperation { expire_period, }; - Operation::new_wrapped(content, OperationSerializer::new(), &sender_keypair).unwrap() + Operation::new_verifiable(content, OperationSerializer::new(), &sender_keypair).unwrap() } /// Runs a consensus test, passing a mock pool controller to it. 
diff --git a/massa-node/Cargo.toml b/massa-node/Cargo.toml index 06f063d07bf..a4466bab81f 100644 --- a/massa-node/Cargo.toml +++ b/massa-node/Cargo.toml @@ -14,7 +14,7 @@ lazy_static = "1.4" parking_lot = { version = "0.12", features = ["deadlock_detection"] } serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" -tokio = { version = "1.21", features = ["full"] } +tokio = { version = "1.23", features = ["full"] } tracing = { version = "0.1", features = [ "max_level_debug", "release_max_level_debug", @@ -24,6 +24,7 @@ paw = "1.0" structopt = { version = "0.3", features = ["paw"] } dialoguer = "0.10" # custom modules +massa_api_exports = { path = "../massa-api-exports" } massa_api = { path = "../massa-api" } massa_async_pool = { path = "../massa-async-pool" } massa_bootstrap = { path = "../massa-bootstrap" } @@ -32,7 +33,6 @@ massa_consensus_worker = { path = "../massa-consensus-worker" } massa_executed_ops = { path = "../massa-executed-ops" } massa_execution_exports = { path = "../massa-execution-exports" } massa_execution_worker = { path = "../massa-execution-worker" } -massa_signature = { path = "../massa-signature" } massa_logging = { path = "../massa-logging" } massa_final_state = { path = "../massa-final-state" } massa_ledger_exports = { path = "../massa-ledger-exports" } diff --git a/massa-node/base_config/config.toml b/massa-node/base_config/config.toml index a0225ed2ce6..e6ad4f77846 100644 --- a/massa-node/base_config/config.toml +++ b/massa-node/base_config/config.toml @@ -7,11 +7,11 @@ [api] # max number of future periods considered during requests draw_lookahead_period_count = 10 - # port on which the node API listens for admin and node management requests. Dangerous if publicly exposed + # port on which the node API listens for admin and node management requests. Dangerous if publicly exposed. Bind to "[::1]:port" if you want to access the node from IPv6. 
bind_private = "127.0.0.1:33034" - # port on which the node API listens for public requests. Can be exposed to the Internet + # port on which the node API listens for public requests. Can be exposed to the Internet. Bind to "[::]:port" if you want to access the node from IPv6. bind_public = "0.0.0.0:33035" - # port on which the node API(V2) listens for HTTP requests and WebSockets subscriptions. Can be exposed to the Internet + # port on which the node API(V2) listens for HTTP requests and WebSockets subscriptions. Can be exposed to the Internet. Bind to "[::]:port" if you want to access the node from IPv6. bind_api = "0.0.0.0:33036" # max number of arguments per RPC call max_arguments = 128 @@ -54,6 +54,8 @@ abi_gas_costs_file = "base_config/gas_costs/abi_gas_costs.json" # gas cost for wasm operator wasm_gas_costs_file = "base_config/gas_costs/wasm_gas_costs.json" + # max number of compiled modules in the cache + max_module_cache_size = 1000 [ledger] # path to the initial ledger @@ -132,8 +134,6 @@ max_operations_propagation_time = 32000 # time threshold after which endorsement are not propagated max_endorsements_propagation_time = 48000 - # operations sender(channel) capacity - broadcast_operations_capacity = 5000 [network] # port on which to listen for protocol communication @@ -197,23 +197,31 @@ [bootstrap] # list of bootstrap (ip, node id) bootstrap_list = [ - ["149.202.86.103:31245", "P12UbyLJDS7zimGWf3LTHe8hYY67RdLke1iDRZqJbQQLHQSKPW8j"], - ["149.202.89.125:31245", "P12vxrYTQzS5TRzxLfFNYxn6PyEsphKWkdqx2mVfEuvJ9sPF43uq"], - ["158.69.120.215:31245", "P12rPDBmpnpnbECeAKDjbmeR19dYjAUwyLzsa8wmYJnkXLCNF28E"], - ["158.69.23.120:31245", "P1XxexKa3XNzvmakNmPawqFrE9Z2NFhfq1AhvV1Qx4zXq5p1Bp9"], - ["198.27.74.5:31245", "P1qxuqNnx9kyAMYxUfsYiv2gQd5viiBX126SzzexEdbbWd2vQKu"], - ["198.27.74.52:31245", "P1hdgsVsd4zkNp8cF1rdqqG6JPRQasAmx12QgJaJHBHFU1fRHEH"], - ["54.36.174.177:31245", "P1gEdBVEbRFbBxBtrjcTDDK9JPbJFDay27uiJRE3vmbFAFDKNh7"], - ["51.75.60.228:31245", 
"P13Ykon8Zo73PTKMruLViMMtE2rEG646JQ4sCcee2DnopmVM3P5"] + ["149.202.86.103:31245", "N12UbyLJDS7zimGWf3LTHe8hYY67RdLke1iDRZqJbQQLHQSKPW8j"], + ["149.202.89.125:31245", "N12vxrYTQzS5TRzxLfFNYxn6PyEsphKWkdqx2mVfEuvJ9sPF43uq"], + ["158.69.120.215:31245", "N12rPDBmpnpnbECeAKDjbmeR19dYjAUwyLzsa8wmYJnkXLCNF28E"], + ["158.69.23.120:31245", "N1XxexKa3XNzvmakNmPawqFrE9Z2NFhfq1AhvV1Qx4zXq5p1Bp9"], + ["198.27.74.5:31245", "N1qxuqNnx9kyAMYxUfsYiv2gQd5viiBX126SzzexEdbbWd2vQKu"], + ["198.27.74.52:31245", "N1hdgsVsd4zkNp8cF1rdqqG6JPRQasAmx12QgJaJHBHFU1fRHEH"], + ["54.36.174.177:31245", "N1gEdBVEbRFbBxBtrjcTDDK9JPbJFDay27uiJRE3vmbFAFDKNh7"], + ["51.75.60.228:31245", "N13Ykon8Zo73PTKMruLViMMtE2rEG646JQ4sCcee2DnopmVM3P5"], + ["[2001:41d0:1004:67::]:31245", "N12UbyLJDS7zimGWf3LTHe8hYY67RdLke1iDRZqJbQQLHQSKPW8j"], + ["[2001:41d0:a:7f7d::]:31245", "N12vxrYTQzS5TRzxLfFNYxn6PyEsphKWkdqx2mVfEuvJ9sPF43uq"], + ["[2001:41d0:602:db1::]:31245", "N1gEdBVEbRFbBxBtrjcTDDK9JPbJFDay27uiJRE3vmbFAFDKNh7"], + ["[2001:41d0:602:21e4::]:31245", "N13Ykon8Zo73PTKMruLViMMtE2rEG646JQ4sCcee2DnopmVM3P5"], ] + # force the bootstrap protocol to use: "IPv4", "IPv6", or "Both". Defaults to using both protocols. + bootstrap_protocol = "Both" # path to the bootstrap whitelist file. This whitelist define IPs that can bootstrap on your node. bootstrap_whitelist_path = "base_config/bootstrap_whitelist.json" # path to the bootstrap blacklist file. This whitelist define IPs that will not be able to bootstrap on your node. This list is optional. 
bootstrap_blacklist_path = "base_config/bootstrap_blacklist.json" - # [optionnal] port on which to listen for incoming bootstrap requests + # [optional] port on which to listen for incoming bootstrap requests bind = "[::]:31245" # timeout to establish a bootstrap connection connect_timeout = 15000 + # timeout for providing the bootstrap to a connection + bootstrap_timeout = 1200000 # delay in milliseconds to wait between consecutive bootstrap attempts retry_delay = 60000 # if ping is too high bootstrap will be interrupted after max_ping milliseconds @@ -248,6 +256,8 @@ max_endorsement_count = 10000 # max number of items returned per query max_item_return_count = 100 + # operations sender(channel) capacity + broadcast_operations_capacity = 5000 [selector] # maximum number of computed cycle's draws we keep in cache diff --git a/massa-node/base_config/gas_costs/abi_gas_costs.json b/massa-node/base_config/gas_costs/abi_gas_costs.json index 6abe6319cad..aed9b578e5a 100644 --- a/massa-node/base_config/gas_costs/abi_gas_costs.json +++ b/massa-node/base_config/gas_costs/abi_gas_costs.json @@ -1,49 +1,49 @@ { - "assembly_caller_has_write_access": 142, - "assembly_function_exists": 40653, "assembly_script_abort": 0, - "assembly_script_address_from_public_key": 317, - "assembly_script_append_data": 314, - "assembly_script_append_data_for": 337, - "assembly_script_call": 32288, - "assembly_script_create_sc": 305, - "assembly_script_date_now": 93, - "assembly_script_delete_data": 217, - "assembly_script_delete_data_for": 214, - "assembly_script_generate_event": 161, - "assembly_script_get_balance": 143, - "assembly_script_get_balance_for": 173, - "assembly_script_get_bytecode": 156, - "assembly_script_get_bytecode_for": 181, - "assembly_script_get_call_coins": 141, - "assembly_script_get_call_stack": 280, - "assembly_script_get_current_period": 142, - "assembly_script_get_current_thread": 142, - "assembly_script_get_data": 218, - "assembly_script_get_data_for": 265, - 
"assembly_script_get_keys": 460, - "assembly_script_get_keys_for": 483, - "assembly_script_get_op_data": 109, - "assembly_script_get_op_keys": 266, - "assembly_script_get_owned_addresses": 272, - "assembly_script_get_remaining_gas": 150, - "assembly_script_get_time": 140, - "assembly_script_has_data": 189, - "assembly_script_has_data_for": 226, - "assembly_script_has_op_key": 234, - "assembly_script_hash": 238, - "assembly_script_local_call": 34482, - "assembly_script_local_execution": 40401, - "assembly_script_print": 176, - "assembly_script_seed": 67, - "assembly_script_send_message": 462, - "assembly_script_set_bytecode": 224, - "assembly_script_set_bytecode_for": 275, - "assembly_script_set_data": 280, - "assembly_script_set_data_for": 400, - "assembly_script_signature_verify": 204, - "assembly_script_transfer_coins": 196, - "assembly_script_transfer_coins_for": 226, - "assembly_script_unsafe_random": 144, - "launch": 40555 + "assembly_script_address_from_public_key": 275, + "assembly_script_append_data": 200, + "assembly_script_append_data_for": 224, + "assembly_script_call": 7752, + "assembly_script_caller_has_write_access": 134, + "assembly_script_create_sc": 410, + "assembly_script_date_now": 70, + "assembly_script_delete_data": 163, + "assembly_script_delete_data_for": 186, + "assembly_script_function_exists": 25302, + "assembly_script_generate_event": 142, + "assembly_script_get_balance": 125, + "assembly_script_get_balance_for": 162, + "assembly_script_get_bytecode": 155, + "assembly_script_get_bytecode_for": 206, + "assembly_script_get_call_coins": 123, + "assembly_script_get_call_stack": 264, + "assembly_script_get_current_period": 130, + "assembly_script_get_current_thread": 143, + "assembly_script_get_data": 183, + "assembly_script_get_data_for": 202, + "assembly_script_get_keys": 153, + "assembly_script_get_keys_for": 177, + "assembly_script_get_op_data": 11485, + "assembly_script_get_op_keys": 252, + "assembly_script_get_owned_addresses": 268, + 
"assembly_script_get_remaining_gas": 93, + "assembly_script_get_time": 144, + "assembly_script_has_data": 149, + "assembly_script_has_data_for": 173, + "assembly_script_has_op_key": 235, + "assembly_script_hash": 244, + "assembly_script_local_call": 7752, + "assembly_script_local_execution": 7752, + "assembly_script_print": 156, + "assembly_script_seed": 66, + "assembly_script_send_message": 247, + "assembly_script_set_bytecode": 146, + "assembly_script_set_bytecode_for": 170, + "assembly_script_set_data": 202, + "assembly_script_set_data_for": 210, + "assembly_script_signature_verify": 209, + "assembly_script_transfer_coins": 176, + "assembly_script_transfer_coins_for": 208, + "assembly_script_unsafe_random": 145, + "launch": 7752 } \ No newline at end of file diff --git a/massa-node/base_config/gas_costs/wasm_gas_costs.json b/massa-node/base_config/gas_costs/wasm_gas_costs.json index 5aa482e5e65..ee5f8c394fc 100644 --- a/massa-node/base_config/gas_costs/wasm_gas_costs.json +++ b/massa-node/base_config/gas_costs/wasm_gas_costs.json @@ -1,13 +1,13 @@ { - "Wasm:Drop": 38, - "Wasm:GlobalGet": 8, - "Wasm:GlobalSet": 51, - "Wasm:I32Add": 0, + "Wasm:Drop": 0, + "Wasm:GlobalGet": 4, + "Wasm:GlobalSet": 0, + "Wasm:I32Add": 126, "Wasm:I32Const": 0, - "Wasm:I32DivS": 61, - "Wasm:I32Mul": 26, - "Wasm:I32Sub": 0, - "Wasm:If": 78, - "Wasm:LocalGet": 3, - "Wasm:LocalSet": 18 + "Wasm:I32DivS": 46, + "Wasm:I32Mul": 11, + "Wasm:I32Sub": 22, + "Wasm:If": 48, + "Wasm:LocalGet": 0, + "Wasm:LocalSet": 0 } \ No newline at end of file diff --git a/massa-node/base_config/initial_ledger.json b/massa-node/base_config/initial_ledger.json index 3d93a1a5fda..a652cf1b597 100644 --- a/massa-node/base_config/initial_ledger.json +++ b/massa-node/base_config/initial_ledger.json @@ -1,45 +1,45 @@ { - "A1qDAxGJ387ETi9JRQzZWSPKYq4YPXrFvdiE4VoXUaiAt38JFEC": { + "A12dhs6CsQk8AXFTYyUpc1P9e8GDf65ozU6RcigW68qfJV7vdbNf": { "balance": "1000000000", "datastore": {}, "bytecode": [] }, - 
"A12M3AQqs7JH7mSe1UZyEA5NQ7nGQHXaqqxe1TGEpkimcRhsQ4eF": { + "A12WQRoxQJKMjNG8hVjkyh4YgBwaYeUH4BsqJEEdTUJda37GhSx9": { "balance": "1000000000", "datastore": {}, "bytecode": [] }, - "A1nsqw9mCcYLyyMJx5f4in4NXDoe4B1LzV9pQdvX5Wrxq9ehf6h": { + "A1226KGgzq425xzpNmrUCggKcru4yMkFSUXGxYnTwCt6vso5PVbn": { "balance": "1000000000", "datastore": {}, "bytecode": [] }, - "A1pdnk7gME8DSA6ueNZdCHqfSt9YfTwAJSgRCcB8g3z3kkapWtU": { + "A12p3neq9Caq8idS33jrWuRZgfoBL3wAAfG2NdZEBxNdfujVtCLq": { "balance": "1000000000", "datastore": {}, "bytecode": [] }, - "A1H1Ze77ctAFi4FBc3nVe9AtWdtg7246V9pVXSeXqWaJFLPKfB1": { + "A122tXU6uhDfGP1BxCtvLQTvyascwsjX5NVo3vv1fssmdeKSeytM": { "balance": "1000000000", "datastore": {}, "bytecode": [] }, - "A12Dvay7jT1maaKpV9CHX6yMt3cS5ZEWy6Q67HV8twVGS3ihoq5x": { + "A1xfaL8CTrZWTBY79JDMEokwYjc2U4gUFJqqgtbahj7gwYn2s6Y": { "balance": "1000000000", "datastore": {}, "bytecode": [] }, - "A13evhD7c9AXFc6CxWWjWWRT6bQnejYhq3MsNofJWJDe4UQStJE": { + "A1xgxVCw4Vnr2s8JSnFYzz8UGuqZdMH7wHBDUapndgRCdDteWS": { "balance": "1000000000", "datastore": {}, "bytecode": [] }, - "A1UpZLobUAWqj3M9DpBZNhh4GD4ZLvixKXQu2kt7ZDUiEepD89E": { + "A12DPHgthL9JJGrASquAmsdef9oYpLCXzZRvyiXzY3TG87Hgcs4o": { "balance": "1000000000", "datastore": {}, "bytecode": [] }, - "A12p8v9V68SiehQb2Syzy6smfv9NTCJh2p6JPbsacy7PaGRw39uH": { + "A124Lusm3gJFwkCkY13FKXae2z2cV4pvhepJfxPHM2LJz8fEjU3C": { "balance": "1000000000", "datastore": {}, "bytecode": [] diff --git a/massa-node/base_config/initial_rolls.json b/massa-node/base_config/initial_rolls.json index 19b631e303e..be2fe62f7d8 100644 --- a/massa-node/base_config/initial_rolls.json +++ b/massa-node/base_config/initial_rolls.json @@ -1,11 +1,11 @@ { - "A1qDAxGJ387ETi9JRQzZWSPKYq4YPXrFvdiE4VoXUaiAt38JFEC": 10, - "A12M3AQqs7JH7mSe1UZyEA5NQ7nGQHXaqqxe1TGEpkimcRhsQ4eF": 10, - "A1nsqw9mCcYLyyMJx5f4in4NXDoe4B1LzV9pQdvX5Wrxq9ehf6h": 10, - "A1pdnk7gME8DSA6ueNZdCHqfSt9YfTwAJSgRCcB8g3z3kkapWtU": 10, - "A1H1Ze77ctAFi4FBc3nVe9AtWdtg7246V9pVXSeXqWaJFLPKfB1": 10, - 
"A12Dvay7jT1maaKpV9CHX6yMt3cS5ZEWy6Q67HV8twVGS3ihoq5x": 10, - "A13evhD7c9AXFc6CxWWjWWRT6bQnejYhq3MsNofJWJDe4UQStJE": 10, - "A1UpZLobUAWqj3M9DpBZNhh4GD4ZLvixKXQu2kt7ZDUiEepD89E": 10, - "A12p8v9V68SiehQb2Syzy6smfv9NTCJh2p6JPbsacy7PaGRw39uH": 10 + "A12dhs6CsQk8AXFTYyUpc1P9e8GDf65ozU6RcigW68qfJV7vdbNf": 10, + "A12WQRoxQJKMjNG8hVjkyh4YgBwaYeUH4BsqJEEdTUJda37GhSx9": 10, + "A1226KGgzq425xzpNmrUCggKcru4yMkFSUXGxYnTwCt6vso5PVbn": 10, + "A12p3neq9Caq8idS33jrWuRZgfoBL3wAAfG2NdZEBxNdfujVtCLq": 10, + "A122tXU6uhDfGP1BxCtvLQTvyascwsjX5NVo3vv1fssmdeKSeytM": 10, + "A1xfaL8CTrZWTBY79JDMEokwYjc2U4gUFJqqgtbahj7gwYn2s6Y": 10, + "A1xgxVCw4Vnr2s8JSnFYzz8UGuqZdMH7wHBDUapndgRCdDteWS": 10, + "A12DPHgthL9JJGrASquAmsdef9oYpLCXzZRvyiXzY3TG87Hgcs4o": 10, + "A124Lusm3gJFwkCkY13FKXae2z2cV4pvhepJfxPHM2LJz8fEjU3C": 10 } diff --git a/massa-node/base_config/openrpc.json b/massa-node/base_config/openrpc.json index c68fd0b7daf..6042524ca05 100644 --- a/massa-node/base_config/openrpc.json +++ b/massa-node/base_config/openrpc.json @@ -2,7 +2,7 @@ "openrpc": "1.2.4", "info": { "title": "Massa OpenRPC Specification", - "version": "TEST.18.0", + "version": "TEST.19.0", "description": "Massa OpenRPC Specification document. Find more information on https://docs.massa.net/en/latest/technical-doc/api.html", "termsOfService": "https://open-rpc.org", "contact": { @@ -394,12 +394,22 @@ "description": "Massa public api" } ], - "params": [], + "params": [ + { + "schema": { + "$ref": "#/components/schemas/PageRequest" + }, + "name": "PageRequest" + } + ], "result": { "schema": { - "$ref": "#/components/schemas/Staker" + "type": "array", + "items": { + "$ref": "#/components/schemas/Staker" + } }, - "name": "Staker" + "name": "PagedStakers" }, "name": "get_stakers", "summary": "Get stakers", @@ -503,8 +513,8 @@ "schema": false }, "name": "node_add_to_bootstrap_blacklist", - "summary": "Add to bootsrap blacklist given IP address(es)", - "description": "Add to bootsrap blacklist given IP address(es)." 
+ "summary": "Add to bootstrap blacklist given IP address(es)", + "description": "Add to bootstrap blacklist given IP address(es)." }, { "tags": [ @@ -533,8 +543,8 @@ "schema": false }, "name": "node_add_to_bootstrap_whitelist", - "summary": "Add to bootsrap whitelist given IP address(es)", - "description": "Add to bootsrap whitelist given IP address(es)." + "summary": "Add to bootstrap whitelist given IP address(es)", + "description": "Add to bootstrap whitelist given IP address(es)." }, { "tags": [ @@ -644,8 +654,8 @@ } }, "name": "node_bootstrap_blacklist", - "summary": "Returns bootsrap blacklist IP address(es)", - "description": "Returns bootsrap blacklist IP address(es)." + "summary": "Returns bootstrap blacklist IP address(es)", + "description": "Returns bootstrap blacklist IP address(es)." }, { "tags": [ @@ -667,8 +677,8 @@ } }, "name": "node_bootstrap_whitelist", - "summary": "Returns bootsrap whitelist IP address(es)", - "description": "Returns bootsrap whitelist IP address(es)." + "summary": "Returns bootstrap whitelist IP address(es)", + "description": "Returns bootstrap whitelist IP address(es)." }, { "tags": [ @@ -684,8 +694,8 @@ "schema": false }, "name": "node_bootstrap_whitelist_allow_all", - "summary": "Allow everyone to bootsrap from the node", - "description": "Allow everyone to bootsrap from the node. Remove bootsrap whitelist configuration file." + "summary": "Allow everyone to bootstrap from the node", + "description": "Allow everyone to bootstrap from the node. Remove bootstrap whitelist configuration file." }, { "tags": [ @@ -737,8 +747,8 @@ "schema": false }, "name": "node_remove_from_bootstrap_blacklist", - "summary": "Remove from bootsrap blacklist given IP address(es)", - "description": "Remove from bootsrap blacklist given IP address(es)." + "summary": "Remove from bootstrap blacklist given IP address(es)", + "description": "Remove from bootstrap blacklist given IP address(es)." 
}, { "tags": [ @@ -767,8 +777,8 @@ "schema": false }, "name": "node_remove_from_bootstrap_whitelist", - "summary": "Remove from bootsrap whitelist given IP address(es)", - "description": "Remove from bootsrap whitelist given IP address(es)." + "summary": "Remove from bootstrap whitelist given IP address(es)", + "description": "Remove from bootstrap whitelist given IP address(es)." }, { "tags": [ @@ -1124,8 +1134,8 @@ "name": "FilledBlockInfo" }, "name": "subscribe_new_filled_blocks", - "summary": "New produced block with operations content", - "description": "New produced block with operations content." + "summary": "New produced blocks with operations content", + "description": "New produced blocks with operations content." }, { "tags": [ @@ -1150,8 +1160,8 @@ "name": "Operation" }, "name": "subscribe_new_operations", - "summary": "Subscribe to new received operations", - "description": "Subscribe to new received operations." + "summary": "Subscribe to new operations", + "description": "Subscribe to new operations." 
}, { "tags": [ @@ -1376,8 +1386,8 @@ "type": "object", "properties": { "slot": { - "type": "object", - "$ref": "#/components/schemas/Slot" + "$ref": "#/components/schemas/Slot", + "type": "object" }, "amount": { "type": "number" @@ -1389,8 +1399,8 @@ "description": "The next block draws", "type": "array", "items": { - "type": "object", - "$ref": "#/components/schemas/Slot" + "$ref": "#/components/schemas/Slot", + "type": "object" } }, "next_endorsement_draws": { @@ -1400,8 +1410,8 @@ "type": "object", "properties": { "slot": { - "type": "object", - "$ref": "#/components/schemas/Slot" + "$ref": "#/components/schemas/Slot", + "type": "object" }, "index": { "type": "number" @@ -1425,8 +1435,8 @@ "description": "Cycle infos", "type": "array", "items": { - "type": "object", - "$ref": "#/components/schemas/ExecutionAddressCycleInfo" + "$ref": "#/components/schemas/ExecutionAddressCycleInfo", + "type": "object" } } }, @@ -1540,8 +1550,8 @@ "type": "object", "properties": { "target_addr": { - "description": "Address", - "$ref": "#/components/schemas/Address" + "$ref": "#/components/schemas/Address", + "description": "Address" }, "target_func": { "description": "Function name", @@ -1574,9 +1584,7 @@ "description": "The block ids of the blocks in that clique", "type": "array", "items": { - "schema": { - "$ref": "#/components/schemas/BlockId" - } + "$ref": "#/components/schemas/BlockId" } }, "fitness": { @@ -1615,16 +1623,16 @@ "type": "number" }, "end_timestamp": { - "type": "number", - "description": "(Only in tesnets)\nTime in milliseconds when the blockclique started." + "description": "(Only in tesnets)\nTime in milliseconds when the blockclique started.", + "type": "number" }, "genesis_timestamp": { - "type": "number", - "description": "Time in milliseconds when the blockclique started." 
+ "description": "Time in milliseconds when the blockclique started.", + "type": "number" }, "max_block_size": { - "type": "number", - "description": "Maximum size (in bytes) of a block" + "description": "Maximum size (in bytes) of a block", + "type": "number" }, "operation_validity_periods": { "description": "Maximum operation validity period count", @@ -1647,8 +1655,8 @@ "type": "string" }, "t0": { - "type": "number", - "description": "Time between the periods in the same thread." + "description": "Time between the periods in the same thread.", + "type": "number" }, "thread_count": { "description": "Number of threads", @@ -1719,13 +1727,16 @@ }, "DataStore": { "title": "Datastore", - "description": "Datastore", + "description": "A tuple which contains (entry, bytes)", "type": "object", "additionalProperties": { "type": "object", "properties": { - "enrtry": { - "type": "string" + "entry": { + "type": "array", + "items": { + "type": "integer" + } }, "bytes": { "type": "array", @@ -1735,14 +1746,21 @@ } } }, - "example": { - "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx1": [ - "Number" + "example": [ + [ + 1, + 2, + 3, + 4 ], - "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx2": [ - "Number" + [ + 5, + 6, + 7, + 9, + 10 ] - } + ] }, "DataStoreEntry": { "description": "Datastore entry", @@ -1921,28 +1939,14 @@ }, "additionalProperties": false }, - "ReadOnlyResult": { - "title": "ReadOnlyResult", - "type": "object", - "description": "The result of a read-only execution", - "properties": { - "Ok": { - "type": "array", - "description": "Included in case of success. The result of the execution" - }, - "Error": { - "type": "string", - "description": "Included in case of error. 
The error message" - } - } - }, "ExecuteReadOnlyResponse": { "title": "ExecuteReadOnlyResponse", "required": [ "executed_at", "output_events", "result", - "gas_cost" + "gas_cost", + "state_changes" ], "type": "object", "properties": { @@ -1961,6 +1965,9 @@ "gas_cost": { "description": "The gas cost for the execution", "type": "number" + }, + "state_changes": { + "$ref": "#/components/schemas/StateChanges" } }, "additionalProperties": false @@ -1970,7 +1977,8 @@ "description": "Execute Smart Contract", "required": [ "data", - "max_gas" + "max_gas", + "datastore" ], "type": "object", "properties": { @@ -1984,6 +1992,10 @@ "max_gas": { "description": "Maximum amount of gas that the execution of the contract is allowed to cost.", "type": "number" + }, + "datastore": { + "$ref": "#/components/schemas/DataStore", + "description": "A tuple which contains (key, value)" } }, "additionalProperties": false @@ -2030,12 +2042,12 @@ "description": "Optional end slot\nWill use by default Slot(0,0)" }, "emitter_address": { - "type": "string", - "description": "Optional emitter address" + "description": "Optional emitter address", + "type": "string" }, "original_caller_address": { - "type": "string", - "description": "Optional caller address" + "description": "Optional caller address", + "type": "string" }, "original_operation_id": { "description": "Optional operation id", @@ -2070,7 +2082,7 @@ } } }, - "EventExecutionContext" : { + "EventExecutionContext": { "title": "EventExecutionContext", "description": "Context of the event (not generated by the user)", "required": [ @@ -2334,6 +2346,8 @@ "consensus_stats", "current_cycle", "current_time", + "current_cycle_time", + "next_cycle_time", "network_stats", "next_slot", "node_id", @@ -2359,8 +2373,16 @@ "type": "number" }, "current_time": { - "type": "number", - "description": "Time in milliseconds since 1970-01-01" + "description": "Time in milliseconds since 1970-01-01", + "type": "number" + }, + "current_cycle_time": { + 
"description": "current cycle starting time in milliseconds since 1970-01-01", + "type": "number" + }, + "next_cycle_time": { + "description": "next cycle starting time in milliseconds since 1970-01-01", + "type": "number" }, "last_slot": { "$ref": "#/components/schemas/Slot", @@ -2393,6 +2415,31 @@ }, "additionalProperties": false }, + "Operation": { + "title": "Operation", + "description": "Operation", + "required": [ + "fee", + "expire_period", + "op" + ], + "type": "object", + "properties": { + "fee": { + "description": "the fee they have decided for this operation", + "type": "string" + }, + "expire_period": { + "description": "after `expire_period` slot the operation won't be included in a block", + "type": "number" + }, + "op": { + "$ref": "#/components/schemas/OperationType", + "description": "the type specific operation part" + } + }, + "additionalProperties": false + }, "OperationId": { "description": "Operation id", "type": "string" @@ -2405,6 +2452,7 @@ "in_blocks", "in_pool", "is_final", + "thread", "operation" ], "type": "object", @@ -2428,6 +2476,10 @@ "description": "True if the operation is final (for example in a final block)", "type": "boolean" }, + "thread": { + "description": "Thread in which the operation can be included", + "type": "number" + }, "operation": { "$ref": "#/components/schemas/WrappedOperation", "description": "The operation itself" @@ -2489,6 +2541,23 @@ } } }, + "PageRequest": { + "title": "PageRequest", + "description": "An PageRequest object, which contains limit (max elements par page) and a page offset.", + "type": "object", + "required": [ + "limit", + "offset" + ], + "properties": { + "limit": { + "type": "number" + }, + "offset": { + "type": "number" + } + } + }, "PoolStats": { "title": "PoolStats", "description": "Pool stats", @@ -2573,15 +2642,27 @@ }, "bytecode": { "description": "Bytecode to execute", - "type": "array" + "type": "array", + "items": { + "format": "byte", + "type": "string" + } }, "address": { - 
"description": "caller's address", - "$ref": "#/components/schemas/Address" + "$ref": "#/components/schemas/Address", + "description": "caller's address" }, "operation_datastore": { "description": "An operation datastore", - "type": "array" + "type": "array", + "items": { + "format": "byte", + "type": "string" + } + }, + "is_final": { + "description": "Whether to start execution from final or active state", + "type": "boolean" } }, "additionalProperties": false @@ -2614,12 +2695,31 @@ "type": "string" }, "caller_address": { - "type": "string", - "description": "Caller's address, optional" + "description": "Caller's address, optional", + "type": "string" } }, "additionalProperties": false }, + "ReadOnlyResult": { + "title": "ReadOnlyResult", + "description": "The result of a read-only execution", + "type": "object", + "properties": { + "Ok": { + "description": "Included in case of success. The result of the execution", + "type": "array", + "items": { + "format": "byte", + "type": "string" + } + }, + "Error": { + "description": "Included in case of error. 
The error message", + "type": "string" + } + } + }, "Roll": { "title": "Roll", "description": "Roll", @@ -2780,6 +2880,38 @@ "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx2": "Number" } }, + "StateChanges": { + "title": "StateChanges", + "required": [ + "async_pool_changes", + "executed_ops_changes", + "ledger_changes", + "pos_changes" + ], + "type": "object", + "properties": { + "ledger_changes": { + "description": "ledger changes", + "type": "object" + }, + "async_pool_changes": { + "description": "async pool changes", + "type": "array", + "items": { + "type": "object" + } + }, + "pos_changes": { + "description": "pos changes", + "type": "object" + }, + "executed_ops_changes": { + "description": "executed operations changes", + "type": "object" + } + }, + "additionalProperties": false + }, "Transaction": { "title": "Transaction", "description": "Transation", @@ -2973,6 +3105,14 @@ "$ref": "#/components/schemas/NodeStatus" } }, + "Operation": { + "name": "Operation", + "summary": "Operation", + "description": "A Operation object", + "schema": { + "$ref": "#/components/schemas/Operation" + } + }, "OperationId": { "name": "OperationId", "summary": "OperationId", @@ -3005,6 +3145,14 @@ "$ref": "#/components/schemas/OperationType" } }, + "PageRequest": { + "name": "PageRequest", + "summary": "PageRequest", + "description": "An Page request parameter, which contains limit and offset properties.", + "schema": { + "$ref": "#/components/schemas/PageRequest" + } + }, "PubkeySig": { "name": "PubkeySig", "summary": "PubkeySig", diff --git a/massa-node/src/main.rs b/massa-node/src/main.rs index bcc6098d21b..8bb12c084a8 100644 --- a/massa-node/src/main.rs +++ b/massa-node/src/main.rs @@ -8,7 +8,8 @@ use crate::settings::SETTINGS; use crossbeam_channel::{Receiver, TryRecvError}; use dialoguer::Password; -use massa_api::{APIConfig, ApiServer, ApiV2, Private, Public, RpcServer, StopHandle, API}; +use massa_api::{ApiServer, ApiV2, Private, Public, RpcServer, StopHandle, 
API}; +use massa_api_exports::config::APIConfig; use massa_async_pool::AsyncPoolConfig; use massa_bootstrap::{get_state, start_bootstrap_server, BootstrapConfig, BootstrapManager}; use massa_consensus_exports::events::ConsensusEvent; @@ -47,7 +48,7 @@ use massa_models::config::constants::{ use massa_models::config::CONSENSUS_BOOTSTRAP_PART_SIZE; use massa_network_exports::{Establisher, NetworkConfig, NetworkManager}; use massa_network_worker::start_network_controller; -use massa_pool_exports::{PoolConfig, PoolManager}; +use massa_pool_exports::{PoolChannels, PoolConfig, PoolManager}; use massa_pool_worker::start_pool_controller; use massa_pos_exports::{PoSConfig, SelectorConfig, SelectorManager}; use massa_pos_worker::start_selector_worker; @@ -172,10 +173,12 @@ async fn launch( let bootstrap_config: BootstrapConfig = BootstrapConfig { bootstrap_list: SETTINGS.bootstrap.bootstrap_list.clone(), + bootstrap_protocol: SETTINGS.bootstrap.bootstrap_protocol, bootstrap_whitelist_path: SETTINGS.bootstrap.bootstrap_whitelist_path.clone(), bootstrap_blacklist_path: SETTINGS.bootstrap.bootstrap_blacklist_path.clone(), bind: SETTINGS.bootstrap.bind, connect_timeout: SETTINGS.bootstrap.connect_timeout, + bootstrap_timeout: SETTINGS.bootstrap.bootstrap_timeout, read_timeout: SETTINGS.bootstrap.read_timeout, write_timeout: SETTINGS.bootstrap.write_timeout, read_error_timeout: SETTINGS.bootstrap.read_error_timeout, @@ -328,6 +331,7 @@ async fn launch( max_datastore_key_length: MAX_DATASTORE_KEY_LENGTH, max_bytecode_size: MAX_BYTECODE_LENGTH, max_datastore_value_size: MAX_DATASTORE_VALUE_LENGTH, + max_module_cache_size: SETTINGS.execution.max_module_cache_size, storage_costs_constants, max_read_only_gas: SETTINGS.execution.max_read_only_gas, gas_costs: GasCosts::new( @@ -353,9 +357,20 @@ async fn launch( max_operation_pool_size_per_thread: SETTINGS.pool.max_pool_size_per_thread, max_endorsements_pool_size_per_thread: SETTINGS.pool.max_pool_size_per_thread, channels_size: 
POOL_CONTROLLER_CHANNEL_SIZE, + broadcast_enabled: SETTINGS.api.enable_ws, + broadcast_operations_capacity: SETTINGS.pool.broadcast_operations_capacity, + }; + + let pool_channels = PoolChannels { + operation_sender: broadcast::channel(pool_config.broadcast_operations_capacity).0, }; - let (pool_manager, pool_controller) = - start_pool_controller(pool_config, &shared_storage, execution_controller.clone()); + + let (pool_manager, pool_controller) = start_pool_controller( + pool_config, + &shared_storage, + execution_controller.clone(), + pool_channels.clone(), + ); let (protocol_command_sender, protocol_command_receiver) = mpsc::channel::(PROTOCOL_CONTROLLER_CHANNEL_SIZE); @@ -440,13 +455,10 @@ async fn launch( t0: T0, max_operations_propagation_time: SETTINGS.protocol.max_operations_propagation_time, max_endorsements_propagation_time: SETTINGS.protocol.max_endorsements_propagation_time, - broadcast_enabled: SETTINGS.api.enable_ws, - broadcast_operations_capacity: SETTINGS.protocol.broadcast_operations_capacity, }; let protocol_senders = ProtocolSenders { network_command_sender: network_command_sender.clone(), - operation_sender: broadcast::channel(protocol_config.broadcast_operations_capacity).0, }; let protocol_receivers = ProtocolReceivers { @@ -530,7 +542,7 @@ async fn launch( // spawn Massa API let api = API::::new( consensus_channels, - protocol_senders, + pool_channels, api_config.clone(), *VERSION, ); diff --git a/massa-node/src/settings.rs b/massa-node/src/settings.rs index 3511d2c7bf6..907a948a7df 100644 --- a/massa-node/src/settings.rs +++ b/massa-node/src/settings.rs @@ -4,8 +4,8 @@ use std::path::PathBuf; use enum_map::EnumMap; -use massa_models::config::build_massa_settings; -use massa_signature::PublicKey; +use massa_bootstrap::IpType; +use massa_models::{config::build_massa_settings, node::NodeId}; use massa_time::MassaTime; use serde::Deserialize; use std::net::{IpAddr, SocketAddr}; @@ -30,6 +30,7 @@ pub struct ExecutionSettings { pub 
max_read_only_gas: u64, pub abi_gas_costs_file: PathBuf, pub wasm_gas_costs_file: PathBuf, + pub max_module_cache_size: u32, } #[derive(Clone, Debug, Deserialize)] @@ -75,7 +76,8 @@ pub struct NetworkSettings { /// Bootstrap configuration. #[derive(Debug, Deserialize, Clone)] pub struct BootstrapSettings { - pub bootstrap_list: Vec<(SocketAddr, PublicKey)>, + pub bootstrap_list: Vec<(SocketAddr, NodeId)>, + pub bootstrap_protocol: IpType, pub bootstrap_whitelist_path: PathBuf, pub bootstrap_blacklist_path: PathBuf, pub bind: Option, @@ -92,6 +94,8 @@ pub struct BootstrapSettings { pub per_ip_min_interval: MassaTime, pub ip_list_max_size: usize, pub max_bytes_read_write: f64, + /// Allocated time with which to manage the bootstrap process + pub bootstrap_timeout: MassaTime, } /// Factory settings @@ -110,6 +114,8 @@ pub struct PoolSettings { pub max_operation_future_validity_start_periods: u64, pub max_endorsement_count: u64, pub max_item_return_count: usize, + /// operations sender(channel) capacity + pub broadcast_operations_capacity: usize, } /// API and server configuration, read from a file configuration. 
@@ -219,8 +225,6 @@ pub struct ProtocolSettings { pub max_operations_propagation_time: MassaTime, /// Time threshold after which operation are not propagated pub max_endorsements_propagation_time: MassaTime, - /// operations sender sender(channel) capacity - pub broadcast_operations_capacity: usize, } #[cfg(test)] diff --git a/massa-node/src/tests/config.toml b/massa-node/src/tests/config.toml index 9f3ebfee3c0..442fc3cb9d4 100644 --- a/massa-node/src/tests/config.toml +++ b/massa-node/src/tests/config.toml @@ -70,8 +70,10 @@ ["149.202.86.103:31245", "5GcSNukkKePWpNSjx9STyoEZniJAN4U4EUzdsQyqhuP3WYf6nj"], ["149.202.89.125:31245", "5wDwi2GYPniGLzpDfKjXJrmHV3p1rLRmm4bQ9TUWNVkpYmd4Zm"], ["158.69.120.215:31245", "5QbsTjSoKzYc8uBbwPCap392CoMQfZ2jviyq492LZPpijctb9c"], - ["158.69.23.120:31245", "8139kbee951YJdwK99odM7e6V3eW7XShCfX5E2ovG3b9qxqqrq"] + ["158.69.23.120:31245", "8139kbee951YJdwK99odM7e6V3eW7XShCfX5E2ovG3b9qxqqrq"], + ["[dba7:c8ca:cfda:ffad:23ee:feb7:569e:a0bf]:31245", "6DbsTjSoKzYc8uBbwPCap392CoMQfZ2jviyq492LZPpijctb9c"] ] + bootstrap_protocol = "Both" bind = "[::]:31245" connect_timeout = 15000 retry_delay = 5000 diff --git a/massa-pool-exports/Cargo.toml b/massa-pool-exports/Cargo.toml index 6eaab6f8577..a30134f2a5e 100644 --- a/massa-pool-exports/Cargo.toml +++ b/massa-pool-exports/Cargo.toml @@ -6,6 +6,7 @@ edition = "2021" [dependencies] serde = { version = "1.0", features = ["derive"] } +tokio = { version = "1.23", features = ["sync"] } # custom modules massa_models = { path = "../massa-models" } massa_storage = { path = "../massa-storage" } diff --git a/massa-pool-exports/src/channels.rs b/massa-pool-exports/src/channels.rs new file mode 100644 index 00000000000..b44ec4da659 --- /dev/null +++ b/massa-pool-exports/src/channels.rs @@ -0,0 +1,8 @@ +use massa_models::operation::Operation; + +/// channels used by the pool worker +#[derive(Clone)] +pub struct PoolChannels { + /// Broadcast sender(channel) for new operations + pub operation_sender: 
tokio::sync::broadcast::Sender, +} diff --git a/massa-pool-exports/src/config.rs b/massa-pool-exports/src/config.rs index af41e3bddbe..80f3636baa1 100644 --- a/massa-pool-exports/src/config.rs +++ b/massa-pool-exports/src/config.rs @@ -24,4 +24,8 @@ pub struct PoolConfig { pub max_block_endorsement_count: u32, /// operations and endorsements communication channels size pub channels_size: usize, + /// Whether WebSockets are enabled + pub broadcast_enabled: bool, + /// operations sender(channel) capacity + pub broadcast_operations_capacity: usize, } diff --git a/massa-pool-exports/src/controller_traits.rs b/massa-pool-exports/src/controller_traits.rs index bef47bf3022..b07711f67f9 100644 --- a/massa-pool-exports/src/controller_traits.rs +++ b/massa-pool-exports/src/controller_traits.rs @@ -1,7 +1,7 @@ // Copyright (c) 2022 MASSA LABS use massa_models::{ - block::BlockId, endorsement::EndorsementId, operation::OperationId, slot::Slot, + block_id::BlockId, endorsement::EndorsementId, operation::OperationId, slot::Slot, }; use massa_storage::Storage; diff --git a/massa-pool-exports/src/lib.rs b/massa-pool-exports/src/lib.rs index 93e7cd7d3f3..7a30ca80bef 100644 --- a/massa-pool-exports/src/lib.rs +++ b/massa-pool-exports/src/lib.rs @@ -5,9 +5,11 @@ #![warn(missing_docs)] #![warn(unused_crate_dependencies)] +mod channels; mod config; mod controller_traits; +pub use channels::PoolChannels; pub use config::PoolConfig; pub use controller_traits::{PoolController, PoolManager}; diff --git a/massa-pool-exports/src/test_exports/config.rs b/massa-pool-exports/src/test_exports/config.rs index 5f40d6972b5..077117bc225 100644 --- a/massa-pool-exports/src/test_exports/config.rs +++ b/massa-pool-exports/src/test_exports/config.rs @@ -19,6 +19,8 @@ impl Default for PoolConfig { max_endorsements_pool_size_per_thread: 1000, max_block_endorsement_count: ENDORSEMENT_COUNT, channels_size: 1024, + broadcast_enabled: false, + broadcast_operations_capacity: 5000, } } } diff --git 
a/massa-pool-exports/src/test_exports/mock.rs b/massa-pool-exports/src/test_exports/mock.rs index 8f55592e17e..c45d2d89a98 100644 --- a/massa-pool-exports/src/test_exports/mock.rs +++ b/massa-pool-exports/src/test_exports/mock.rs @@ -6,7 +6,7 @@ use std::sync::{ }; use massa_models::{ - block::BlockId, endorsement::EndorsementId, operation::OperationId, slot::Slot, + block_id::BlockId, endorsement::EndorsementId, operation::OperationId, slot::Slot, }; use massa_storage::Storage; use massa_time::MassaTime; diff --git a/massa-pool-worker/Cargo.toml b/massa-pool-worker/Cargo.toml index dcbd3334ad2..be4a15529a4 100644 --- a/massa-pool-worker/Cargo.toml +++ b/massa-pool-worker/Cargo.toml @@ -15,6 +15,7 @@ massa_pool_exports = { path = "../massa-pool-exports" } massa_execution_exports = { path = "../massa-execution-exports" } [dev-dependencies] +tokio = { version = "1.23", features = ["sync"] } massa_signature = { path = "../massa-signature" } massa_hash = { path = "../massa-hash" } massa_pool_exports = { path = "../massa-pool-exports", features = [ "testing" ] } diff --git a/massa-pool-worker/src/controller_impl.rs b/massa-pool-worker/src/controller_impl.rs index 67254720bc7..e09ce8862d0 100644 --- a/massa-pool-worker/src/controller_impl.rs +++ b/massa-pool-worker/src/controller_impl.rs @@ -3,7 +3,7 @@ //! Pool controller implementation use massa_models::{ - block::BlockId, endorsement::EndorsementId, operation::OperationId, slot::Slot, + block_id::BlockId, endorsement::EndorsementId, operation::OperationId, slot::Slot, }; use massa_pool_exports::{PoolConfig, PoolController, PoolManager}; use massa_storage::Storage; diff --git a/massa-pool-worker/src/endorsement_pool.rs b/massa-pool-worker/src/endorsement_pool.rs index 80f1e7a57ef..3aa6f40c400 100644 --- a/massa-pool-worker/src/endorsement_pool.rs +++ b/massa-pool-worker/src/endorsement_pool.rs @@ -1,7 +1,7 @@ //! 
Copyright (c) 2022 MASSA LABS use massa_models::{ - block::BlockId, + block_id::BlockId, endorsement::EndorsementId, prehash::{CapacityAllocator, PreHashSet}, slot::Slot, diff --git a/massa-pool-worker/src/operation_pool.rs b/massa-pool-worker/src/operation_pool.rs index 661873c7a1a..9ddbacef6d1 100644 --- a/massa-pool-worker/src/operation_pool.rs +++ b/massa-pool-worker/src/operation_pool.rs @@ -8,7 +8,7 @@ use massa_models::{ prehash::{CapacityAllocator, PreHashMap, PreHashSet}, slot::Slot, }; -use massa_pool_exports::PoolConfig; +use massa_pool_exports::{PoolChannels, PoolConfig}; use massa_storage::Storage; use std::collections::BTreeSet; @@ -35,6 +35,9 @@ pub struct OperationPool { /// last consensus final periods, per thread last_cs_final_periods: Vec, + + /// channels used by the pool worker + channels: PoolChannels, } impl OperationPool { @@ -42,6 +45,7 @@ impl OperationPool { config: PoolConfig, storage: &Storage, execution_controller: Box, + channels: PoolChannels, ) -> Self { OperationPool { operations: Default::default(), @@ -51,6 +55,7 @@ impl OperationPool { config, storage: storage.clone_without_refs(), execution_controller, + channels, } } @@ -112,10 +117,15 @@ impl OperationPool { { let ops = ops_storage.read_operations(); for op_id in items { + let op = ops + .get(&op_id) + .expect("attempting to add operation to pool, but it is absent from storage"); + // Broadcast operation to active sender(channel) subscribers. 
+ if self.config.broadcast_enabled { + let _ = self.channels.operation_sender.send(op.content.clone()); + } let op_info = OperationInfo::from_op( - ops.get(&op_id).expect( - "attempting to add operation to pool, but it is absent from storage", - ), + op, self.config.operation_validity_periods, self.config.roll_price, self.config.thread_count, diff --git a/massa-pool-worker/src/tests/operation_pool_tests.rs b/massa-pool-worker/src/tests/operation_pool_tests.rs index a960e3eb4b1..07823d8fb6a 100644 --- a/massa-pool-worker/src/tests/operation_pool_tests.rs +++ b/massa-pool-worker/src/tests/operation_pool_tests.rs @@ -23,15 +23,16 @@ use massa_execution_exports::test_exports::MockExecutionController; use massa_models::{ address::Address, amount::Amount, - operation::{Operation, OperationSerializer, OperationType, WrappedOperation}, + operation::{Operation, OperationSerializer, OperationType, SecureShareOperation}, prehash::PreHashMap, + secure_share::SecureShareContent, slot::Slot, - wrapped::WrappedContent, }; -use massa_pool_exports::PoolConfig; +use massa_pool_exports::{PoolChannels, PoolConfig}; use massa_signature::KeyPair; use massa_storage::Storage; use std::str::FromStr; +use tokio::sync::broadcast; #[test] fn test_add_operation() { @@ -56,7 +57,7 @@ fn test_add_irrelevant_operation() { }); } -fn get_transaction(expire_period: u64, fee: u64) -> WrappedOperation { +fn get_transaction(expire_period: u64, fee: u64) -> SecureShareOperation { let sender_keypair = KeyPair::generate(); let recv_keypair = KeyPair::generate(); @@ -70,7 +71,7 @@ fn get_transaction(expire_period: u64, fee: u64) -> WrappedOperation { op, expire_period, }; - Operation::new_wrapped(content, OperationSerializer::new(), &sender_keypair).unwrap() + Operation::new_verifiable(content, OperationSerializer::new(), &sender_keypair).unwrap() } /// TODO refactor old tests @@ -80,7 +81,13 @@ fn test_pool() { let (execution_controller, _execution_receiver) = MockExecutionController::new_with_receiver(); 
let pool_config = PoolConfig::default(); let storage_base = Storage::create_root(); - let mut pool = OperationPool::init(pool_config, &storage_base, execution_controller); + let operation_sender = broadcast::channel(pool_config.broadcast_operations_capacity).0; + let mut pool = OperationPool::init( + pool_config, + &storage_base, + execution_controller, + PoolChannels { operation_sender }, + ); // generate (id, transactions, range of validity) by threads let mut thread_tx_lists = vec![Vec::new(); pool_config.thread_count as usize]; for i in 0..18 { @@ -105,7 +112,9 @@ fn test_pool() { //TODO: compare //assert_eq!(storage.get_op_refs(), &ops.keys().copied().collect::>()); - let op_thread = op.creator_address.get_thread(pool_config.thread_count); + let op_thread = op + .content_creator_address + .get_thread(pool_config.thread_count); thread_tx_lists[op_thread as usize].push((op, start_period..=expire_period)); } @@ -185,7 +194,9 @@ fn test_pool() { pool.add_operations(storage); //TODO: compare //assert_eq!(storage.get_op_refs(), &Set::::default()); - let op_thread = op.creator_address.get_thread(pool_config.thread_count); + let op_thread = op + .content_creator_address + .get_thread(pool_config.thread_count); let (ids, _) = pool.get_block_operations(&Slot::new(expire_period - 1, op_thread)); assert!(ids.is_empty()); } diff --git a/massa-pool-worker/src/tests/tools.rs b/massa-pool-worker/src/tests/tools.rs index c7c8c0a0eb8..130d0f6beb5 100644 --- a/massa-pool-worker/src/tests/tools.rs +++ b/massa-pool-worker/src/tests/tools.rs @@ -8,24 +8,25 @@ use massa_hash::Hash; use massa_models::{ address::Address, amount::Amount, - block::BlockId, - endorsement::{Endorsement, EndorsementSerializer, WrappedEndorsement}, - operation::{Operation, OperationSerializer, OperationType, WrappedOperation}, + block_id::BlockId, + endorsement::{Endorsement, EndorsementSerializer, SecureShareEndorsement}, + operation::{Operation, OperationSerializer, OperationType, SecureShareOperation}, + 
secure_share::SecureShareContent, slot::Slot, - wrapped::WrappedContent, }; -use massa_pool_exports::{PoolConfig, PoolController, PoolManager}; +use massa_pool_exports::{PoolChannels, PoolConfig, PoolController, PoolManager}; use massa_signature::{KeyPair, PublicKey}; use massa_storage::Storage; use std::str::FromStr; use std::sync::mpsc::Receiver; +use tokio::sync::broadcast; /// Tooling to create a transaction with an expire periods /// TODO move tooling in a dedicated module pub fn create_operation_with_expire_period( keypair: &KeyPair, expire_period: u64, -) -> WrappedOperation { +) -> SecureShareOperation { let recv_keypair = KeyPair::generate(); let op = OperationType::Transaction { @@ -37,15 +38,15 @@ pub fn create_operation_with_expire_period( op, expire_period, }; - Operation::new_wrapped(content, OperationSerializer::new(), keypair).unwrap() + Operation::new_verifiable(content, OperationSerializer::new(), keypair).unwrap() } -/// Return `n` wrapped operations +/// Return `n` signed operations pub fn create_some_operations( n: usize, keypair: &KeyPair, expire_period: u64, -) -> Vec { +) -> Vec { (0..n) .map(|_| create_operation_with_expire_period(keypair, expire_period)) .collect() @@ -61,10 +62,14 @@ where ), { let storage: Storage = Storage::create_root(); - + let operation_sender = broadcast::channel(5000).0; let (execution_controller, execution_receiver) = MockExecutionController::new_with_receiver(); - let (pool_manager, pool_controller) = - start_pool_controller(cfg, &storage, execution_controller); + let (pool_manager, pool_controller) = start_pool_controller( + cfg, + &storage, + execution_controller, + PoolChannels { operation_sender }, + ); test(pool_manager, pool_controller, execution_receiver, storage) } @@ -73,15 +78,21 @@ pub fn operation_pool_test(cfg: PoolConfig, test: F) where F: FnOnce(OperationPool, Storage), { + let operation_sender = broadcast::channel(5000).0; let (execution_controller, _) = 
MockExecutionController::new_with_receiver(); let storage = Storage::create_root(); test( - OperationPool::init(cfg, &storage.clone_without_refs(), execution_controller), + OperationPool::init( + cfg, + &storage.clone_without_refs(), + execution_controller, + PoolChannels { operation_sender }, + ), storage, ) } -pub fn _get_transaction(expire_period: u64, fee: u64) -> WrappedOperation { +pub fn _get_transaction(expire_period: u64, fee: u64) -> SecureShareOperation { let sender_keypair = KeyPair::generate(); let op = OperationType::Transaction { @@ -93,11 +104,11 @@ pub fn _get_transaction(expire_period: u64, fee: u64) -> WrappedOperation { op, expire_period, }; - Operation::new_wrapped(content, OperationSerializer::new(), &sender_keypair).unwrap() + Operation::new_verifiable(content, OperationSerializer::new(), &sender_keypair).unwrap() } /// Creates an endorsement for use in pool tests. -pub fn _create_endorsement(slot: Slot) -> WrappedEndorsement { +pub fn _create_endorsement(slot: Slot) -> SecureShareEndorsement { let sender_keypair = KeyPair::generate(); let content = Endorsement { @@ -105,7 +116,7 @@ pub fn _create_endorsement(slot: Slot) -> WrappedEndorsement { index: 0, endorsed_block: BlockId(Hash::compute_from("blabla".as_bytes())), }; - Endorsement::new_wrapped(content, EndorsementSerializer::new(), &sender_keypair).unwrap() + Endorsement::new_verifiable(content, EndorsementSerializer::new(), &sender_keypair).unwrap() } pub fn _get_transaction_with_addresses( @@ -113,7 +124,7 @@ pub fn _get_transaction_with_addresses( fee: u64, sender_keypair: &KeyPair, recv_pub: PublicKey, -) -> WrappedOperation { +) -> SecureShareOperation { let op = OperationType::Transaction { recipient_address: Address::from_public_key(&recv_pub), amount: Amount::default(), @@ -123,5 +134,5 @@ pub fn _get_transaction_with_addresses( op, expire_period, }; - Operation::new_wrapped(content, OperationSerializer::new(), sender_keypair).unwrap() + Operation::new_verifiable(content, 
OperationSerializer::new(), sender_keypair).unwrap() } diff --git a/massa-pool-worker/src/types.rs b/massa-pool-worker/src/types.rs index 3ba6129a025..871f3c707e9 100644 --- a/massa-pool-worker/src/types.rs +++ b/massa-pool-worker/src/types.rs @@ -1,7 +1,7 @@ use massa_models::{ address::Address, amount::Amount, - operation::{OperationId, WrappedOperation}, + operation::{OperationId, SecureShareOperation}, }; use num::rational::Ratio; use std::cmp::Reverse; @@ -40,7 +40,7 @@ pub struct OperationInfo { impl OperationInfo { pub fn from_op( - op: &WrappedOperation, + op: &SecureShareOperation, operation_validity_periods: u64, roll_price: Amount, thread_count: u8, @@ -50,9 +50,9 @@ impl OperationInfo { cursor: build_operation_cursor(op), size: op.serialized_size(), max_gas: op.get_gas_usage(), - creator_address: op.creator_address, + creator_address: op.content_creator_address, fee: op.content.fee, - thread: op.creator_address.get_thread(thread_count), + thread: op.content_creator_address.get_thread(thread_count), validity_period_range: op.get_validity_range(operation_validity_periods), max_spending: op.get_max_spending(roll_price), } @@ -60,7 +60,7 @@ impl OperationInfo { } /// build a cursor from an operation -fn build_operation_cursor(op: &WrappedOperation) -> PoolOperationCursor { +fn build_operation_cursor(op: &SecureShareOperation) -> PoolOperationCursor { let quality = Ratio::new(op.content.fee.to_raw(), op.serialized_size() as u64); let inner = (Reverse(quality), op.id); // TODO take into account max_gas as well in the future (multi-dimensional packing) diff --git a/massa-pool-worker/src/worker.rs b/massa-pool-worker/src/worker.rs index 23bf89dbfe2..918f0ed4468 100644 --- a/massa-pool-worker/src/worker.rs +++ b/massa-pool-worker/src/worker.rs @@ -7,14 +7,13 @@ use crate::operation_pool::OperationPool; use crate::{controller_impl::PoolControllerImpl, endorsement_pool::EndorsementPool}; use massa_execution_exports::ExecutionController; use 
massa_pool_exports::PoolConfig; -use massa_pool_exports::{PoolController, PoolManager}; +use massa_pool_exports::{PoolChannels, PoolController, PoolManager}; use massa_storage::Storage; use parking_lot::RwLock; -use std::sync::mpsc::RecvError; -use std::thread; use std::{ - sync::mpsc::{sync_channel, Receiver}, + sync::mpsc::{sync_channel, Receiver, RecvError}, sync::Arc, + thread, thread::JoinHandle, }; @@ -112,6 +111,7 @@ pub fn start_pool_controller( config: PoolConfig, storage: &Storage, execution_controller: Box, + channels: PoolChannels, ) -> (Box, Box) { let (operations_input_sender, operations_input_receiver) = sync_channel(config.channels_size); let (endorsements_input_sender, endorsements_input_receiver) = @@ -120,6 +120,7 @@ pub fn start_pool_controller( config, storage, execution_controller, + channels, ))); let endorsement_pool = Arc::new(RwLock::new(EndorsementPool::init(config, storage))); let controller = PoolControllerImpl { diff --git a/massa-pos-exports/Cargo.toml b/massa-pos-exports/Cargo.toml index 78bb3be0f37..47e582ebdfa 100644 --- a/massa-pos-exports/Cargo.toml +++ b/massa-pos-exports/Cargo.toml @@ -13,7 +13,7 @@ nom = "7.1" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" thiserror = "1.0" -tokio = { version = "1.21", features = ["full"] } +tokio = { version = "1.23", features = ["full"] } tracing = "0.1" anyhow = "1.0" num = { version = "0.4", features = ["serde"] } diff --git a/massa-pos-exports/src/controller_traits.rs b/massa-pos-exports/src/controller_traits.rs index 2bfb80b7da1..8b7af8a8b2b 100644 --- a/massa-pos-exports/src/controller_traits.rs +++ b/massa-pos-exports/src/controller_traits.rs @@ -7,9 +7,10 @@ use std::collections::BTreeMap; use crate::PosResult; use massa_hash::Hash; -use massa_models::address::Address; -use massa_models::api::IndexedSlot; -use massa_models::slot::Slot; +use massa_models::{ + address::Address, + slot::{IndexedSlot, Slot}, +}; #[cfg(feature = "testing")] use 
std::collections::{HashMap, VecDeque}; diff --git a/massa-pos-exports/src/cycle_info.rs b/massa-pos-exports/src/cycle_info.rs index e911c566591..ac1ab74a79b 100644 --- a/massa-pos-exports/src/cycle_info.rs +++ b/massa-pos-exports/src/cycle_info.rs @@ -20,6 +20,7 @@ use nom::{ IResult, Parser, }; use num::rational::Ratio; +use serde::{Deserialize, Serialize}; use std::collections::BTreeMap; use std::ops::Bound::Included; @@ -450,7 +451,7 @@ impl Deserializer for CycleInfoDeserializer { } /// Block production statistics -#[derive(Default, Debug, Copy, Clone, PartialEq, Eq)] +#[derive(Default, Debug, Copy, Clone, PartialEq, Eq, Deserialize, Serialize)] pub struct ProductionStats { /// Number of successfully created blocks pub block_success_count: u64, diff --git a/massa-pos-exports/src/deferred_credits.rs b/massa-pos-exports/src/deferred_credits.rs index f5f72aa02af..0ff94a82017 100644 --- a/massa-pos-exports/src/deferred_credits.rs +++ b/massa-pos-exports/src/deferred_credits.rs @@ -14,12 +14,13 @@ use nom::{ sequence::tuple, IResult, Parser, }; +use serde::{Deserialize, Serialize}; use std::collections::BTreeMap; use std::ops::Bound::{Excluded, Included}; const DEFERRED_CREDITS_HASH_INITIAL_BYTES: &[u8; 32] = &[0; HASH_SIZE_BYTES]; -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Deserialize, Serialize)] /// Structure containing all the PoS deferred credits information pub struct DeferredCredits { /// Deferred credits diff --git a/massa-pos-exports/src/pos_changes.rs b/massa-pos-exports/src/pos_changes.rs index 81143043053..bd189e8de66 100644 --- a/massa-pos-exports/src/pos_changes.rs +++ b/massa-pos-exports/src/pos_changes.rs @@ -14,9 +14,10 @@ use nom::{ sequence::tuple, IResult, Parser, }; +use serde::{Deserialize, Serialize}; /// Recap of all PoS changes -#[derive(Default, Debug, Clone)] +#[derive(Default, Debug, Clone, Deserialize, Serialize)] pub struct PoSChanges { /// extra block seed bits added pub seed_bits: BitVec, diff --git 
a/massa-pos-exports/src/test_exports/mock.rs b/massa-pos-exports/src/test_exports/mock.rs index 86d23a3717a..8b5dc5b86dd 100644 --- a/massa-pos-exports/src/test_exports/mock.rs +++ b/massa-pos-exports/src/test_exports/mock.rs @@ -10,7 +10,10 @@ use std::{ }; use massa_hash::Hash; -use massa_models::{address::Address, api::IndexedSlot, slot::Slot}; +use massa_models::{ + address::Address, + slot::{IndexedSlot, Slot}, +}; use crate::{PosResult, Selection, SelectorController}; diff --git a/massa-pos-worker/src/controller.rs b/massa-pos-worker/src/controller.rs index 4d6e19e08dd..79bd08b9d69 100644 --- a/massa-pos-worker/src/controller.rs +++ b/massa-pos-worker/src/controller.rs @@ -7,7 +7,10 @@ use std::collections::BTreeMap; use crate::{Command, DrawCachePtr}; use massa_hash::Hash; -use massa_models::{address::Address, api::IndexedSlot, slot::Slot}; +use massa_models::{ + address::Address, + slot::{IndexedSlot, Slot}, +}; use massa_pos_exports::{PosError, PosResult, Selection, SelectorController, SelectorManager}; #[cfg(feature = "testing")] use std::collections::{HashMap, VecDeque}; diff --git a/massa-protocol-exports/Cargo.toml b/massa-protocol-exports/Cargo.toml index 2f9d48039a5..4f26346abc9 100644 --- a/massa-protocol-exports/Cargo.toml +++ b/massa-protocol-exports/Cargo.toml @@ -12,7 +12,7 @@ lazy_static = "1.4" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" thiserror = "1.0" -tokio = { version = "1.21", features = ["full"] } +tokio = { version = "1.23", features = ["full"] } tracing = "0.1" futures = {version = "0.3", optional = true } diff --git a/massa-protocol-exports/src/channels.rs b/massa-protocol-exports/src/channels.rs index 57b29d61f16..4189843c3c9 100644 --- a/massa-protocol-exports/src/channels.rs +++ b/massa-protocol-exports/src/channels.rs @@ -1,4 +1,3 @@ -use massa_models::operation::Operation; use massa_network_exports::{NetworkCommandSender, NetworkEventReceiver}; use tokio::sync::mpsc; @@ -10,8 +9,6 @@ use 
crate::ProtocolCommand; pub struct ProtocolSenders { /// network command sender pub network_command_sender: NetworkCommandSender, - /// Broadcast sender(channel) for new operations - pub operation_sender: tokio::sync::broadcast::Sender, } /// Contains channels(receivers) used by the protocol worker diff --git a/massa-protocol-exports/src/protocol_controller.rs b/massa-protocol-exports/src/protocol_controller.rs index 4f615dc194c..18e594c5905 100644 --- a/massa-protocol-exports/src/protocol_controller.rs +++ b/massa-protocol-exports/src/protocol_controller.rs @@ -5,8 +5,7 @@ use massa_logging::massa_trace; use massa_models::prehash::{PreHashMap, PreHashSet}; use massa_models::{ - block::{BlockId, WrappedHeader}, - endorsement::EndorsementId, + block_header::SecuredHeader, block_id::BlockId, endorsement::EndorsementId, operation::OperationId, }; use massa_network_exports::NetworkEventReceiver; @@ -40,7 +39,7 @@ pub enum ProtocolCommand { /// Wish list delta WishlistDelta { /// add to wish list - new: PreHashMap>, + new: PreHashMap>, /// remove from wish list remove: PreHashSet, }, @@ -94,7 +93,7 @@ impl ProtocolCommandSender { /// update the block wish list pub fn send_wishlist_delta( &mut self, - new: PreHashMap>, + new: PreHashMap>, remove: PreHashSet, ) -> Result<(), ProtocolError> { massa_trace!("protocol.command_sender.send_wishlist_delta", { "new": new, "remove": remove }); diff --git a/massa-protocol-exports/src/settings.rs b/massa-protocol-exports/src/settings.rs index a86156ae7ee..6c2ad08291a 100644 --- a/massa-protocol-exports/src/settings.rs +++ b/massa-protocol-exports/src/settings.rs @@ -55,8 +55,4 @@ pub struct ProtocolConfig { pub max_operations_propagation_time: MassaTime, /// max time we propagate endorsements pub max_endorsements_propagation_time: MassaTime, - /// Whether WebSockets are enabled - pub broadcast_enabled: bool, - /// operation sender sender(channel) capacity - pub broadcast_operations_capacity: usize, } diff --git 
a/massa-protocol-exports/src/test_exports/mock.rs b/massa-protocol-exports/src/test_exports/mock.rs index b38436b1606..8e159a73bf7 100644 --- a/massa-protocol-exports/src/test_exports/mock.rs +++ b/massa-protocol-exports/src/test_exports/mock.rs @@ -1,7 +1,7 @@ // Copyright (c) 2022 MASSA LABS use crate::{ProtocolCommand, ProtocolCommandSender}; -use massa_models::block::BlockId; +use massa_models::block_id::BlockId; use massa_time::MassaTime; use tokio::{sync::mpsc, time::sleep}; diff --git a/massa-protocol-exports/src/tests/mock_network_controller.rs b/massa-protocol-exports/src/tests/mock_network_controller.rs index db9ae9b627b..da87b32cd48 100644 --- a/massa-protocol-exports/src/tests/mock_network_controller.rs +++ b/massa-protocol-exports/src/tests/mock_network_controller.rs @@ -1,13 +1,12 @@ // Copyright (c) 2022 MASSA LABS use massa_models::{ - block::{BlockId, WrappedHeader}, - endorsement::WrappedEndorsement, + block_header::SecuredHeader, block_id::BlockId, endorsement::SecureShareEndorsement, }; use massa_models::{ config::CHANNEL_SIZE, node::NodeId, - operation::{OperationId, WrappedOperation}, + operation::{OperationId, SecureShareOperation}, }; use massa_network_exports::{ AskForBlocksInfo, BlockInfoReply, NetworkCommand, NetworkCommandSender, NetworkEvent, @@ -74,7 +73,7 @@ impl MockNetworkController { /// send header /// todo inconsistency with names - pub async fn send_header(&mut self, source_node_id: NodeId, header: WrappedHeader) { + pub async fn send_header(&mut self, source_node_id: NodeId, header: SecuredHeader) { self.network_event_tx .send(NetworkEvent::ReceivedBlockHeader { source_node_id, @@ -89,7 +88,7 @@ impl MockNetworkController { pub async fn send_operations( &mut self, source_node_id: NodeId, - operations: Vec, + operations: Vec, ) { self.network_event_tx .send(NetworkEvent::ReceivedOperations { @@ -137,7 +136,7 @@ impl MockNetworkController { pub async fn send_endorsements( &mut self, source_node_id: NodeId, - endorsements: Vec, + 
endorsements: Vec, ) { self.network_event_tx .send(NetworkEvent::ReceivedEndorsements { diff --git a/massa-protocol-exports/src/tests/tools.rs b/massa-protocol-exports/src/tests/tools.rs index 2928ad7a206..bf463cb6fa2 100644 --- a/massa-protocol-exports/src/tests/tools.rs +++ b/massa-protocol-exports/src/tests/tools.rs @@ -5,13 +5,15 @@ use crate::ProtocolConfig; use massa_hash::Hash; use massa_models::node::NodeId; use massa_models::operation::OperationSerializer; -use massa_models::wrapped::WrappedContent; +use massa_models::secure_share::SecureShareContent; use massa_models::{ address::Address, amount::Amount, - block::{Block, BlockHeader, BlockHeaderSerializer, BlockId, BlockSerializer, WrappedBlock}, - endorsement::{Endorsement, EndorsementSerializerLW, WrappedEndorsement}, - operation::{Operation, OperationType, WrappedOperation}, + block::{Block, BlockSerializer, SecureShareBlock}, + block_header::{BlockHeader, BlockHeaderSerializer}, + block_id::BlockId, + endorsement::{Endorsement, EndorsementSerializerLW, SecureShareEndorsement}, + operation::{Operation, OperationType, SecureShareOperation}, slot::Slot, }; use massa_network_exports::{AskForBlocksInfo, NetworkCommand}; @@ -55,8 +57,8 @@ pub async fn create_and_connect_nodes( /// Creates a block for use in protocol, /// without paying attention to consensus related things /// like slot, parents, and merkle root. 
-pub fn create_block(keypair: &KeyPair) -> WrappedBlock { - let header = BlockHeader::new_wrapped( +pub fn create_block(keypair: &KeyPair) -> SecureShareBlock { + let header = BlockHeader::new_verifiable( BlockHeader { slot: Slot::new(1, 0), parents: vec![ @@ -71,7 +73,7 @@ pub fn create_block(keypair: &KeyPair) -> WrappedBlock { ) .unwrap(); - Block::new_wrapped( + Block::new_verifiable( Block { header, operations: Default::default(), @@ -90,14 +92,14 @@ pub fn create_block(keypair: &KeyPair) -> WrappedBlock { pub fn create_block_with_operations( keypair: &KeyPair, slot: Slot, - operations: Vec, -) -> WrappedBlock { + operations: Vec, +) -> SecureShareBlock { let operation_merkle_root = Hash::compute_from( &operations.iter().fold(Vec::new(), |acc, v| { [acc, v.id.to_bytes().to_vec()].concat() })[..], ); - let header = BlockHeader::new_wrapped( + let header = BlockHeader::new_verifiable( BlockHeader { slot, parents: vec![ @@ -113,7 +115,7 @@ pub fn create_block_with_operations( .unwrap(); let op_ids = operations.into_iter().map(|op| op.id).collect(); - Block::new_wrapped( + Block::new_verifiable( Block { header, operations: op_ids, @@ -132,9 +134,9 @@ pub fn create_block_with_operations( pub fn create_block_with_endorsements( keypair: &KeyPair, slot: Slot, - endorsements: Vec, -) -> WrappedBlock { - let header = BlockHeader::new_wrapped( + endorsements: Vec, +) -> SecureShareBlock { + let header = BlockHeader::new_verifiable( BlockHeader { slot, parents: vec![ @@ -149,7 +151,7 @@ pub fn create_block_with_endorsements( ) .unwrap(); - Block::new_wrapped( + Block::new_verifiable( Block { header, operations: Default::default(), @@ -162,7 +164,7 @@ pub fn create_block_with_endorsements( /// Creates an endorsement for use in protocol tests, /// without paying attention to consensus related things. 
-pub fn create_endorsement() -> WrappedEndorsement { +pub fn create_endorsement() -> SecureShareEndorsement { let keypair = KeyPair::generate(); let content = Endorsement { @@ -170,14 +172,14 @@ pub fn create_endorsement() -> WrappedEndorsement { index: 0, endorsed_block: BlockId(Hash::compute_from(&[])), }; - Endorsement::new_wrapped(content, EndorsementSerializerLW::new(), &keypair).unwrap() + Endorsement::new_verifiable(content, EndorsementSerializerLW::new(), &keypair).unwrap() } /// Create an operation, from a specific sender, and with a specific expire period. pub fn create_operation_with_expire_period( keypair: &KeyPair, expire_period: u64, -) -> WrappedOperation { +) -> SecureShareOperation { let recv_keypair = KeyPair::generate(); let op = OperationType::Transaction { @@ -189,7 +191,7 @@ pub fn create_operation_with_expire_period( op, expire_period, }; - Operation::new_wrapped(content, OperationSerializer::new(), keypair).unwrap() + Operation::new_verifiable(content, OperationSerializer::new(), keypair).unwrap() } lazy_static::lazy_static! 
{ @@ -224,8 +226,6 @@ pub fn create_protocol_config() -> ProtocolConfig { t0: MassaTime::from_millis(16000), max_operations_propagation_time: MassaTime::from_millis(30000), max_endorsements_propagation_time: MassaTime::from_millis(60000), - broadcast_enabled: false, - broadcast_operations_capacity: 128, } } diff --git a/massa-protocol-worker/Cargo.toml b/massa-protocol-worker/Cargo.toml index 7d600ba7791..f81e8c3cb51 100644 --- a/massa-protocol-worker/Cargo.toml +++ b/massa-protocol-worker/Cargo.toml @@ -8,7 +8,7 @@ edition = "2021" [dependencies] serde_json = "1.0" -tokio = { version = "1.21", features = ["full"] } +tokio = { version = "1.23", features = ["full"] } tracing = "0.1" rayon = "1.5" # custom modules diff --git a/massa-protocol-worker/src/node_info.rs b/massa-protocol-worker/src/node_info.rs index a47c6812d3c..5abae7c746d 100644 --- a/massa-protocol-worker/src/node_info.rs +++ b/massa-protocol-worker/src/node_info.rs @@ -8,7 +8,7 @@ use massa_models::operation::OperationPrefixId; use massa_models::prehash::{CapacityAllocator, PreHashMap}; -use massa_models::{block::BlockId, endorsement::EndorsementId}; +use massa_models::{block_id::BlockId, endorsement::EndorsementId}; use massa_protocol_exports::ProtocolConfig; use tokio::time::Instant; diff --git a/massa-protocol-worker/src/protocol_network.rs b/massa-protocol-worker/src/protocol_network.rs index 2c34e74d402..52acc97cb07 100644 --- a/massa-protocol-worker/src/protocol_network.rs +++ b/massa-protocol-worker/src/protocol_network.rs @@ -9,12 +9,13 @@ use crate::protocol_worker::ProtocolWorker; use massa_hash::{Hash, HASH_SIZE_BYTES}; use massa_logging::massa_trace; use massa_models::{ - block::Block, - block::{BlockId, BlockSerializer, WrappedHeader}, + block::{Block, BlockSerializer}, + block_header::SecuredHeader, + block_id::BlockId, node::NodeId, - operation::{OperationId, WrappedOperation}, + operation::{OperationId, SecureShareOperation}, prehash::{CapacityAllocator, PreHashSet}, - wrapped::{Id, 
Wrapped}, + secure_share::{Id, SecureShare}, }; use massa_network_exports::{AskForBlocksInfo, BlockInfoReply, NetworkEvent}; use massa_protocol_exports::ProtocolError; @@ -171,9 +172,9 @@ impl ProtocolWorker { let mut all_blocks_info = vec![]; for (hash, info_wanted) in &list { let (header, operations_ids) = match self.storage.read_blocks().get(hash) { - Some(wrapped_block) => ( - wrapped_block.content.header.clone(), - wrapped_block.content.operations.clone(), + Some(signed_block) => ( + signed_block.content.header.clone(), + signed_block.content.operations.clone(), ), None => { // let the node know we don't have the block. @@ -238,7 +239,7 @@ impl ProtocolWorker { &mut self, from_node_id: NodeId, block_id: BlockId, - header: WrappedHeader, + header: SecuredHeader, ) -> Result<(), ProtocolError> { if let Some(info) = self.block_wishlist.get(&block_id) { if info.header.is_some() { @@ -410,7 +411,7 @@ impl ProtocolWorker { &mut self, from_node_id: NodeId, block_id: BlockId, - mut operations: Vec, + mut operations: Vec, op_timer: &mut Pin<&mut Sleep>, ) -> Result<(), ProtocolError> { if let Err(err) = self @@ -495,10 +496,10 @@ impl ProtocolWorker { .unwrap(); // wrap block - let wrapped_block = Wrapped { + let signed_block = SecureShare { signature: header.signature, - creator_public_key: header.creator_public_key, - creator_address: header.creator_address, + content_creator_pub_key: header.content_creator_pub_key, + content_creator_address: header.content_creator_address, id: block_id, content: block, serialized_data: content_serialized, @@ -509,11 +510,11 @@ impl ProtocolWorker { // add endorsements to local storage and claim ref // TODO change this if we make endorsements separate from block header block_storage.store_endorsements( - wrapped_block.content.header.content.endorsements.clone(), + signed_block.content.header.content.endorsements.clone(), ); - let slot = wrapped_block.content.header.content.slot; + let slot = signed_block.content.header.content.slot; 
// add block to local storage and claim ref - block_storage.store_block(wrapped_block); + block_storage.store_block(signed_block); // Send to consensus self.consensus_controller diff --git a/massa-protocol-worker/src/protocol_worker.rs b/massa-protocol-worker/src/protocol_worker.rs index 25b4875fd43..e63924a0df4 100644 --- a/massa-protocol-worker/src/protocol_worker.rs +++ b/massa-protocol-worker/src/protocol_worker.rs @@ -8,16 +8,16 @@ use crate::{node_info::NodeInfo, worker_operations_impl::OperationBatchBuffer}; use massa_consensus_exports::ConsensusController; use massa_logging::massa_trace; -use massa_models::operation::Operation; +use massa_models::secure_share::Id; use massa_models::slot::Slot; use massa_models::timeslots::get_block_slot_timestamp; -use massa_models::wrapped::Id; use massa_models::{ - block::{BlockId, WrappedHeader}, - endorsement::{EndorsementId, WrappedEndorsement}, + block_header::SecuredHeader, + block_id::BlockId, + endorsement::{EndorsementId, SecureShareEndorsement}, node::NodeId, operation::OperationPrefixId, - operation::{OperationId, WrappedOperation}, + operation::{OperationId, SecureShareOperation}, prehash::{CapacityAllocator, PreHashMap, PreHashSet}, }; use massa_network_exports::{AskForBlocksInfo, NetworkCommandSender, NetworkEventReceiver}; @@ -69,7 +69,6 @@ pub async fn start_protocol_controller( network_event_receiver: receivers.network_event_receiver, controller_command_rx: receivers.protocol_command_receiver, controller_manager_rx, - operation_sender: senders.operation_sender, }, consensus_controller, pool_controller, @@ -96,7 +95,7 @@ pub async fn start_protocol_controller( #[derive(Debug, Clone)] pub(crate) struct BlockInfo { /// The header of the block. - pub(crate) header: Option, + pub(crate) header: Option, /// Operations ids. 
None if not received yet pub(crate) operation_ids: Option>, /// Operations and endorsements contained in the block, @@ -107,7 +106,7 @@ pub(crate) struct BlockInfo { } impl BlockInfo { - fn new(header: Option, storage: Storage) -> Self { + fn new(header: Option, storage: Storage) -> Self { BlockInfo { header, operation_ids: None, @@ -133,8 +132,6 @@ pub struct ProtocolWorker { controller_command_rx: mpsc::Receiver, /// Channel to send management commands to the controller. controller_manager_rx: mpsc::Receiver, - /// Broadcast sender(channel) for new operations - operation_sender: tokio::sync::broadcast::Sender, /// Ids of active nodes mapped to node info. pub(crate) active_nodes: HashMap, /// List of wanted blocks, @@ -145,7 +142,7 @@ pub struct ProtocolWorker { /// Cache of processed operations pub(crate) checked_operations: CheckedOperations, /// List of processed headers - pub(crate) checked_headers: LinearHashCacheMap, + pub(crate) checked_headers: LinearHashCacheMap, /// List of ids of operations that we asked to the nodes pub(crate) asked_operations: PreHashMap)>, /// Buffer for operations that we want later @@ -166,8 +163,6 @@ pub struct ProtocolWorkerChannels { pub controller_command_rx: mpsc::Receiver, /// protocol management command receiver pub controller_manager_rx: mpsc::Receiver, - /// Broadcast sender(channel) for new operations - pub operation_sender: tokio::sync::broadcast::Sender, } impl ProtocolWorker { @@ -186,7 +181,6 @@ impl ProtocolWorker { network_event_receiver, controller_command_rx, controller_manager_rx, - operation_sender, }: ProtocolWorkerChannels, consensus_controller: Box, pool_controller: Box, @@ -200,7 +194,6 @@ impl ProtocolWorker { pool_controller, controller_command_rx, controller_manager_rx, - operation_sender, active_nodes: Default::default(), block_wishlist: Default::default(), checked_endorsements: LinearHashCacheSet::new(config.max_known_endorsements_size), @@ -378,7 +371,7 @@ impl ProtocolWorker { { "endorsements": 
storage.get_endorsement_refs() } ); for (node, node_info) in self.active_nodes.iter_mut() { - let new_endorsements: PreHashMap = { + let new_endorsements: PreHashMap = { let endorsements_reader = storage.read_endorsements(); storage .get_endorsement_refs() @@ -779,7 +772,7 @@ impl ProtocolWorker { /// - Block matches that of the block. pub(crate) async fn note_header_from_node( &mut self, - header: &WrappedHeader, + header: &SecuredHeader, source_node_id: &NodeId, ) -> Result, ProtocolError> { massa_trace!("protocol.protocol_worker.note_header_from_node", { "node": source_node_id, "header": header }); @@ -894,7 +887,7 @@ impl ProtocolWorker { /// - Valid signature pub(crate) async fn note_operations_from_node( &mut self, - operations: Vec, + operations: Vec, source_node_id: &NodeId, op_timer: &mut Pin<&mut Sleep>, ) -> Result<(), ProtocolError> { @@ -925,7 +918,7 @@ impl ProtocolWorker { verify_sigs_batch( &new_operations .iter() - .map(|(op_id, op)| (*op_id.get_hash(), op.signature, op.creator_public_key)) + .map(|(op_id, op)| (*op_id.get_hash(), op.signature, op.content_creator_pub_key)) .collect::>(), )?; @@ -939,11 +932,6 @@ impl ProtocolWorker { } if !new_operations.is_empty() { - if self.config.broadcast_enabled { - for op in new_operations.clone() { - let _ = self.operation_sender.send(op.1.content); - } - } // Store operation, claim locally let mut ops = self.storage.clone_without_refs(); ops.store_operations(new_operations.into_values().collect()); @@ -1001,7 +989,7 @@ impl ProtocolWorker { /// - Valid signature. 
pub(crate) async fn note_endorsements_from_node( &mut self, - endorsements: Vec, + endorsements: Vec, source_node_id: &NodeId, propagate: bool, ) -> Result<(), ProtocolError> { @@ -1028,7 +1016,7 @@ impl ProtocolWorker { ( *endorsement_id.get_hash(), endorsement.signature, - endorsement.creator_public_key, + endorsement.content_creator_pub_key, ) }) .collect::>(), diff --git a/massa-protocol-worker/src/tests/ask_block_scenarios.rs b/massa-protocol-worker/src/tests/ask_block_scenarios.rs index cad0469fe5d..8e40285da15 100644 --- a/massa-protocol-worker/src/tests/ask_block_scenarios.rs +++ b/massa-protocol-worker/src/tests/ask_block_scenarios.rs @@ -3,7 +3,7 @@ use super::tools::protocol_test; use massa_consensus_exports::test_exports::MockConsensusControllerMessage; use massa_models::prehash::PreHashSet; -use massa_models::{block::BlockId, slot::Slot}; +use massa_models::{block_id::BlockId, slot::Slot}; use massa_network_exports::{AskForBlocksInfo, BlockInfoReply, NetworkCommand}; use massa_protocol_exports::tests::tools; use massa_protocol_exports::tests::tools::{asked_list, assert_hash_asked_to_node}; @@ -40,7 +40,7 @@ async fn test_full_ask_block_workflow() { let op_1 = tools::create_operation_with_expire_period(&node_a.keypair, 5); let op_2 = tools::create_operation_with_expire_period(&node_a.keypair, 5); let op_thread = op_1 - .creator_address + .content_creator_address .get_thread(protocol_config.thread_count); let block = tools::create_block_with_operations( &node_a.keypair, diff --git a/massa-protocol-worker/src/tests/ban_nodes_scenarios.rs b/massa-protocol-worker/src/tests/ban_nodes_scenarios.rs index 12e5c4a76c2..b3a15230cc6 100644 --- a/massa-protocol-worker/src/tests/ban_nodes_scenarios.rs +++ b/massa-protocol-worker/src/tests/ban_nodes_scenarios.rs @@ -5,8 +5,8 @@ use massa_consensus_exports::test_exports::MockConsensusControllerMessage; use massa_hash::Hash; use massa_models::operation::OperationId; use massa_models::prehash::PreHashSet; -use 
massa_models::wrapped::Id; -use massa_models::{block::BlockId, slot::Slot}; +use massa_models::secure_share::Id; +use massa_models::{block_id::BlockId, slot::Slot}; use massa_network_exports::{BlockInfoReply, NetworkCommand}; use massa_pool_exports::test_exports::MockPoolControllerMessage; use massa_protocol_exports::tests::tools; diff --git a/massa-protocol-worker/src/tests/in_block_operations_scenarios.rs b/massa-protocol-worker/src/tests/in_block_operations_scenarios.rs index c891c9bbaf1..087100797c5 100644 --- a/massa-protocol-worker/src/tests/in_block_operations_scenarios.rs +++ b/massa-protocol-worker/src/tests/in_block_operations_scenarios.rs @@ -4,10 +4,11 @@ use super::tools::{protocol_test, send_and_propagate_block}; use massa_consensus_exports::test_exports::MockConsensusControllerMessage; use massa_hash::Hash; use massa_models::operation::OperationId; -use massa_models::wrapped::{Id, WrappedContent}; +use massa_models::secure_share::{Id, SecureShareContent}; use massa_models::{ address::Address, - block::{Block, BlockHeader, BlockHeaderSerializer, BlockSerializer}, + block::{Block, BlockSerializer}, + block_header::{BlockHeader, BlockHeaderSerializer}, slot::Slot, }; use massa_network_exports::NetworkCommand; @@ -47,7 +48,9 @@ async fn test_protocol_does_propagate_operations_received_in_blocks() { // block with ok operation let op = create_operation_with_expire_period(&keypair, 5); - let op_thread = op.creator_address.get_thread(protocol_config.thread_count); + let op_thread = op + .content_creator_address + .get_thread(protocol_config.thread_count); let block = create_block_with_operations( &creator_node.keypair, @@ -156,7 +159,9 @@ async fn test_protocol_sends_blocks_with_operations_to_consensus() { // block with ok operation { let op = create_operation_with_expire_period(&keypair, 5); - let op_thread = op.creator_address.get_thread(protocol_config.thread_count); + let op_thread = op + .content_creator_address + 
.get_thread(protocol_config.thread_count); let block = create_block_with_operations( &creator_node.keypair, @@ -208,11 +213,13 @@ async fn test_protocol_sends_blocks_with_operations_to_consensus() { // block with wrong merkle root { let op = create_operation_with_expire_period(&keypair, 5); - let op_thread = op.creator_address.get_thread(protocol_config.thread_count); + let op_thread = op + .content_creator_address + .get_thread(protocol_config.thread_count); let block = { let operation_merkle_root = Hash::compute_from("merkle root".as_bytes()); - let header = BlockHeader::new_wrapped( + let header = BlockHeader::new_verifiable( BlockHeader { slot: Slot::new(1, op_thread), parents: Vec::new(), @@ -224,7 +231,7 @@ async fn test_protocol_sends_blocks_with_operations_to_consensus() { ) .unwrap(); - Block::new_wrapped( + Block::new_verifiable( Block { header, operations: vec![op.clone()].into_iter().map(|op| op.id).collect(), @@ -271,7 +278,9 @@ async fn test_protocol_sends_blocks_with_operations_to_consensus() { // block with operation with wrong signature { let mut op = create_operation_with_expire_period(&keypair, 5); - let op_thread = op.creator_address.get_thread(protocol_config.thread_count); + let op_thread = op + .content_creator_address + .get_thread(protocol_config.thread_count); op.id = OperationId::new(Hash::compute_from("wrong signature".as_bytes())); let block = create_block_with_operations( &creator_node.keypair, diff --git a/massa-protocol-worker/src/tests/operations_scenarios.rs b/massa-protocol-worker/src/tests/operations_scenarios.rs index 3b7f73fa0a7..7c600367844 100644 --- a/massa-protocol-worker/src/tests/operations_scenarios.rs +++ b/massa-protocol-worker/src/tests/operations_scenarios.rs @@ -5,7 +5,7 @@ use super::tools::{protocol_test, protocol_test_with_storage}; use massa_consensus_exports::test_exports::MockConsensusControllerMessage; use massa_models::prehash::PreHashSet; -use massa_models::{self, address::Address, amount::Amount, 
block::BlockId, slot::Slot}; +use massa_models::{self, address::Address, amount::Amount, block_id::BlockId, slot::Slot}; use massa_network_exports::{BlockInfoReply, NetworkCommand}; use massa_pool_exports::test_exports::MockPoolControllerMessage; use massa_protocol_exports::tests::tools::{self, assert_hash_asked_to_node}; @@ -574,7 +574,7 @@ async fn test_protocol_propagates_operations_only_to_nodes_that_dont_know_about_ let op_1 = tools::create_operation_with_expire_period(&node_a.keypair, 5); let op_2 = tools::create_operation_with_expire_period(&node_a.keypair, 5); let op_thread = op_1 - .creator_address + .content_creator_address .get_thread(protocol_config.thread_count); let mut block = tools::create_block_with_operations( &node_a.keypair, diff --git a/massa-protocol-worker/src/tests/scenarios.rs b/massa-protocol-worker/src/tests/scenarios.rs index 143ca55aa42..9d93c04c562 100644 --- a/massa-protocol-worker/src/tests/scenarios.rs +++ b/massa-protocol-worker/src/tests/scenarios.rs @@ -4,7 +4,7 @@ use super::tools::{protocol_test, protocol_test_with_storage}; use massa_consensus_exports::test_exports::MockConsensusControllerMessage; -use massa_models::block::BlockId; +use massa_models::block_id::BlockId; use massa_models::prehash::{PreHashMap, PreHashSet}; use massa_network_exports::{AskForBlocksInfo, NetworkCommand}; use massa_protocol_exports::tests::tools; diff --git a/massa-protocol-worker/src/tests/tools.rs b/massa-protocol-worker/src/tests/tools.rs index 3a40123a544..47491208e5d 100644 --- a/massa-protocol-worker/src/tests/tools.rs +++ b/massa-protocol-worker/src/tests/tools.rs @@ -2,9 +2,7 @@ use crate::start_protocol_controller; use futures::Future; use massa_consensus_exports::test_exports::{ConsensusEventReceiver, MockConsensusController}; use massa_models::{ - block::{BlockId, WrappedBlock}, - node::NodeId, - operation::WrappedOperation, + block::SecureShareBlock, block_id::BlockId, node::NodeId, operation::SecureShareOperation, prehash::PreHashSet, 
}; use massa_network_exports::BlockInfoReply; @@ -14,7 +12,7 @@ use massa_protocol_exports::{ ProtocolManager, ProtocolReceivers, ProtocolSenders, }; use massa_storage::Storage; -use tokio::sync::{broadcast, mpsc}; +use tokio::sync::mpsc; pub async fn protocol_test(protocol_config: &ProtocolConfig, test: F) where @@ -44,14 +42,12 @@ where // start protocol controller let (protocol_command_sender, protocol_command_receiver) = mpsc::channel(protocol_config.controller_channel_size); - let operation_sender = broadcast::channel(protocol_config.broadcast_operations_capacity).0; let protocol_receivers = ProtocolReceivers { network_event_receiver, protocol_command_receiver, }; let protocol_senders = ProtocolSenders { network_command_sender, - operation_sender, }; // start protocol controller let protocol_manager: ProtocolManager = start_protocol_controller( @@ -119,7 +115,6 @@ where let protocol_senders = ProtocolSenders { network_command_sender: network_command_sender.clone(), - operation_sender: broadcast::channel(protocol_config.broadcast_operations_capacity).0, }; let protocol_receivers = ProtocolReceivers { @@ -164,10 +159,10 @@ where /// send a block and assert it has been propagate (or not) pub async fn send_and_propagate_block( network_controller: &mut MockNetworkController, - block: WrappedBlock, + block: SecureShareBlock, source_node_id: NodeId, protocol_command_sender: &mut ProtocolCommandSender, - operations: Vec, + operations: Vec, ) { network_controller .send_header(source_node_id, block.content.header.clone()) diff --git a/massa-protocol-worker/src/worker_operations_impl.rs b/massa-protocol-worker/src/worker_operations_impl.rs index 77d551467a4..e9c3dd74c41 100644 --- a/massa-protocol-worker/src/worker_operations_impl.rs +++ b/massa-protocol-worker/src/worker_operations_impl.rs @@ -14,7 +14,7 @@ use crate::protocol_worker::ProtocolWorker; use massa_logging::massa_trace; use massa_models::{ node::NodeId, - operation::{OperationPrefixIds, WrappedOperation}, + 
operation::{OperationPrefixIds, SecureShareOperation}, prehash::CapacityAllocator, }; use massa_protocol_exports::ProtocolError; @@ -141,7 +141,7 @@ impl ProtocolWorker { pub(crate) async fn on_operations_received( &mut self, node_id: NodeId, - operations: Vec, + operations: Vec, op_timer: &mut Pin<&mut Sleep>, ) { if let Err(err) = self @@ -209,7 +209,7 @@ impl ProtocolWorker { return Ok(()); } - let mut ops: Vec = Vec::with_capacity(op_pre_ids.len()); + let mut ops: Vec = Vec::with_capacity(op_pre_ids.len()); { // Scope the lock because of the async call to `send_operations` below. let stored_ops = self.storage.read_operations(); diff --git a/massa-sdk/Cargo.toml b/massa-sdk/Cargo.toml index ce8e1434561..759252af4f2 100644 --- a/massa-sdk/Cargo.toml +++ b/massa-sdk/Cargo.toml @@ -6,5 +6,6 @@ edition = "2021" [dependencies] jsonrpsee = { version = "0.16.2", features = ["client"] } http = "0.2.8" +massa_api_exports = { path = "../massa-api-exports" } massa_models = { path = "../massa-models" } massa_time = { path = "../massa-time" } diff --git a/massa-sdk/src/config.rs b/massa-sdk/src/config.rs index 0f28e771afb..5a376d40f77 100644 --- a/massa-sdk/src/config.rs +++ b/massa-sdk/src/config.rs @@ -2,10 +2,10 @@ use massa_time::MassaTime; -/// Http client settings. -/// the Http client settings +/// Client common settings. +/// the client common settings #[derive(Debug, Clone)] -pub struct HttpConfig { +pub struct ClientConfig { /// maximum size in bytes of a request. pub max_request_body_size: u32, /// maximum size in bytes of a response. @@ -21,3 +21,27 @@ pub struct HttpConfig { /// custom headers to pass with every request. pub headers: Vec<(String, String)>, } + +/// Http client settings. +/// the Http client settings +#[derive(Debug, Clone)] +pub struct HttpConfig { + /// common client configuration. + pub client_config: ClientConfig, + /// whether to enable HTTP. + pub enabled: bool, +} + +/// WebSocket client settings. 
+/// the WebSocket client settings +#[derive(Debug, Clone)] +pub struct WsConfig { + /// common client configuration. + pub client_config: ClientConfig, + /// whether to enable WS. + pub enabled: bool, + /// Max notifications per subscription. + pub max_notifs_per_subscription: usize, + /// Max number of redirections. + pub max_redirections: usize, +} diff --git a/massa-sdk/src/lib.rs b/massa-sdk/src/lib.rs index 60278ecf266..0edcf622c12 100644 --- a/massa-sdk/src/lib.rs +++ b/massa-sdk/src/lib.rs @@ -5,23 +5,38 @@ #![warn(unused_crate_dependencies)] use http::header::HeaderName; -use jsonrpsee::core::client::{CertificateStore, ClientT, IdKind}; +use jsonrpsee::core::client::{ + CertificateStore, ClientT, IdKind, Subscription, SubscriptionClientT, +}; use jsonrpsee::http_client::HttpClient; use jsonrpsee::rpc_params; -use jsonrpsee::ws_client::{HeaderMap, HeaderValue}; -use massa_models::api::{ - AddressInfo, BlockInfo, BlockSummary, DatastoreEntryInput, DatastoreEntryOutput, - EndorsementInfo, EventFilter, NodeStatus, OperationInfo, OperationInput, - ReadOnlyBytecodeExecution, ReadOnlyCall, TimeInterval, +use jsonrpsee::types::error::CallError; +use jsonrpsee::types::ErrorObject; +use jsonrpsee::ws_client::{HeaderMap, HeaderValue, WsClient, WsClientBuilder}; +use massa_api_exports::{ + address::AddressInfo, + block::{BlockInfo, BlockSummary}, + datastore::{DatastoreEntryInput, DatastoreEntryOutput}, + endorsement::EndorsementInfo, + execution::{ExecuteReadOnlyResponse, ReadOnlyBytecodeExecution, ReadOnlyCall}, + node::NodeStatus, + operation::{OperationInfo, OperationInput}, + TimeInterval, }; -use massa_models::clique::Clique; -use massa_models::composite::PubkeySig; -use massa_models::execution::ExecuteReadOnlyResponse; -use massa_models::node::NodeId; -use massa_models::output_event::SCOutputEvent; -use massa_models::prehash::{PreHashMap, PreHashSet}; use massa_models::{ - address::Address, block::BlockId, endorsement::EndorsementId, operation::OperationId, + 
address::Address, + block::FilledBlock, + block_header::BlockHeader, + block_id::BlockId, + clique::Clique, + composite::PubkeySig, + endorsement::EndorsementId, + execution::EventFilter, + node::NodeId, + operation::{Operation, OperationId}, + output_event::SCOutputEvent, + prehash::{PreHashMap, PreHashSet}, + version::Version, }; use jsonrpsee::{core::Error as JsonRpseeError, core::RpcResult, http_client::HttpClientBuilder}; @@ -29,7 +44,9 @@ use std::net::{IpAddr, SocketAddr}; use std::str::FromStr; mod config; +pub use config::ClientConfig; pub use config::HttpConfig; +pub use config::WsConfig; /// Client pub struct Client { @@ -58,7 +75,7 @@ impl Client { } } -/// TODO add ws client +/// Rpc client pub struct RpcClient { http_client: HttpClient, } @@ -66,41 +83,8 @@ pub struct RpcClient { impl RpcClient { /// Default constructor pub async fn from_url(url: &str, http_config: &HttpConfig) -> RpcClient { - let certificate_store = match http_config.certificate_store.as_str() { - "Native" => CertificateStore::Native, - "WebPki" => CertificateStore::WebPki, - _ => CertificateStore::Native, - }; - let id_kind = match http_config.id_kind.as_str() { - "Number" => IdKind::Number, - "String" => IdKind::String, - _ => IdKind::Number, - }; - - let mut headers = HeaderMap::new(); - http_config.headers.iter().for_each(|(key, value)| { - let header_name = match HeaderName::from_str(key.as_str()) { - Ok(header_name) => header_name, - Err(_) => panic!("invalid header name: {:?}", key), - }; - let header_value = match HeaderValue::from_str(value.as_str()) { - Ok(header_name) => header_name, - Err(_) => panic!("invalid header value: {:?}", value), - }; - headers.insert(header_name, header_value); - }); - - match HttpClientBuilder::default() - .max_request_body_size(http_config.max_request_body_size) - .request_timeout(http_config.request_timeout.to_duration()) - .max_concurrent_requests(http_config.max_concurrent_requests) - .certificate_store(certificate_store) - 
.id_format(id_kind) - .set_headers(headers) - .build(url) - { - Ok(http_client) => RpcClient { http_client }, - Err(_) => panic!("unable to connect to Node."), + RpcClient { + http_client: http_client_from_url(url, http_config).await, } } @@ -193,50 +177,50 @@ impl RpcClient { .await } - /// Returns node bootsrap whitelist IP address(es). + /// Returns node bootstrap whitelist IP address(es). pub async fn node_bootstrap_whitelist(&self) -> RpcResult> { self.http_client .request("node_bootstrap_whitelist", rpc_params![]) .await } - /// Allow everyone to bootsrap from the node. - /// remove bootsrap whitelist configuration file. + /// Allow everyone to bootstrap from the node. + /// remove bootstrap whitelist configuration file. pub async fn node_bootstrap_whitelist_allow_all(&self) -> RpcResult<()> { self.http_client .request("node_bootstrap_whitelist_allow_all", rpc_params![]) .await } - /// Add IP address(es) to node bootsrap whitelist. + /// Add IP address(es) to node bootstrap whitelist. pub async fn node_add_to_bootstrap_whitelist(&self, ips: Vec) -> RpcResult<()> { self.http_client .request("node_add_to_bootstrap_whitelist", rpc_params![ips]) .await } - /// Remove IP address(es) to bootsrap whitelist. + /// Remove IP address(es) to bootstrap whitelist. pub async fn node_remove_from_bootstrap_whitelist(&self, ips: Vec) -> RpcResult<()> { self.http_client .request("node_remove_from_bootstrap_whitelist", rpc_params![ips]) .await } - /// Returns node bootsrap blacklist IP address(es). + /// Returns node bootstrap blacklist IP address(es). pub async fn node_bootstrap_blacklist(&self) -> RpcResult> { self.http_client .request("node_bootstrap_blacklist", rpc_params![]) .await } - /// Add IP address(es) to node bootsrap blacklist. + /// Add IP address(es) to node bootstrap blacklist. 
pub async fn node_add_to_bootstrap_blacklist(&self, ips: Vec) -> RpcResult<()> { self.http_client .request("node_add_to_bootstrap_blacklist", rpc_params![ips]) .await } - /// Remove IP address(es) to bootsrap blacklist. + /// Remove IP address(es) to bootstrap blacklist. pub async fn node_remove_from_bootstrap_blacklist(&self, ips: Vec) -> RpcResult<()> { self.http_client .request("node_remove_from_bootstrap_blacklist", rpc_params![ips]) @@ -286,7 +270,7 @@ impl RpcClient { } /// Returns block(s) information associated to a given list of block(s) ID(s) - pub async fn get_blocks(&self, block_ids: Vec) -> RpcResult { + pub async fn get_blocks(&self, block_ids: Vec) -> RpcResult> { self.http_client .request("get_blocks", rpc_params![block_ids]) .await @@ -376,3 +360,244 @@ impl RpcClient { }) } } + +/// Client V2 +pub struct ClientV2 { + /// API V2 component + pub api: RpcClientV2, +} + +impl ClientV2 { + /// creates a new client + pub async fn new( + ip: IpAddr, + api_port: u16, + http_config: &HttpConfig, + ws_config: &WsConfig, + ) -> ClientV2 { + let api_socket_addr = SocketAddr::new(ip, api_port); + ClientV2 { + api: RpcClientV2::from_url(api_socket_addr, http_config, ws_config).await, + } + } +} + +/// Rpc V2 client +pub struct RpcClientV2 { + http_client: Option, + ws_client: Option, +} + +impl RpcClientV2 { + /// Default constructor + pub async fn from_url( + socket_addr: SocketAddr, + http_config: &HttpConfig, + ws_config: &WsConfig, + ) -> RpcClientV2 { + let http_url = format!("http://{}", socket_addr); + let ws_url = format!("ws://{}", socket_addr); + + if http_config.enabled && !ws_config.enabled { + let http_client = http_client_from_url(&http_url, http_config).await; + return RpcClientV2 { + http_client: Some(http_client), + ws_client: None, + }; + } else if !http_config.enabled && ws_config.enabled { + let ws_client = ws_client_from_url(&ws_url, ws_config).await; + return RpcClientV2 { + http_client: None, + ws_client: Some(ws_client), + }; + } else if 
!http_config.enabled && !ws_config.enabled { + panic!("wrong client configuration, you can't disable both http and ws"); + } + + let http_client = http_client_from_url(&http_url, http_config).await; + let ws_client = ws_client_from_url(&ws_url, ws_config).await; + + RpcClientV2 { + http_client: Some(http_client), + ws_client: Some(ws_client), + } + } + + //////////////// + // API V2 // + //////////////// + // + // Experimental APIs. They might disappear, and they will change // + + /// Get Massa node version + pub async fn get_version(&self) -> RpcResult { + if let Some(client) = self.http_client.as_ref() { + client.request("get_version", rpc_params![]).await + } else { + Err(JsonRpseeError::Custom( + "error, no Http client instance found".to_owned(), + )) + } + } + + /// New produced blocks + pub async fn subscribe_new_blocks( + &self, + ) -> Result, jsonrpsee::core::Error> { + if let Some(client) = self.ws_client.as_ref() { + client + .subscribe( + "subscribe_new_blocks", + rpc_params![], + "unsubscribe_new_blocks", + ) + .await + } else { + Err(CallError::Custom(ErrorObject::owned( + -32080, + "error, no WebSocket client instance found".to_owned(), + None::<()>, + )) + .into()) + } + } + + /// New produced blocks headers + pub async fn subscribe_new_blocks_headers( + &self, + ) -> Result, jsonrpsee::core::Error> { + if let Some(client) = self.ws_client.as_ref() { + client + .subscribe( + "subscribe_new_blocks_headers", + rpc_params![], + "unsubscribe_new_blocks_headers", + ) + .await + } else { + Err(CallError::Custom(ErrorObject::owned( + -32080, + "error, no WebSocket client instance found".to_owned(), + None::<()>, + )) + .into()) + } + } + + /// New produced blocks with operations content. 
+ pub async fn subscribe_new_filled_blocks( + &self, + ) -> Result, jsonrpsee::core::Error> { + if let Some(client) = self.ws_client.as_ref() { + client + .subscribe( + "subscribe_new_filled_blocks", + rpc_params![], + "unsubscribe_new_filled_blocks", + ) + .await + } else { + Err(CallError::Custom(ErrorObject::owned( + -32080, + "error, no WebSocket client instance found".to_owned(), + None::<()>, + )) + .into()) + } + } + + /// New produced operations. + pub async fn subscribe_new_operations( + &self, + ) -> Result, jsonrpsee::core::Error> { + if let Some(client) = self.ws_client.as_ref() { + client + .subscribe( + "subscribe_new_operations", + rpc_params![], + "unsubscribe_new_operations", + ) + .await + } else { + Err(CallError::Custom(ErrorObject::owned( + -32080, + "error, no WebSocket client instance found".to_owned(), + None::<()>, + )) + .into()) + } + } +} + +async fn http_client_from_url(url: &str, http_config: &HttpConfig) -> HttpClient { + match HttpClientBuilder::default() + .max_request_body_size(http_config.client_config.max_request_body_size) + .request_timeout(http_config.client_config.request_timeout.to_duration()) + .max_concurrent_requests(http_config.client_config.max_concurrent_requests) + .certificate_store(get_certificate_store( + http_config.client_config.certificate_store.as_str(), + )) + .id_format(get_id_kind(http_config.client_config.id_kind.as_str())) + .set_headers(get_headers(&http_config.client_config.headers)) + .build(url) + { + Ok(http_client) => http_client, + Err(_) => panic!("unable to create Http client."), + } +} + +async fn ws_client_from_url(url: &str, ws_config: &WsConfig) -> WsClient +where + WsClient: SubscriptionClientT, +{ + match WsClientBuilder::default() + .max_request_body_size(ws_config.client_config.max_request_body_size) + .request_timeout(ws_config.client_config.request_timeout.to_duration()) + .max_concurrent_requests(ws_config.client_config.max_concurrent_requests) + 
.certificate_store(get_certificate_store( + ws_config.client_config.certificate_store.as_str(), + )) + .id_format(get_id_kind(ws_config.client_config.id_kind.as_str())) + .set_headers(get_headers(&ws_config.client_config.headers)) + .max_notifs_per_subscription(ws_config.max_notifs_per_subscription) + .max_redirections(ws_config.max_redirections) + .build(url) + .await + { + Ok(ws_client) => ws_client, + Err(_) => panic!("unable to create WebSocket client"), + } +} + +fn get_certificate_store(certificate_store: &str) -> CertificateStore { + match certificate_store { + "Native" => CertificateStore::Native, + "WebPki" => CertificateStore::WebPki, + _ => CertificateStore::Native, + } +} + +fn get_id_kind(id_kind: &str) -> IdKind { + match id_kind { + "Number" => IdKind::Number, + "String" => IdKind::String, + _ => IdKind::Number, + } +} + +fn get_headers(headers: &[(String, String)]) -> HeaderMap { + let mut headers_map = HeaderMap::new(); + headers.iter().for_each(|(key, value)| { + let header_name = match HeaderName::from_str(key.as_str()) { + Ok(header_name) => header_name, + Err(_) => panic!("invalid header name: {:?}", key), + }; + let header_value = match HeaderValue::from_str(value.as_str()) { + Ok(header_name) => header_name, + Err(_) => panic!("invalid header value: {:?}", value), + }; + headers_map.insert(header_name, header_value); + }); + + headers_map +} diff --git a/massa-storage/src/block_indexes.rs b/massa-storage/src/block_indexes.rs index 251cb23fd48..97a6b163c95 100644 --- a/massa-storage/src/block_indexes.rs +++ b/massa-storage/src/block_indexes.rs @@ -2,7 +2,8 @@ use std::{collections::hash_map, collections::HashMap}; use massa_models::{ address::Address, - block::{BlockId, WrappedBlock}, + block::SecureShareBlock, + block_id::BlockId, endorsement::EndorsementId, operation::OperationId, prehash::{PreHashMap, PreHashSet}, @@ -14,7 +15,7 @@ use massa_models::{ #[derive(Default)] pub struct BlockIndexes { /// Blocks structure container - blocks: 
PreHashMap, + blocks: PreHashMap, /// Structure mapping creators with the created blocks index_by_creator: PreHashMap>, /// Structure mapping slot with their block id @@ -29,11 +30,11 @@ impl BlockIndexes { /// Insert a block and populate the indexes. /// Arguments: /// - block: the block to insert - pub(crate) fn insert(&mut self, block: WrappedBlock) { + pub(crate) fn insert(&mut self, block: SecureShareBlock) { if let Ok(b) = self.blocks.try_insert(block.id, block) { // update creator index self.index_by_creator - .entry(b.creator_address) + .entry(b.content_creator_address) .or_default() .insert(b.id); @@ -61,11 +62,11 @@ impl BlockIndexes { /// Remove a block, remove from the indexes and do some clean-up in indexes if necessary. /// Arguments: /// * `block_id`: the block id to remove - pub(crate) fn remove(&mut self, block_id: &BlockId) -> Option { + pub(crate) fn remove(&mut self, block_id: &BlockId) -> Option { if let Some(b) = self.blocks.remove(block_id) { // update creator index if let hash_map::Entry::Occupied(mut occ) = - self.index_by_creator.entry(b.creator_address) + self.index_by_creator.entry(b.content_creator_address) { occ.get_mut().remove(&b.id); if occ.get().is_empty() { @@ -113,7 +114,7 @@ impl BlockIndexes { /// /// Returns: /// - a reference to the block, or None if not found - pub fn get(&self, id: &BlockId) -> Option<&WrappedBlock> { + pub fn get(&self, id: &BlockId) -> Option<&SecureShareBlock> { self.blocks.get(id) } diff --git a/massa-storage/src/endorsement_indexes.rs b/massa-storage/src/endorsement_indexes.rs index f130cf909d2..92d165dd881 100644 --- a/massa-storage/src/endorsement_indexes.rs +++ b/massa-storage/src/endorsement_indexes.rs @@ -2,7 +2,7 @@ use std::collections::hash_map; use massa_models::{ address::Address, - endorsement::{EndorsementId, WrappedEndorsement}, + endorsement::{EndorsementId, SecureShareEndorsement}, prehash::{PreHashMap, PreHashSet}, }; @@ -11,7 +11,7 @@ use massa_models::{ #[derive(Default)] pub struct 
EndorsementIndexes { /// Endorsements structure container - endorsements: PreHashMap, + endorsements: PreHashMap, /// Structure mapping creators with the created endorsements index_by_creator: PreHashMap>, } @@ -20,11 +20,11 @@ impl EndorsementIndexes { /// Insert an endorsement and populate the indexes. /// Arguments: /// - endorsement: the endorsement to insert - pub(crate) fn insert(&mut self, endorsement: WrappedEndorsement) { + pub(crate) fn insert(&mut self, endorsement: SecureShareEndorsement) { if let Ok(e) = self.endorsements.try_insert(endorsement.id, endorsement) { // update creator index self.index_by_creator - .entry(e.creator_address) + .entry(e.content_creator_address) .or_default() .insert(e.id); } @@ -33,11 +33,14 @@ impl EndorsementIndexes { /// Remove a endorsement, remove from the indexes and made some clean-up in indexes if necessary. /// Arguments: /// * `endorsement_id`: the endorsement id to remove - pub(crate) fn remove(&mut self, endorsement_id: &EndorsementId) -> Option { + pub(crate) fn remove( + &mut self, + endorsement_id: &EndorsementId, + ) -> Option { if let Some(e) = self.endorsements.remove(endorsement_id) { // update creator index if let hash_map::Entry::Occupied(mut occ) = - self.index_by_creator.entry(e.creator_address) + self.index_by_creator.entry(e.content_creator_address) { occ.get_mut().remove(&e.id); if occ.get().is_empty() { @@ -50,7 +53,7 @@ impl EndorsementIndexes { } /// Gets a reference to a stored endorsement, if any. 
- pub fn get(&self, id: &EndorsementId) -> Option<&WrappedEndorsement> { + pub fn get(&self, id: &EndorsementId) -> Option<&SecureShareEndorsement> { self.endorsements.get(id) } diff --git a/massa-storage/src/lib.rs b/massa-storage/src/lib.rs index a52a2cf6b50..14c2b786be7 100644 --- a/massa-storage/src/lib.rs +++ b/massa-storage/src/lib.rs @@ -20,11 +20,12 @@ mod tests; use block_indexes::BlockIndexes; use endorsement_indexes::EndorsementIndexes; use massa_models::prehash::{CapacityAllocator, PreHashMap, PreHashSet, PreHashed}; -use massa_models::wrapped::Id; +use massa_models::secure_share::Id; use massa_models::{ - block::{BlockId, WrappedBlock}, - endorsement::{EndorsementId, WrappedEndorsement}, - operation::{OperationId, WrappedOperation}, + block::SecureShareBlock, + block_id::BlockId, + endorsement::{EndorsementId, SecureShareEndorsement}, + operation::{OperationId, SecureShareOperation}, }; use operation_indexes::OperationIndexes; use parking_lot::{RwLock, RwLockReadGuard, RwLockWriteGuard}; @@ -278,7 +279,7 @@ impl Storage { /// Store a block /// Note that this also claims a local reference to the block - pub fn store_block(&mut self, block: WrappedBlock) { + pub fn store_block(&mut self, block: SecureShareBlock) { let id = block.id; let mut owners = self.block_owners.write(); let mut blocks = self.blocks.write(); @@ -362,7 +363,7 @@ impl Storage { /// Store operations /// Claims a local reference to the added operation - pub fn store_operations(&mut self, operations: Vec) { + pub fn store_operations(&mut self, operations: Vec) { if operations.is_empty() { return; } @@ -460,7 +461,7 @@ impl Storage { /// Store endorsements /// Claims local references to the added endorsements - pub fn store_endorsements(&mut self, endorsements: Vec) { + pub fn store_endorsements(&mut self, endorsements: Vec) { if endorsements.is_empty() { return; } diff --git a/massa-storage/src/operation_indexes.rs b/massa-storage/src/operation_indexes.rs index 3fa4f1e3d02..30c42211840 
100644 --- a/massa-storage/src/operation_indexes.rs +++ b/massa-storage/src/operation_indexes.rs @@ -2,7 +2,7 @@ use std::collections::hash_map; use massa_models::{ address::Address, - operation::{OperationId, OperationPrefixId, WrappedOperation}, + operation::{OperationId, OperationPrefixId, SecureShareOperation}, prehash::{PreHashMap, PreHashSet}, }; @@ -11,7 +11,7 @@ use massa_models::{ #[derive(Default)] pub struct OperationIndexes { /// Operations structure container - operations: PreHashMap, + operations: PreHashMap, /// Structure mapping creators with the created operations index_by_creator: PreHashMap>, /// Structure indexing operations by ID prefix @@ -22,11 +22,11 @@ impl OperationIndexes { /// Insert an operation and populate the indexes. /// Arguments: /// * `operation`: the operation to insert - pub(crate) fn insert(&mut self, operation: WrappedOperation) { + pub(crate) fn insert(&mut self, operation: SecureShareOperation) { if let Ok(o) = self.operations.try_insert(operation.id, operation) { // update creator index self.index_by_creator - .entry(o.creator_address) + .entry(o.content_creator_address) .or_default() .insert(o.id); // update prefix index @@ -40,11 +40,11 @@ impl OperationIndexes { /// Remove a operation, remove from the indexes and made some clean-up in indexes if necessary. /// Arguments: /// * `operation_id`: the operation id to remove - pub(crate) fn remove(&mut self, operation_id: &OperationId) -> Option { + pub(crate) fn remove(&mut self, operation_id: &OperationId) -> Option { if let Some(o) = self.operations.remove(operation_id) { // update creator index if let hash_map::Entry::Occupied(mut occ) = - self.index_by_creator.entry(o.creator_address) + self.index_by_creator.entry(o.content_creator_address) { occ.get_mut().remove(&o.id); if occ.get().is_empty() { @@ -64,7 +64,7 @@ impl OperationIndexes { } /// Gets a reference to a stored operation, if any. 
- pub fn get(&self, id: &OperationId) -> Option<&WrappedOperation> { + pub fn get(&self, id: &OperationId) -> Option<&SecureShareOperation> { self.operations.get(id) } diff --git a/massa-wallet/src/lib.rs b/massa-wallet/src/lib.rs index e27a7a2b5a0..5a84c05c8fe 100644 --- a/massa-wallet/src/lib.rs +++ b/massa-wallet/src/lib.rs @@ -11,9 +11,9 @@ use massa_cipher::{decrypt, encrypt}; use massa_hash::Hash; use massa_models::address::Address; use massa_models::composite::PubkeySig; -use massa_models::operation::{Operation, OperationSerializer, WrappedOperation}; +use massa_models::operation::{Operation, OperationSerializer, SecureShareOperation}; use massa_models::prehash::{PreHashMap, PreHashSet}; -use massa_models::wrapped::WrappedContent; +use massa_models::secure_share::SecureShareContent; use massa_signature::{KeyPair, PublicKey}; use serde::{Deserialize, Serialize}; use std::path::PathBuf; @@ -142,11 +142,11 @@ impl Wallet { &self, content: Operation, address: Address, - ) -> Result { + ) -> Result { let sender_keypair = self .find_associated_keypair(&address) .ok_or_else(|| WalletError::MissingKeyError(address))?; - Ok(Operation::new_wrapped(content, OperationSerializer::new(), sender_keypair).unwrap()) + Ok(Operation::new_verifiable(content, OperationSerializer::new(), sender_keypair).unwrap()) } } diff --git a/rust-toolchain.toml b/rust-toolchain.toml index e1431a3b8ba..eb673314741 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,2 +1,2 @@ [toolchain] -channel = "nightly-2022-12-24" \ No newline at end of file +channel = "nightly-2023-01-30" \ No newline at end of file diff --git a/tools/setup_test.rs b/tools/setup_test.rs index 790a6e86e65..4401cc9e06e 100644 --- a/tools/setup_test.rs +++ b/tools/setup_test.rs @@ -26,7 +26,7 @@ use glob::glob; use tar::Archive; // git tag -const TAG: &str = "TEST.18.2"; +const TAG: &str = "TEST.19.1"; // Maximum archive file size to download in bytes (here: 1Mb) // const ARCHIVE_MAX_SIZE: u64 = 2; // Maximum 
archive file size to download in bytes (DEBUG)