diff --git a/.cargo/config.toml b/.cargo/config.toml
index 4796a2c26965c..042dded2fa9b3 100644
--- a/.cargo/config.toml
+++ b/.cargo/config.toml
@@ -1,4 +1,9 @@
-#
+[build]
+rustdocflags = [
+  "-Dwarnings",
+  "-Arustdoc::redundant_explicit_links", # stylistic
+]
+
 # An auto defined `clippy` feature was introduced,
 # but it was found to clash with user defined features,
 # so was renamed to `cargo-clippy`.
@@ -30,4 +35,5 @@ rustflags = [
   "-Aclippy::derivable_impls",           # false positives
   "-Aclippy::stable_sort_primitive",     # prefer stable sort
   "-Aclippy::extra-unused-type-parameters", # stylistic
+  "-Aclippy::default_constructed_unit_structs", # stylistic
 ]
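
For context, `rustdoc::redundant_explicit_links` (newly allowed above as stylistic) flags doc links whose explicit target merely repeats what the link text already resolves to. A minimal Rust illustration of what `-Dwarnings` would otherwise reject:

```rust
/// Both links resolve to the same item; rustdoc flags the explicit `(Vec)`
/// target in the first one as redundant, since [`Vec`] alone already links there.
///
/// See [`Vec`](Vec) and [`Vec`].
pub fn example() {}
```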
diff --git a/.github/review-bot.yml b/.github/review-bot.yml
index c9eadd6e58ba1..581e33762608a 100644
--- a/.github/review-bot.yml
+++ b/.github/review-bot.yml
@@ -10,7 +10,7 @@ rules:
         - ^\.cargo/.*
       exclude: 
         - ^./gitlab/pipeline/zombienet.*
-    min_approvals: 2
+    minApprovals: 2
     type: basic
     teams:
       - ci
@@ -27,7 +27,7 @@ rules:
       exclude: 
         - ^polkadot/runtime\/(kusama|polkadot)\/src\/weights\/.+\.rs$
         - ^substrate\/frame\/.+\.md$
-    min_approvals: 1
+    minApprovals: 1
     allowedToSkipRule:
       teams:
         - core-devs
@@ -54,7 +54,7 @@ rules:
         - ^\.gitlab/.*
         - ^\.config/nextest.toml
         - ^\.cargo/.*
-    min_approvals: 2
+    minApprovals: 2
     type: basic
     teams:
       - core-devs
@@ -70,10 +70,10 @@ rules:
         - ^cumulus/parachains/common/src/[^/]+\.rs$
     type: and-distinct
     reviewers:
-      - min_approvals: 1
+      - minApprovals: 1
         teams:
           - locks-review
-      - min_approvals: 1
+      - minApprovals: 1
         teams:
           - polkadot-review
 
@@ -83,7 +83,7 @@ rules:
     condition: 
       include:
         - ^bridges/.*
-    min_approvals: 1
+    minApprovals: 1
     teams:
       - bridges-core
 
@@ -95,10 +95,10 @@ rules:
         - ^substrate/frame/(?!.*(nfts/.*|uniques/.*|babe/.*|grandpa/.*|beefy|merkle-mountain-range/.*|contracts/.*|election|nomination-pools/.*|staking/.*|aura/.*))
     type: "and"
     reviewers:
-      - min_approvals: 2
+      - minApprovals: 2
         teams:
           - core-devs
-      - min_approvals: 1
+      - minApprovals: 1
         teams:
           - frame-coders
 
@@ -107,15 +107,14 @@ rules:
     condition:
       include: 
         - review-bot\.yml
-    min_approvals: 2
     type: "and"
     reviewers:
-      - min_approvals: 1
+      - minApprovals: 1
         teams:
           - opstooling
-      - min_approvals: 1
+      - minApprovals: 1
         teams:
           - locks-review
-      - min_approvals: 1
+      - minApprovals: 1
         teams:
           - ci
diff --git a/.github/workflows/review-bot.yml b/.github/workflows/review-bot.yml
index aeb33b5da3d3b..b9799935abe67 100644
--- a/.github/workflows/review-bot.yml
+++ b/.github/workflows/review-bot.yml
@@ -24,7 +24,7 @@ jobs:
           app_id: ${{ secrets.REVIEW_APP_ID }}
           private_key: ${{ secrets.REVIEW_APP_KEY }}
       - name: "Evaluates PR reviews and assigns reviewers"
-        uses: paritytech/review-bot@v1.1.0
+        uses: paritytech/review-bot@v2.0.1
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
           team-token: ${{ steps.team_token.outputs.token }}
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index ee6b9c9873339..61451a9c46201 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -30,7 +30,7 @@ variables:
   RUSTY_CACHIER_COMPRESSION_METHOD: zstd
   NEXTEST_FAILURE_OUTPUT: immediate-final
   NEXTEST_SUCCESS_OUTPUT: final
-  ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.68"
+  ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.69"
   DOCKER_IMAGES_VERSION: "${CI_COMMIT_REF_NAME}-${CI_COMMIT_SHORT_SHA}"
 
 default:
diff --git a/.gitlab/pipeline/build.yml b/.gitlab/pipeline/build.yml
index 029c0f6a3cddd..fefa3739a9ff4 100644
--- a/.gitlab/pipeline/build.yml
+++ b/.gitlab/pipeline/build.yml
@@ -91,6 +91,7 @@ build-rustdoc:
     - .run-immediately
   variables:
     SKIP_WASM_BUILD: 1
+    RUSTDOCFLAGS: ""
   artifacts:
     name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}-doc"
     when: on_success
@@ -99,7 +100,6 @@ build-rustdoc:
       - ./crate-docs/
   script:
     # FIXME: it fails with `RUSTDOCFLAGS="-Dwarnings"` and `--all-features`
-    # FIXME: return to stable when https://github.com/rust-lang/rust/issues/96937 gets into stable
     - time cargo doc --features try-runtime,experimental --workspace --no-deps
     - rm -f ./target/doc/.lock
     - mv ./target/doc ./crate-docs
diff --git a/.gitlab/pipeline/test.yml b/.gitlab/pipeline/test.yml
index e1e8b96bca5d6..12ce2140b1468 100644
--- a/.gitlab/pipeline/test.yml
+++ b/.gitlab/pipeline/test.yml
@@ -181,7 +181,6 @@ test-rustdoc:
     - .run-immediately
   variables:
     SKIP_WASM_BUILD: 1
-    RUSTDOCFLAGS: "-Dwarnings"
   script:
     - time cargo doc --workspace --all-features --no-deps
   allow_failure: true
diff --git a/Cargo.lock b/Cargo.lock
index e0ca0b012c640..58bacc9db7399 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -116,7 +116,7 @@ dependencies = [
  "cipher 0.3.0",
  "ctr 0.8.0",
  "ghash 0.4.4",
- "subtle",
+ "subtle 2.4.1",
 ]
 
 [[package]]
@@ -130,7 +130,7 @@ dependencies = [
  "cipher 0.4.4",
  "ctr 0.9.2",
  "ghash 0.5.0",
- "subtle",
+ "subtle 2.4.1",
 ]
 
 [[package]]
@@ -229,9 +229,9 @@ dependencies = [
 
 [[package]]
 name = "anstream"
-version = "0.5.0"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b1f58811cfac344940f1a400b6e6231ce35171f614f26439e80f8c1465c5cc0c"
+checksum = "2ab91ebe16eb252986481c5b62f6098f3b698a45e34b5b98200cf20dd2484a44"
 dependencies = [
  "anstyle",
  "anstyle-parse",
@@ -267,9 +267,9 @@ dependencies = [
 
 [[package]]
 name = "anstyle-wincon"
-version = "2.1.0"
+version = "3.0.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "58f54d10c6dfa51283a066ceab3ec1ab78d13fae00aa49243a45e4571fb79dfd"
+checksum = "f0699d10d2f4d628a98ee7b57b289abbc98ff3bad977cb3152709d4bf2330628"
 dependencies = [
  "anstyle",
  "windows-sys 0.48.0",
@@ -571,6 +571,12 @@ dependencies = [
  "sha3",
 ]
 
+[[package]]
+name = "array-bytes"
+version = "4.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f52f63c5c1316a16a4b35eaac8b76a98248961a533f061684cb2a7cb0eafb6c6"
+
 [[package]]
 name = "array-bytes"
 version = "6.1.0"
@@ -840,20 +846,27 @@ version = "1.0.0"
 dependencies = [
  "assert_matches",
  "asset-hub-westend-runtime",
+ "cumulus-pallet-dmp-queue",
+ "cumulus-pallet-parachain-system",
  "frame-support",
  "frame-system",
  "integration-tests-common",
  "pallet-asset-conversion",
+ "pallet-asset-rate",
  "pallet-assets",
  "pallet-balances",
+ "pallet-treasury",
  "pallet-xcm",
  "parachains-common",
  "parity-scale-codec",
  "polkadot-core-primitives",
  "polkadot-parachain-primitives",
+ "polkadot-runtime-common",
  "polkadot-runtime-parachains",
  "sp-runtime",
  "staging-xcm",
+ "staging-xcm-builder",
+ "staging-xcm-executor",
  "xcm-emulator",
 ]
 
@@ -1083,17 +1096,6 @@ dependencies = [
  "windows-sys 0.48.0",
 ]
 
-[[package]]
-name = "async-recursion"
-version = "1.0.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0e97ce7de6cf12de5d7226c73f5ba9811622f4db3a5b91b55c53e987e5f91cba"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn 2.0.37",
-]
-
 [[package]]
 name = "async-stream"
 version = "0.3.5"
@@ -1113,7 +1115,7 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.37",
+ "syn 2.0.38",
 ]
 
 [[package]]
@@ -1130,7 +1132,7 @@ checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.37",
+ "syn 2.0.38",
 ]
 
 [[package]]
@@ -1269,7 +1271,7 @@ dependencies = [
 name = "binary-merkle-tree"
 version = "4.0.0-dev"
 dependencies = [
- "array-bytes",
+ "array-bytes 6.1.0",
  "env_logger 0.9.3",
  "hash-db",
  "log",
@@ -1304,7 +1306,7 @@ dependencies = [
  "regex",
  "rustc-hash",
  "shlex",
- "syn 2.0.37",
+ "syn 2.0.38",
 ]
 
 [[package]]
@@ -1346,6 +1348,18 @@ dependencies = [
  "wyz",
 ]
 
+[[package]]
+name = "blake2"
+version = "0.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "94cb07b0da6a73955f8fb85d24c466778e70cda767a568229b104f0264089330"
+dependencies = [
+ "byte-tools",
+ "crypto-mac 0.7.0",
+ "digest 0.8.1",
+ "opaque-debug 0.2.3",
+]
+
 [[package]]
 name = "blake2"
 version = "0.10.6"
@@ -1654,6 +1668,23 @@ dependencies = [
  "sp-std",
 ]
 
+[[package]]
+name = "bp-polkadot-bulletin"
+version = "0.1.0"
+dependencies = [
+ "bp-header-chain",
+ "bp-messages",
+ "bp-polkadot-core",
+ "bp-runtime",
+ "frame-support",
+ "frame-system",
+ "parity-scale-codec",
+ "scale-info",
+ "sp-api",
+ "sp-runtime",
+ "sp-std",
+]
+
 [[package]]
 name = "bp-polkadot-core"
 version = "0.1.0"
@@ -2156,6 +2187,16 @@ dependencies = [
  "pkg-config",
 ]
 
+[[package]]
+name = "c2-chacha"
+version = "0.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d27dae93fe7b1e0424dc57179ac396908c26b035a87234809f5c4dfd1b47dc80"
+dependencies = [
+ "cipher 0.2.5",
+ "ppv-lite86",
+]
+
 [[package]]
 name = "camino"
 version = "1.1.6"
@@ -2212,7 +2253,7 @@ checksum = "5aca1a8fbc20b50ac9673ff014abfb2b5f4085ee1a850d408f14a159c5853ac7"
 dependencies = [
  "aead 0.3.2",
  "cipher 0.2.5",
- "subtle",
+ "subtle 2.4.1",
 ]
 
 [[package]]
@@ -2245,6 +2286,16 @@ version = "0.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e"
 
+[[package]]
+name = "chacha"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ddf3c081b5fba1e5615640aae998e0fbd10c24cbd897ee39ed754a77601a4862"
+dependencies = [
+ "byteorder",
+ "keystream",
+]
+
 [[package]]
 name = "chacha20"
 version = "0.8.2"
@@ -2286,7 +2337,7 @@ name = "chain-spec-builder"
 version = "2.0.0"
 dependencies = [
  "ansi_term",
- "clap 4.4.4",
+ "clap 4.4.6",
  "node-cli",
  "rand 0.8.5",
  "sc-chain-spec",
@@ -2416,9 +2467,9 @@ dependencies = [
 
 [[package]]
 name = "clap"
-version = "4.4.4"
+version = "4.4.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b1d7b8d5ec32af0fadc644bf1fd509a688c2103b185644bb1e29d164e0703136"
+checksum = "d04704f56c2cde07f43e8e2c154b43f216dc5c92fc98ada720177362f953b956"
 dependencies = [
  "clap_builder",
  "clap_derive 4.4.2",
@@ -2426,9 +2477,9 @@ dependencies = [
 
 [[package]]
 name = "clap_builder"
-version = "4.4.4"
+version = "4.4.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5179bb514e4d7c2051749d8fcefa2ed6d06a9f4e6d69faf3805f5d80b8cf8d56"
+checksum = "0e231faeaca65ebd1ea3c737966bf858971cd38c3849107aa3ea7de90a804e45"
 dependencies = [
  "anstream",
  "anstyle",
@@ -2442,7 +2493,7 @@ version = "4.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "586a385f7ef2f8b4d86bddaa0c094794e7ccbfe5ffef1f434fe928143fc783a5"
 dependencies = [
- "clap 4.4.4",
+ "clap 4.4.6",
 ]
 
 [[package]]
@@ -2467,7 +2518,7 @@ dependencies = [
  "heck",
  "proc-macro2",
  "quote",
- "syn 2.0.37",
+ "syn 2.0.38",
 ]
 
 [[package]]
@@ -3014,7 +3065,7 @@ dependencies = [
  "anes",
  "cast",
  "ciborium",
- "clap 4.4.4",
+ "clap 4.4.6",
  "criterion-plot",
  "futures",
  "is-terminal",
@@ -3110,7 +3161,7 @@ checksum = "ef2b4b23cddf68b89b8f8069890e8c270d54e2d5fe1b143820234805e4cb17ef"
 dependencies = [
  "generic-array 0.14.7",
  "rand_core 0.6.4",
- "subtle",
+ "subtle 2.4.1",
  "zeroize",
 ]
 
@@ -3122,7 +3173,7 @@ checksum = "cf4c2f4e1afd912bc40bfd6fed5d9dc1f288e0ba01bfcc835cc5bc3eb13efe15"
 dependencies = [
  "generic-array 0.14.7",
  "rand_core 0.6.4",
- "subtle",
+ "subtle 2.4.1",
  "zeroize",
 ]
 
@@ -3137,6 +3188,16 @@ dependencies = [
  "typenum",
 ]
 
+[[package]]
+name = "crypto-mac"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4434400df11d95d556bac068ddfedd482915eb18fe8bea89bc80b6e4b1c179e5"
+dependencies = [
+ "generic-array 0.12.4",
+ "subtle 1.0.0",
+]
+
 [[package]]
 name = "crypto-mac"
 version = "0.8.0"
@@ -3144,7 +3205,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab"
 dependencies = [
  "generic-array 0.14.7",
- "subtle",
+ "subtle 2.4.1",
 ]
 
 [[package]]
@@ -3154,7 +3215,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "b1d1a86f49236c215f271d40892d5fc950490551400b02ef360692c29815c714"
 dependencies = [
  "generic-array 0.14.7",
- "subtle",
+ "subtle 2.4.1",
 ]
 
 [[package]]
@@ -3179,7 +3240,7 @@ dependencies = [
 name = "cumulus-client-cli"
 version = "0.1.0"
 dependencies = [
- "clap 4.4.4",
+ "clap 4.4.6",
  "parity-scale-codec",
  "sc-chain-spec",
  "sc-cli",
@@ -3503,7 +3564,7 @@ dependencies = [
  "proc-macro-crate",
  "proc-macro2",
  "quote",
- "syn 2.0.37",
+ "syn 2.0.38",
 ]
 
 [[package]]
@@ -3717,7 +3778,7 @@ dependencies = [
 name = "cumulus-relay-chain-minimal-node"
 version = "0.1.0"
 dependencies = [
- "array-bytes",
+ "array-bytes 6.1.0",
  "async-trait",
  "cumulus-primitives-core",
  "cumulus-relay-chain-interface",
@@ -3864,7 +3925,7 @@ name = "cumulus-test-service"
 version = "0.1.0"
 dependencies = [
  "async-trait",
- "clap 4.4.4",
+ "clap 4.4.6",
  "criterion 0.5.1",
  "cumulus-client-cli",
  "cumulus-client-consensus-common",
@@ -3944,7 +4005,7 @@ dependencies = [
  "byteorder",
  "digest 0.8.1",
  "rand_core 0.5.1",
- "subtle",
+ "subtle 2.4.1",
  "zeroize",
 ]
 
@@ -3957,7 +4018,7 @@ dependencies = [
  "byteorder",
  "digest 0.9.0",
  "rand_core 0.5.1",
- "subtle",
+ "subtle 2.4.1",
  "zeroize",
 ]
 
@@ -3974,7 +4035,7 @@ dependencies = [
  "fiat-crypto",
  "platforms",
  "rustc_version 0.4.0",
- "subtle",
+ "subtle 2.4.1",
  "zeroize",
 ]
 
@@ -3986,7 +4047,7 @@ checksum = "83fdaf97f4804dcebfa5862639bc9ce4121e82140bec2a987ac5140294865b5b"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.37",
+ "syn 2.0.38",
 ]
 
 [[package]]
@@ -4026,7 +4087,7 @@ dependencies = [
  "proc-macro2",
  "quote",
  "scratch",
- "syn 2.0.37",
+ "syn 2.0.38",
 ]
 
 [[package]]
@@ -4043,7 +4104,7 @@ checksum = "50c49547d73ba8dcfd4ad7325d64c6d5391ff4224d498fc39a6f3f49825a530d"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.37",
+ "syn 2.0.38",
 ]
 
 [[package]]
@@ -4289,7 +4350,7 @@ dependencies = [
  "block-buffer 0.10.4",
  "const-oid",
  "crypto-common",
- "subtle",
+ "subtle 2.4.1",
 ]
 
 [[package]]
@@ -4342,7 +4403,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.37",
+ "syn 2.0.38",
 ]
 
 [[package]]
@@ -4404,7 +4465,7 @@ dependencies = [
  "proc-macro2",
  "quote",
  "regex",
- "syn 2.0.37",
+ "syn 2.0.38",
  "termcolor",
  "toml 0.7.6",
  "walkdir",
@@ -4558,7 +4619,7 @@ dependencies = [
  "pkcs8 0.9.0",
  "rand_core 0.6.4",
  "sec1 0.3.0",
- "subtle",
+ "subtle 2.4.1",
  "zeroize",
 ]
 
@@ -4577,7 +4638,7 @@ dependencies = [
  "pkcs8 0.10.2",
  "rand_core 0.6.4",
  "sec1 0.7.3",
- "subtle",
+ "subtle 2.4.1",
  "zeroize",
 ]
 
@@ -4625,7 +4686,7 @@ checksum = "5e9a1f9f7d83e59740248a6e14ecf93929ade55027844dfcea78beafccc15745"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.37",
+ "syn 2.0.38",
 ]
 
 [[package]]
@@ -4636,7 +4697,7 @@ checksum = "c2ad8cef1d801a4686bfd8919f0b30eac4c8e48968c437a6405ded4fb5272d2b"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.37",
+ "syn 2.0.38",
 ]
 
 [[package]]
@@ -4777,11 +4838,11 @@ version = "2.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "5f86a749cf851891866c10515ef6c299b5c69661465e9c3bbe7e07a2b77fb0f7"
 dependencies = [
- "blake2",
+ "blake2 0.10.6",
  "fs-err",
  "proc-macro2",
  "quote",
- "syn 2.0.37",
+ "syn 2.0.38",
 ]
 
 [[package]]
@@ -4878,7 +4939,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "d013fc25338cc558c5c2cfbad646908fb23591e2404481826742b651c9af7160"
 dependencies = [
  "rand_core 0.6.4",
- "subtle",
+ "subtle 2.4.1",
 ]
 
 [[package]]
@@ -4888,7 +4949,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "ded41244b729663b1e574f1b4fb731469f69f79c17667b5d776b16cda0479449"
 dependencies = [
  "rand_core 0.6.4",
- "subtle",
+ "subtle 2.4.1",
 ]
 
 [[package]]
@@ -5041,7 +5102,7 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa"
 name = "frame-benchmarking"
 version = "4.0.0-dev"
 dependencies = [
- "array-bytes",
+ "array-bytes 6.1.0",
  "frame-support",
  "frame-support-procedural",
  "frame-system",
@@ -5069,9 +5130,9 @@ name = "frame-benchmarking-cli"
 version = "4.0.0-dev"
 dependencies = [
  "Inflector",
- "array-bytes",
+ "array-bytes 6.1.0",
  "chrono",
- "clap 4.4.4",
+ "clap 4.4.6",
  "comfy-table",
  "frame-benchmarking",
  "frame-support",
@@ -5137,7 +5198,7 @@ dependencies = [
  "quote",
  "scale-info",
  "sp-arithmetic",
- "syn 2.0.37",
+ "syn 2.0.38",
  "trybuild",
 ]
 
@@ -5163,7 +5224,7 @@ dependencies = [
 name = "frame-election-solution-type-fuzzer"
 version = "2.0.0-alpha.5"
 dependencies = [
- "clap 4.4.4",
+ "clap 4.4.6",
  "frame-election-provider-solution-type",
  "frame-election-provider-support",
  "frame-support",
@@ -5180,7 +5241,7 @@ dependencies = [
 name = "frame-executive"
 version = "4.0.0-dev"
 dependencies = [
- "array-bytes",
+ "array-bytes 6.1.0",
  "frame-support",
  "frame-system",
  "frame-try-runtime",
@@ -5214,7 +5275,6 @@ dependencies = [
 name = "frame-remote-externalities"
 version = "0.10.0-dev"
 dependencies = [
- "async-recursion",
  "futures",
  "indicatif",
  "jsonrpsee",
@@ -5237,7 +5297,7 @@ name = "frame-support"
 version = "4.0.0-dev"
 dependencies = [
  "aquamarine",
- "array-bytes",
+ "array-bytes 6.1.0",
  "assert_matches",
  "bitflags 1.3.2",
  "docify",
@@ -5289,7 +5349,8 @@ dependencies = [
  "proc-macro-warning",
  "proc-macro2",
  "quote",
- "syn 2.0.37",
+ "sp-core-hashing",
+ "syn 2.0.38",
 ]
 
 [[package]]
@@ -5300,7 +5361,7 @@ dependencies = [
  "proc-macro-crate",
  "proc-macro2",
  "quote",
- "syn 2.0.37",
+ "syn 2.0.38",
 ]
 
 [[package]]
@@ -5309,7 +5370,7 @@ version = "3.0.0"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.37",
+ "syn 2.0.38",
 ]
 
 [[package]]
@@ -5532,7 +5593,7 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.37",
+ "syn 2.0.38",
 ]
 
 [[package]]
@@ -5721,6 +5782,7 @@ dependencies = [
  "cumulus-pallet-aura-ext",
  "cumulus-pallet-parachain-system",
  "cumulus-pallet-xcm",
+ "cumulus-primitives-aura",
  "cumulus-primitives-core",
  "cumulus-primitives-timestamp",
  "frame-benchmarking",
@@ -5765,7 +5827,7 @@ checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7"
 dependencies = [
  "ff 0.12.1",
  "rand_core 0.6.4",
- "subtle",
+ "subtle 2.4.1",
 ]
 
 [[package]]
@@ -5776,7 +5838,7 @@ checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63"
 dependencies = [
  "ff 0.13.0",
  "rand_core 0.6.4",
- "subtle",
+ "subtle 2.4.1",
 ]
 
 [[package]]
@@ -5862,6 +5924,15 @@ dependencies = [
  "serde",
 ]
 
+[[package]]
+name = "hashlink"
+version = "0.8.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7"
+dependencies = [
+ "hashbrown 0.14.0",
+]
+
 [[package]]
 name = "heck"
 version = "0.4.1"
@@ -6637,6 +6708,12 @@ dependencies = [
  "tiny-keccak",
 ]
 
+[[package]]
+name = "keystream"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c33070833c9ee02266356de0c43f723152bd38bd96ddf52c82b3af10c9138b28"
+
 [[package]]
 name = "kitchensink-runtime"
 version = "3.0.0-dev"
@@ -6685,6 +6762,7 @@ dependencies = [
  "pallet-lottery",
  "pallet-membership",
  "pallet-message-queue",
+ "pallet-mixnet",
  "pallet-mmr",
  "pallet-multisig",
  "pallet-nft-fractionalization",
@@ -6738,6 +6816,7 @@ dependencies = [
  "sp-genesis-builder",
  "sp-inherents",
  "sp-io",
+ "sp-mixnet",
  "sp-offchain",
  "sp-runtime",
  "sp-session",
@@ -6795,9 +6874,9 @@ dependencies = [
 
 [[package]]
 name = "landlock"
-version = "0.2.0"
+version = "0.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "520baa32708c4e957d2fc3a186bc5bd8d26637c33137f399ddfc202adb240068"
+checksum = "1530c5b973eeed4ac216af7e24baf5737645a6272e361f1fb95710678b67d9cc"
 dependencies = [
  "enumflags2",
  "libc",
@@ -7340,7 +7419,7 @@ checksum = "5be9b9bb642d8522a44d533eab56c16c738301965504753b03ad1de3425d5451"
 dependencies = [
  "crunchy",
  "digest 0.9.0",
- "subtle",
+ "subtle 2.4.1",
 ]
 
 [[package]]
@@ -7423,6 +7502,18 @@ version = "0.4.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "57bcfdad1b858c2db7c38303a6d2ad4dfaf5eb53dfeb0910128b2c26d6158503"
 
+[[package]]
+name = "lioness"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4ae926706ba42c425c9457121178330d75e273df2e82e28b758faf3de3a9acb9"
+dependencies = [
+ "arrayref",
+ "blake2 0.8.1",
+ "chacha",
+ "keystream",
+]
+
 [[package]]
 name = "lite-json"
 version = "0.2.0"
@@ -7525,50 +7616,50 @@ dependencies = [
 
 [[package]]
 name = "macro_magic"
-version = "0.4.2"
+version = "0.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "aee866bfee30d2d7e83835a4574aad5b45adba4cc807f2a3bbba974e5d4383c9"
+checksum = "e03844fc635e92f3a0067e25fa4bf3e3dbf3f2927bf3aa01bb7bc8f1c428949d"
 dependencies = [
  "macro_magic_core",
  "macro_magic_macros",
  "quote",
- "syn 2.0.37",
+ "syn 2.0.38",
 ]
 
 [[package]]
 name = "macro_magic_core"
-version = "0.4.2"
+version = "0.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7e766a20fd9c72bab3e1e64ed63f36bd08410e75803813df210d1ce297d7ad00"
+checksum = "468155613a44cfd825f1fb0ffa532b018253920d404e6fca1e8d43155198a46d"
 dependencies = [
  "const-random",
  "derive-syn-parse",
  "macro_magic_core_macros",
  "proc-macro2",
  "quote",
- "syn 2.0.37",
+ "syn 2.0.38",
 ]
 
 [[package]]
 name = "macro_magic_core_macros"
-version = "0.4.2"
+version = "0.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c12469fc165526520dff2807c2975310ab47cf7190a45b99b49a7dc8befab17b"
+checksum = "9ea73aa640dc01d62a590d48c0c3521ed739d53b27f919b25c3551e233481654"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.37",
+ "syn 2.0.38",
 ]
 
 [[package]]
 name = "macro_magic_macros"
-version = "0.4.2"
+version = "0.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b8fb85ec1620619edf2984a7693497d4ec88a9665d8b87e942856884c92dbf2a"
+checksum = "ef9d79ae96aaba821963320eb2b6e34d17df1e5a83d8a1985c29cc5be59577b3"
 dependencies = [
  "macro_magic_core",
  "quote",
- "syn 2.0.37",
+ "syn 2.0.38",
 ]
 
 [[package]]
@@ -7753,6 +7844,31 @@ dependencies = [
  "windows-sys 0.48.0",
 ]
 
+[[package]]
+name = "mixnet"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "daa3eb39495d8e2e2947a1d862852c90cc6a4a8845f8b41c8829cb9fcc047f4a"
+dependencies = [
+ "arrayref",
+ "arrayvec 0.7.4",
+ "bitflags 1.3.2",
+ "blake2 0.10.6",
+ "c2-chacha",
+ "curve25519-dalek 4.0.0",
+ "either",
+ "hashlink",
+ "lioness",
+ "log",
+ "parking_lot 0.12.1",
+ "rand 0.8.5",
+ "rand_chacha 0.3.1",
+ "rand_distr",
+ "subtle 2.4.1",
+ "thiserror",
+ "zeroize",
+]
+
 [[package]]
 name = "mmr-gadget"
 version = "4.0.0-dev"
@@ -8054,8 +8170,8 @@ checksum = "43794a0ace135be66a25d3ae77d41b91615fb68ae937f904090203e81f755b65"
 name = "node-bench"
 version = "0.9.0-dev"
 dependencies = [
- "array-bytes",
- "clap 4.4.4",
+ "array-bytes 6.1.0",
+ "clap 4.4.6",
  "derive_more",
  "fs_extra",
  "futures",
@@ -8090,9 +8206,9 @@ dependencies = [
 name = "node-cli"
 version = "3.0.0-dev"
 dependencies = [
- "array-bytes",
+ "array-bytes 6.1.0",
  "assert_cmd",
- "clap 4.4.4",
+ "clap 4.4.6",
  "clap_complete",
  "criterion 0.4.0",
  "frame-benchmarking-cli",
@@ -8131,6 +8247,7 @@ dependencies = [
  "sc-consensus-slots",
  "sc-executor",
  "sc-keystore",
+ "sc-mixnet",
  "sc-network",
  "sc-network-common",
  "sc-network-statement",
@@ -8160,6 +8277,7 @@ dependencies = [
  "sp-io",
  "sp-keyring",
  "sp-keystore",
+ "sp-mixnet",
  "sp-runtime",
  "sp-statement-store",
  "sp-timestamp",
@@ -8218,7 +8336,7 @@ dependencies = [
 name = "node-inspect"
 version = "0.9.0-dev"
 dependencies = [
- "clap 4.4.4",
+ "clap 4.4.6",
  "parity-scale-codec",
  "sc-cli",
  "sc-client-api",
@@ -8251,6 +8369,7 @@ dependencies = [
  "sc-consensus-babe-rpc",
  "sc-consensus-grandpa",
  "sc-consensus-grandpa-rpc",
+ "sc-mixnet",
  "sc-rpc",
  "sc-rpc-api",
  "sc-rpc-spec-v2",
@@ -8272,7 +8391,7 @@ dependencies = [
 name = "node-runtime-generate-bags"
 version = "3.0.0"
 dependencies = [
- "clap 4.4.4",
+ "clap 4.4.6",
  "generate-bags",
  "kitchensink-runtime",
 ]
@@ -8281,7 +8400,7 @@ dependencies = [
 name = "node-template"
 version = "4.0.0-dev"
 dependencies = [
- "clap 4.4.4",
+ "clap 4.4.6",
  "frame-benchmarking",
  "frame-benchmarking-cli",
  "frame-system",
@@ -8324,7 +8443,7 @@ dependencies = [
 name = "node-template-release"
 version = "3.0.0"
 dependencies = [
- "clap 4.4.4",
+ "clap 4.4.6",
  "flate2",
  "fs_extra",
  "glob",
@@ -8697,7 +8816,7 @@ dependencies = [
 name = "pallet-alliance"
 version = "4.0.0-dev"
 dependencies = [
- "array-bytes",
+ "array-bytes 6.1.0",
  "frame-benchmarking",
  "frame-support",
  "frame-system",
@@ -9000,7 +9119,7 @@ dependencies = [
 name = "pallet-beefy-mmr"
 version = "4.0.0-dev"
 dependencies = [
- "array-bytes",
+ "array-bytes 6.1.0",
  "binary-merkle-tree",
  "frame-support",
  "frame-system",
@@ -9223,7 +9342,7 @@ dependencies = [
 name = "pallet-contracts"
 version = "4.0.0-dev"
 dependencies = [
- "array-bytes",
+ "array-bytes 6.1.0",
  "assert_matches",
  "bitflags 1.3.2",
  "env_logger 0.9.3",
@@ -9276,7 +9395,7 @@ version = "4.0.0-dev"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.37",
+ "syn 2.0.38",
 ]
 
 [[package]]
@@ -9557,7 +9676,7 @@ dependencies = [
 name = "pallet-glutton"
 version = "4.0.0-dev"
 dependencies = [
- "blake2",
+ "blake2 0.10.6",
  "frame-benchmarking",
  "frame-support",
  "frame-system",
@@ -9725,11 +9844,30 @@ dependencies = [
  "sp-weights",
 ]
 
+[[package]]
+name = "pallet-mixnet"
+version = "0.1.0-dev"
+dependencies = [
+ "frame-benchmarking",
+ "frame-support",
+ "frame-system",
+ "log",
+ "parity-scale-codec",
+ "scale-info",
+ "serde",
+ "sp-application-crypto",
+ "sp-arithmetic",
+ "sp-io",
+ "sp-mixnet",
+ "sp-runtime",
+ "sp-std",
+]
+
 [[package]]
 name = "pallet-mmr"
 version = "4.0.0-dev"
 dependencies = [
- "array-bytes",
+ "array-bytes 6.1.0",
  "env_logger 0.9.3",
  "frame-benchmarking",
  "frame-support",
@@ -10347,7 +10485,7 @@ dependencies = [
  "proc-macro2",
  "quote",
  "sp-runtime",
- "syn 2.0.37",
+ "syn 2.0.38",
 ]
 
 [[package]]
@@ -10527,7 +10665,7 @@ dependencies = [
 name = "pallet-transaction-storage"
 version = "4.0.0-dev"
 dependencies = [
- "array-bytes",
+ "array-bytes 6.1.0",
  "frame-benchmarking",
  "frame-support",
  "frame-system",
@@ -10548,6 +10686,7 @@ dependencies = [
 name = "pallet-treasury"
 version = "4.0.0-dev"
 dependencies = [
+ "docify",
  "frame-benchmarking",
  "frame-support",
  "frame-system",
@@ -10737,7 +10876,7 @@ dependencies = [
 name = "parachain-template-node"
 version = "0.1.0"
 dependencies = [
- "clap 4.4.4",
+ "clap 4.4.6",
  "color-print",
  "cumulus-client-cli",
  "cumulus-client-collator",
@@ -10919,7 +11058,7 @@ version = "0.4.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "78f19d20a0d2cc52327a88d131fa1c4ea81ea4a04714aedcfeca2dd410049cf8"
 dependencies = [
- "blake2",
+ "blake2 0.10.6",
  "crc32fast",
  "fs2",
  "hex",
@@ -11214,7 +11353,7 @@ dependencies = [
  "pest_meta",
  "proc-macro2",
  "quote",
- "syn 2.0.37",
+ "syn 2.0.38",
 ]
 
 [[package]]
@@ -11255,7 +11394,7 @@ checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.37",
+ "syn 2.0.38",
 ]
 
 [[package]]
@@ -11474,7 +11613,7 @@ dependencies = [
 name = "polkadot-cli"
 version = "1.1.0"
 dependencies = [
- "clap 4.4.4",
+ "clap 4.4.6",
  "frame-benchmarking-cli",
  "futures",
  "log",
@@ -12296,7 +12435,7 @@ dependencies = [
  "bridge-hub-kusama-runtime",
  "bridge-hub-polkadot-runtime",
  "bridge-hub-rococo-runtime",
- "clap 4.4.4",
+ "clap 4.4.6",
  "collectives-polkadot-runtime",
  "color-print",
  "contracts-rococo-runtime",
@@ -12307,6 +12446,7 @@ dependencies = [
  "cumulus-client-consensus-proposer",
  "cumulus-client-consensus-relay-chain",
  "cumulus-client-service",
+ "cumulus-primitives-aura",
  "cumulus-primitives-core",
  "cumulus-primitives-parachain-inherent",
  "cumulus-relay-chain-interface",
@@ -12465,6 +12605,7 @@ dependencies = [
  "impl-trait-for-tuples",
  "libsecp256k1",
  "log",
+ "pallet-asset-rate",
  "pallet-authorship",
  "pallet-babe",
  "pallet-balances",
@@ -12499,6 +12640,7 @@ dependencies = [
  "sp-staking",
  "sp-std",
  "staging-xcm",
+ "staging-xcm-builder",
  "static_assertions",
 ]
 
@@ -12772,7 +12914,7 @@ version = "1.0.0"
 dependencies = [
  "assert_matches",
  "async-trait",
- "clap 4.4.4",
+ "clap 4.4.6",
  "color-eyre",
  "futures",
  "futures-timer",
@@ -12919,7 +13061,7 @@ dependencies = [
 name = "polkadot-voter-bags"
 version = "1.0.0"
 dependencies = [
- "clap 4.4.4",
+ "clap 4.4.6",
  "generate-bags",
  "sp-io",
  "westend-runtime",
@@ -13097,7 +13239,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "6c64d9ba0963cdcea2e1b2230fbae2bab30eb25a174be395c41e764bfb65dd62"
 dependencies = [
  "proc-macro2",
- "syn 2.0.37",
+ "syn 2.0.38",
 ]
 
 [[package]]
@@ -13173,20 +13315,20 @@ checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068"
 
 [[package]]
 name = "proc-macro-warning"
-version = "0.4.2"
+version = "1.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3d1eaa7fa0aa1929ffdf7eeb6eac234dde6268914a14ad44d23521ab6a9b258e"
+checksum = "9b698b0b09d40e9b7c1a47b132d66a8b54bcd20583d9b6d06e4535e383b4405c"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.37",
+ "syn 2.0.38",
 ]
 
 [[package]]
 name = "proc-macro2"
-version = "1.0.67"
+version = "1.0.68"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3d433d9f1a3e8c1263d9456598b16fec66f4acc9a74dacffd35c7bb09b3a1328"
+checksum = "5b1106fec09662ec6dd98ccac0f81cef56984d0b49f75c92d8cbad76e20c005c"
 dependencies = [
  "unicode-ident",
 ]
@@ -13225,7 +13367,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.37",
+ "syn 2.0.38",
 ]
 
 [[package]]
@@ -13616,7 +13758,7 @@ checksum = "7f7473c2cfcf90008193dd0e3e16599455cb601a9fce322b5bb55de799664925"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.37",
+ "syn 2.0.38",
 ]
 
 [[package]]
@@ -13679,7 +13821,7 @@ checksum = "e5ea92a5b6195c6ef2a0295ea818b312502c6fc94dde986c5553242e18fd4ce2"
 name = "remote-ext-tests-bags-list"
 version = "1.0.0"
 dependencies = [
- "clap 4.4.4",
+ "clap 4.4.6",
  "frame-system",
  "log",
  "pallet-bags-list-remote-tests",
@@ -13757,7 +13899,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "f8dd2a808d456c4a54e300a23e9f5a67e122c3024119acbfd73e3bf664491cb2"
 dependencies = [
  "hmac 0.12.1",
- "subtle",
+ "subtle 2.4.1",
 ]
 
 [[package]]
@@ -13770,7 +13912,7 @@ dependencies = [
  "ark-poly",
  "ark-serialize",
  "ark-std",
- "blake2",
+ "blake2 0.10.6",
  "common",
  "fflonk",
  "merlin 3.0.0",
@@ -13880,6 +14022,7 @@ dependencies = [
  "frame-try-runtime",
  "hex-literal",
  "log",
+ "pallet-asset-rate",
  "pallet-authority-discovery",
  "pallet-authorship",
  "pallet-babe",
@@ -14382,16 +14525,16 @@ dependencies = [
  "proc-macro-crate",
  "proc-macro2",
  "quote",
- "syn 2.0.37",
+ "syn 2.0.38",
 ]
 
 [[package]]
 name = "sc-cli"
 version = "0.10.0-dev"
 dependencies = [
- "array-bytes",
+ "array-bytes 6.1.0",
  "chrono",
- "clap 4.4.4",
+ "clap 4.4.6",
  "fdlimit",
  "futures",
  "futures-timer",
@@ -14405,6 +14548,7 @@ dependencies = [
  "sc-client-api",
  "sc-client-db",
  "sc-keystore",
+ "sc-mixnet",
  "sc-network",
  "sc-service",
  "sc-telemetry",
@@ -14459,7 +14603,7 @@ dependencies = [
 name = "sc-client-db"
 version = "0.10.0-dev"
 dependencies = [
- "array-bytes",
+ "array-bytes 6.1.0",
  "criterion 0.4.0",
  "hash-db",
  "kitchensink-runtime",
@@ -14624,7 +14768,7 @@ dependencies = [
 name = "sc-consensus-beefy"
 version = "4.0.0-dev"
 dependencies = [
- "array-bytes",
+ "array-bytes 6.1.0",
  "async-channel",
  "async-trait",
  "fnv",
@@ -14700,7 +14844,7 @@ name = "sc-consensus-grandpa"
 version = "0.10.0-dev"
 dependencies = [
  "ahash 0.8.3",
- "array-bytes",
+ "array-bytes 6.1.0",
  "assert_matches",
  "async-trait",
  "dyn-clone",
@@ -14855,7 +14999,7 @@ dependencies = [
 name = "sc-executor"
 version = "0.10.0-dev"
 dependencies = [
- "array-bytes",
+ "array-bytes 6.1.0",
  "assert_matches",
  "criterion 0.4.0",
  "env_logger 0.9.3",
@@ -14942,7 +15086,7 @@ dependencies = [
 name = "sc-keystore"
 version = "4.0.0-dev"
 dependencies = [
- "array-bytes",
+ "array-bytes 6.1.0",
  "parking_lot 0.12.1",
  "serde_json",
  "sp-application-crypto",
@@ -14952,11 +15096,38 @@ dependencies = [
  "thiserror",
 ]
 
+[[package]]
+name = "sc-mixnet"
+version = "0.1.0-dev"
+dependencies = [
+ "array-bytes 4.2.0",
+ "arrayvec 0.7.4",
+ "blake2 0.10.6",
+ "futures",
+ "futures-timer",
+ "libp2p-identity",
+ "log",
+ "mixnet",
+ "multiaddr",
+ "parity-scale-codec",
+ "parking_lot 0.12.1",
+ "sc-client-api",
+ "sc-network",
+ "sc-transaction-pool-api",
+ "sp-api",
+ "sp-consensus",
+ "sp-core",
+ "sp-keystore",
+ "sp-mixnet",
+ "sp-runtime",
+ "thiserror",
+]
+
 [[package]]
 name = "sc-network"
 version = "0.10.0-dev"
 dependencies = [
- "array-bytes",
+ "array-bytes 6.1.0",
  "assert_matches",
  "async-channel",
  "async-trait",
@@ -15071,7 +15242,7 @@ dependencies = [
 name = "sc-network-light"
 version = "0.10.0-dev"
 dependencies = [
- "array-bytes",
+ "array-bytes 6.1.0",
  "async-channel",
  "futures",
  "libp2p-identity",
@@ -15091,7 +15262,7 @@ dependencies = [
 name = "sc-network-statement"
 version = "0.10.0-dev"
 dependencies = [
- "array-bytes",
+ "array-bytes 6.1.0",
  "async-channel",
  "futures",
  "libp2p",
@@ -15108,7 +15279,7 @@ dependencies = [
 name = "sc-network-sync"
 version = "0.10.0-dev"
 dependencies = [
- "array-bytes",
+ "array-bytes 6.1.0",
  "async-channel",
  "async-trait",
  "fork-tree",
@@ -15178,7 +15349,7 @@ dependencies = [
 name = "sc-network-transactions"
 version = "0.10.0-dev"
 dependencies = [
- "array-bytes",
+ "array-bytes 6.1.0",
  "futures",
  "libp2p",
  "log",
@@ -15195,7 +15366,7 @@ dependencies = [
 name = "sc-offchain"
 version = "4.0.0-dev"
 dependencies = [
- "array-bytes",
+ "array-bytes 6.1.0",
  "bytes",
  "fnv",
  "futures",
@@ -15255,6 +15426,7 @@ dependencies = [
  "sc-block-builder",
  "sc-chain-spec",
  "sc-client-api",
+ "sc-mixnet",
  "sc-network",
  "sc-network-common",
  "sc-rpc-api",
@@ -15286,6 +15458,7 @@ dependencies = [
  "jsonrpsee",
  "parity-scale-codec",
  "sc-chain-spec",
+ "sc-mixnet",
  "sc-transaction-pool-api",
  "scale-info",
  "serde",
@@ -15315,7 +15488,7 @@ dependencies = [
 name = "sc-rpc-spec-v2"
 version = "0.10.0-dev"
 dependencies = [
- "array-bytes",
+ "array-bytes 6.1.0",
  "assert_matches",
  "futures",
  "futures-util",
@@ -15428,7 +15601,7 @@ dependencies = [
 name = "sc-service-test"
 version = "2.0.0"
 dependencies = [
- "array-bytes",
+ "array-bytes 6.1.0",
  "async-channel",
  "fdlimit",
  "futures",
@@ -15494,7 +15667,7 @@ dependencies = [
 name = "sc-storage-monitor"
 version = "0.1.0"
 dependencies = [
- "clap 4.4.4",
+ "clap 4.4.6",
  "fs4",
  "log",
  "sc-client-db",
@@ -15594,14 +15767,14 @@ dependencies = [
  "proc-macro-crate",
  "proc-macro2",
  "quote",
- "syn 2.0.37",
+ "syn 2.0.38",
 ]
 
 [[package]]
 name = "sc-transaction-pool"
 version = "4.0.0-dev"
 dependencies = [
- "array-bytes",
+ "array-bytes 6.1.0",
  "assert_matches",
  "async-trait",
  "criterion 0.4.0",
@@ -15721,7 +15894,7 @@ dependencies = [
  "rand 0.7.3",
  "rand_core 0.5.1",
  "sha2 0.8.2",
- "subtle",
+ "subtle 2.4.1",
  "zeroize",
 ]
 
@@ -15795,7 +15968,7 @@ dependencies = [
  "der 0.6.1",
  "generic-array 0.14.7",
  "pkcs8 0.9.0",
- "subtle",
+ "subtle 2.4.1",
  "zeroize",
 ]
 
@@ -15809,7 +15982,7 @@ dependencies = [
  "der 0.7.8",
  "generic-array 0.14.7",
  "pkcs8 0.10.2",
- "subtle",
+ "subtle 2.4.1",
  "zeroize",
 ]
 
@@ -15954,7 +16127,7 @@ checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.37",
+ "syn 2.0.38",
 ]
 
 [[package]]
@@ -16020,7 +16193,7 @@ checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.37",
+ "syn 2.0.38",
 ]
 
 [[package]]
@@ -16374,14 +16547,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "0c9d1425eb528a21de2755c75af4c9b5d57f50a0d4c3b7f1828a4cd03f8ba155"
 dependencies = [
  "aes-gcm 0.9.4",
- "blake2",
+ "blake2 0.10.6",
  "chacha20poly1305",
  "curve25519-dalek 4.0.0",
  "rand_core 0.6.4",
  "ring 0.16.20",
  "rustc_version 0.4.0",
  "sha2 0.10.7",
- "subtle",
+ "subtle 2.4.1",
 ]
 
 [[package]]
@@ -16448,12 +16621,12 @@ version = "4.0.0-dev"
 dependencies = [
  "Inflector",
  "assert_matches",
- "blake2",
+ "blake2 0.10.6",
  "expander 2.0.0",
  "proc-macro-crate",
  "proc-macro2",
  "quote",
- "syn 2.0.37",
+ "syn 2.0.38",
 ]
 
 [[package]]
@@ -16716,7 +16889,7 @@ dependencies = [
 name = "sp-consensus-beefy"
 version = "4.0.0-dev"
 dependencies = [
- "array-bytes",
+ "array-bytes 6.1.0",
  "lazy_static",
  "parity-scale-codec",
  "scale-info",
@@ -16790,10 +16963,10 @@ dependencies = [
 name = "sp-core"
 version = "21.0.0"
 dependencies = [
- "array-bytes",
+ "array-bytes 6.1.0",
  "bandersnatch_vrfs",
  "bitflags 1.3.2",
- "blake2",
+ "blake2 0.10.6",
  "bounded-collections",
  "bs58 0.5.0",
  "criterion 0.4.0",
@@ -16853,7 +17026,7 @@ version = "9.0.0"
 dependencies = [
  "quote",
  "sp-core-hashing",
- "syn 2.0.37",
+ "syn 2.0.38",
 ]
 
 [[package]]
@@ -16897,7 +17070,7 @@ version = "8.0.0"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.37",
+ "syn 2.0.38",
 ]
 
 [[package]]
@@ -16998,11 +17171,22 @@ dependencies = [
  "sp-std",
 ]
 
+[[package]]
+name = "sp-mixnet"
+version = "0.1.0-dev"
+dependencies = [
+ "parity-scale-codec",
+ "scale-info",
+ "sp-api",
+ "sp-application-crypto",
+ "sp-std",
+]
+
 [[package]]
 name = "sp-mmr-primitives"
 version = "4.0.0-dev"
 dependencies = [
- "array-bytes",
+ "array-bytes 6.1.0",
  "ckb-merkle-mountain-range",
  "log",
  "parity-scale-codec",
@@ -17035,7 +17219,7 @@ dependencies = [
 name = "sp-npos-elections-fuzzer"
 version = "2.0.0-alpha.5"
 dependencies = [
- "clap 4.4.4",
+ "clap 4.4.6",
  "honggfuzz",
  "rand 0.8.5",
  "sp-npos-elections",
@@ -17128,7 +17312,7 @@ dependencies = [
  "proc-macro-crate",
  "proc-macro2",
  "quote",
- "syn 2.0.37",
+ "syn 2.0.38",
 ]
 
 [[package]]
@@ -17200,7 +17384,7 @@ dependencies = [
 name = "sp-state-machine"
 version = "0.28.0"
 dependencies = [
- "array-bytes",
+ "array-bytes 6.1.0",
  "assert_matches",
  "hash-db",
  "log",
@@ -17322,7 +17506,7 @@ name = "sp-trie"
 version = "22.0.0"
 dependencies = [
  "ahash 0.8.3",
- "array-bytes",
+ "array-bytes 6.1.0",
  "criterion 0.4.0",
  "hash-db",
  "hashbrown 0.13.2",
@@ -17368,7 +17552,7 @@ dependencies = [
  "proc-macro2",
  "quote",
  "sp-version",
- "syn 2.0.37",
+ "syn 2.0.38",
 ]
 
 [[package]]
@@ -17628,7 +17812,7 @@ dependencies = [
  "md-5",
  "rand 0.8.5",
  "ring 0.16.20",
- "subtle",
+ "subtle 2.4.1",
  "thiserror",
  "tokio",
  "url",
@@ -17639,8 +17823,27 @@ dependencies = [
 name = "subkey"
 version = "3.0.0"
 dependencies = [
- "clap 4.4.4",
+ "clap 4.4.6",
+ "sc-cli",
+]
+
+[[package]]
+name = "substrate"
+version = "1.0.0"
+dependencies = [
+ "aquamarine",
+ "chain-spec-builder",
+ "frame-support",
  "sc-cli",
+ "sc-consensus-aura",
+ "sc-consensus-babe",
+ "sc-consensus-beefy",
+ "sc-consensus-grandpa",
+ "sc-consensus-manual-seal",
+ "sc-consensus-pow",
+ "sc-service",
+ "sp-runtime",
+ "subkey",
 ]
 
 [[package]]
@@ -17681,7 +17884,7 @@ dependencies = [
 name = "substrate-frame-cli"
 version = "4.0.0-dev"
 dependencies = [
- "clap 4.4.4",
+ "clap 4.4.6",
  "frame-support",
  "frame-system",
  "sc-cli",
@@ -17775,7 +17978,7 @@ dependencies = [
 name = "substrate-test-client"
 version = "2.0.1"
 dependencies = [
- "array-bytes",
+ "array-bytes 6.1.0",
  "async-trait",
  "futures",
  "parity-scale-codec",
@@ -17800,7 +18003,7 @@ dependencies = [
 name = "substrate-test-runtime"
 version = "2.0.0"
 dependencies = [
- "array-bytes",
+ "array-bytes 6.1.0",
  "frame-executive",
  "frame-support",
  "frame-system",
@@ -17914,6 +18117,12 @@ dependencies = [
  "autocfg",
 ]
 
+[[package]]
+name = "subtle"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2d67a5a62ba6e01cb2192ff309324cb4875d0c451d55fe2319433abe7a05a8ee"
+
 [[package]]
 name = "subtle"
 version = "2.4.1"
@@ -18030,9 +18239,9 @@ dependencies = [
 
 [[package]]
 name = "syn"
-version = "2.0.37"
+version = "2.0.38"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7303ef2c05cd654186cb250d29049a24840ca25d2747c25c0381c8d9e2f582e8"
+checksum = "e96b79aaa137db8f61e26363a0c9b47d8b4ec75da28b7d1d614c2303e232408b"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -18140,7 +18349,7 @@ dependencies = [
 name = "test-parachain-adder-collator"
 version = "1.0.0"
 dependencies = [
- "clap 4.4.4",
+ "clap 4.4.6",
  "futures",
  "futures-timer",
  "log",
@@ -18188,7 +18397,7 @@ dependencies = [
 name = "test-parachain-undying-collator"
 version = "1.0.0"
 dependencies = [
- "clap 4.4.4",
+ "clap 4.4.6",
  "futures",
  "futures-timer",
  "log",
@@ -18277,7 +18486,7 @@ checksum = "49922ecae66cc8a249b77e68d1d0623c1b2c514f0060c27cdc68bd62a1219d35"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.37",
+ "syn 2.0.38",
 ]
 
 [[package]]
@@ -18457,7 +18666,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.37",
+ "syn 2.0.38",
 ]
 
 [[package]]
@@ -18638,7 +18847,7 @@ checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.37",
+ "syn 2.0.38",
 ]
 
 [[package]]
@@ -18681,7 +18890,7 @@ dependencies = [
  "proc-macro-crate",
  "proc-macro2",
  "quote",
- "syn 2.0.37",
+ "syn 2.0.38",
 ]
 
 [[package]]
@@ -18834,7 +19043,7 @@ version = "0.10.0-dev"
 dependencies = [
  "assert_cmd",
  "async-trait",
- "clap 4.4.4",
+ "clap 4.4.6",
  "frame-remote-externalities",
  "frame-try-runtime",
  "hex",
@@ -19005,7 +19214,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "9f214e8f697e925001e66ec2c6e37a4ef93f0f78c2eed7814394e10c62025b05"
 dependencies = [
  "generic-array 0.14.7",
- "subtle",
+ "subtle 2.4.1",
 ]
 
 [[package]]
@@ -19015,7 +19224,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "fc1de2c688dc15305988b563c3854064043356019f97a4b46276fe734c4f07ea"
 dependencies = [
  "crypto-common",
- "subtle",
+ "subtle 2.4.1",
 ]
 
 [[package]]
@@ -19230,7 +19439,7 @@ dependencies = [
  "once_cell",
  "proc-macro2",
  "quote",
- "syn 2.0.37",
+ "syn 2.0.38",
  "wasm-bindgen-shared",
 ]
 
@@ -19264,7 +19473,7 @@ checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.37",
+ "syn 2.0.38",
  "wasm-bindgen-backend",
  "wasm-bindgen-shared",
 ]
@@ -19764,7 +19973,7 @@ dependencies = [
  "sha1",
  "sha2 0.10.7",
  "signature 1.6.4",
- "subtle",
+ "subtle 2.4.1",
  "thiserror",
  "tokio",
  "webpki 0.21.4",
@@ -19858,7 +20067,7 @@ dependencies = [
  "rtcp",
  "rtp",
  "sha-1 0.9.8",
- "subtle",
+ "subtle 2.4.1",
  "thiserror",
  "tokio",
  "webrtc-util",
@@ -19902,6 +20111,7 @@ dependencies = [
  "frame-try-runtime",
  "hex-literal",
  "log",
+ "pallet-asset-rate",
  "pallet-authority-discovery",
  "pallet-authorship",
  "pallet-babe",
@@ -20405,7 +20615,7 @@ dependencies = [
  "Inflector",
  "proc-macro2",
  "quote",
- "syn 2.0.37",
+ "syn 2.0.38",
 ]
 
 [[package]]
@@ -20524,7 +20734,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.37",
+ "syn 2.0.38",
 ]
 
 [[package]]
diff --git a/Cargo.toml b/Cargo.toml
index 7edc28daf76d1..75da6681465d2 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -23,6 +23,7 @@ members = [
 	"bridges/primitives/chain-bridge-hub-wococo",
 	"bridges/primitives/chain-kusama",
 	"bridges/primitives/chain-polkadot",
+	"bridges/primitives/chain-polkadot-bulletin",
 	"bridges/primitives/chain-rococo",
 	"bridges/primitives/chain-wococo",
 	"bridges/primitives/header-chain",
@@ -109,11 +110,11 @@ members = [
 	"polkadot/node/core/parachains-inherent",
 	"polkadot/node/core/prospective-parachains",
 	"polkadot/node/core/provisioner",
+	"polkadot/node/core/pvf-checker",
 	"polkadot/node/core/pvf",
 	"polkadot/node/core/pvf/common",
 	"polkadot/node/core/pvf/execute-worker",
 	"polkadot/node/core/pvf/prepare-worker",
-	"polkadot/node/core/pvf-checker",
 	"polkadot/node/core/runtime-api",
 	"polkadot/node/gum",
 	"polkadot/node/gum/proc-macro",
@@ -133,10 +134,10 @@ members = [
 	"polkadot/node/overseer",
 	"polkadot/node/primitives",
 	"polkadot/node/service",
-	"polkadot/node/subsystem",
 	"polkadot/node/subsystem-test-helpers",
 	"polkadot/node/subsystem-types",
 	"polkadot/node/subsystem-util",
+	"polkadot/node/subsystem",
 	"polkadot/node/test/client",
 	"polkadot/node/test/service",
 	"polkadot/node/zombienet-backchannel",
@@ -164,8 +165,8 @@ members = [
 	"polkadot/utils/generate-bags",
 	"polkadot/utils/remote-ext-tests/bags-list",
 	"polkadot/xcm",
-	"polkadot/xcm/pallet-xcm",
 	"polkadot/xcm/pallet-xcm-benchmarks",
+	"polkadot/xcm/pallet-xcm",
 	"polkadot/xcm/procedural",
 	"polkadot/xcm/xcm-builder",
 	"polkadot/xcm/xcm-executor",
@@ -173,6 +174,10 @@ members = [
 	"polkadot/xcm/xcm-simulator",
 	"polkadot/xcm/xcm-simulator/example",
 	"polkadot/xcm/xcm-simulator/fuzzer",
+	"substrate",
+	"substrate/bin/node-template/node",
+	"substrate/bin/node-template/pallets/template",
+	"substrate/bin/node-template/runtime",
 	"substrate/bin/node/bench",
 	"substrate/bin/node/cli",
 	"substrate/bin/node/executor",
@@ -181,9 +186,6 @@ members = [
 	"substrate/bin/node/rpc",
 	"substrate/bin/node/runtime",
 	"substrate/bin/node/testing",
-	"substrate/bin/node-template/node",
-	"substrate/bin/node-template/pallets/template",
-	"substrate/bin/node-template/runtime",
 	"substrate/bin/utils/chain-spec-builder",
 	"substrate/bin/utils/subkey",
 	"substrate/client/allocator",
@@ -215,6 +217,8 @@ members = [
 	"substrate/client/keystore",
 	"substrate/client/merkle-mountain-range",
 	"substrate/client/merkle-mountain-range/rpc",
+	"substrate/client/mixnet",
+	"substrate/client/network-gossip",
 	"substrate/client/network",
 	"substrate/client/network/bitswap",
 	"substrate/client/network/common",
@@ -223,13 +227,12 @@ members = [
 	"substrate/client/network/sync",
 	"substrate/client/network/test",
 	"substrate/client/network/transactions",
-	"substrate/client/network-gossip",
 	"substrate/client/offchain",
 	"substrate/client/proposer-metrics",
-	"substrate/client/rpc",
 	"substrate/client/rpc-api",
 	"substrate/client/rpc-servers",
 	"substrate/client/rpc-spec-v2",
+	"substrate/client/rpc",
 	"substrate/client/service",
 	"substrate/client/service/test",
 	"substrate/client/state-db",
@@ -256,8 +259,8 @@ members = [
 	"substrate/frame/bags-list/fuzzer",
 	"substrate/frame/bags-list/remote-tests",
 	"substrate/frame/balances",
-	"substrate/frame/beefy",
 	"substrate/frame/beefy-mmr",
+	"substrate/frame/beefy",
 	"substrate/frame/benchmarking",
 	"substrate/frame/benchmarking/pov",
 	"substrate/frame/bounties",
@@ -296,6 +299,7 @@ members = [
 	"substrate/frame/membership",
 	"substrate/frame/merkle-mountain-range",
 	"substrate/frame/message-queue",
+	"substrate/frame/mixnet",
 	"substrate/frame/multisig",
 	"substrate/frame/nft-fractionalization",
 	"substrate/frame/nfts",
@@ -392,17 +396,18 @@ members = [
 	"substrate/primitives/maybe-compressed-blob",
 	"substrate/primitives/merkle-mountain-range",
 	"substrate/primitives/metadata-ir",
+	"substrate/primitives/mixnet",
 	"substrate/primitives/npos-elections",
 	"substrate/primitives/npos-elections/fuzzer",
 	"substrate/primitives/offchain",
 	"substrate/primitives/panic-handler",
 	"substrate/primitives/rpc",
-	"substrate/primitives/runtime",
 	"substrate/primitives/runtime-interface",
 	"substrate/primitives/runtime-interface/proc-macro",
-	"substrate/primitives/runtime-interface/test",
-	"substrate/primitives/runtime-interface/test-wasm",
 	"substrate/primitives/runtime-interface/test-wasm-deprecated",
+	"substrate/primitives/runtime-interface/test-wasm",
+	"substrate/primitives/runtime-interface/test",
+	"substrate/primitives/runtime",
 	"substrate/primitives/session",
 	"substrate/primitives/staking",
 	"substrate/primitives/state-machine",
diff --git a/bridges/bin/runtime-common/src/messages_call_ext.rs b/bridges/bin/runtime-common/src/messages_call_ext.rs
index 07a99d2c0a16c..5303fcb7ba030 100644
--- a/bridges/bin/runtime-common/src/messages_call_ext.rs
+++ b/bridges/bin/runtime-common/src/messages_call_ext.rs
@@ -18,6 +18,7 @@ use crate::messages::{
 	source::FromBridgedChainMessagesDeliveryProof, target::FromBridgedChainMessagesProof,
 };
 use bp_messages::{target_chain::MessageDispatch, InboundLaneData, LaneId, MessageNonce};
+use bp_runtime::OwnedBridgeModule;
 use frame_support::{
 	dispatch::CallableCallFor,
 	traits::{Get, IsSubType},
@@ -187,8 +188,22 @@ pub trait MessagesCallSubType<T: Config<I, RuntimeCall = Self>, I: 'static>:
 	/// or a `ReceiveMessagesDeliveryProof` call, if the call is for the provided lane.
 	fn call_info_for(&self, lane_id: LaneId) -> Option<CallInfo>;
 
-	/// Check that a `ReceiveMessagesProof` or a `ReceiveMessagesDeliveryProof` call is trying
-	/// to deliver/confirm at least some messages that are better than the ones we know of.
+	/// Ensures that a `ReceiveMessagesProof` or a `ReceiveMessagesDeliveryProof` call:
+	///
+	/// - does not deliver already delivered messages. We require all messages in the
+	///   `ReceiveMessagesProof` call to be undelivered;
+	///
+	/// - does not submit an empty `ReceiveMessagesProof` call with zero messages, unless the
+	///   lane needs to be unblocked by providing a relayer rewards proof;
+	///
+	/// - brings new delivery confirmations in a `ReceiveMessagesDeliveryProof` call. We require
+	///   at least one new delivery confirmation in the unrewarded relayers set;
+	///
+	/// - does not violate basic (easily verifiable) messages pallet rules (like submitting a
+	///   call while the pallet is halted, or delivering messages while the dispatcher is
+	///   inactive).
+	///
+	/// If any of the above rules is violated, the transaction is rejected as invalid.
 	fn check_obsolete_call(&self) -> TransactionValidity;
 }
 
@@ -278,7 +293,17 @@ impl<
 	}
 
 	fn check_obsolete_call(&self) -> TransactionValidity {
+		let is_pallet_halted = Pallet::<T, I>::ensure_not_halted().is_err();
 		match self.call_info() {
+			Some(proof_info) if is_pallet_halted => {
+				log::trace!(
+					target: pallet_bridge_messages::LOG_TARGET,
+					"Rejecting messages transaction on halted pallet: {:?}",
+					proof_info
+				);
+
+				return sp_runtime::transaction_validity::InvalidTransaction::Call.into()
+			},
 			Some(CallInfo::ReceiveMessagesProof(proof_info))
 				if proof_info.is_obsolete(T::MessageDispatch::is_active()) =>
 			{
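
The new guard above rejects any bridge-messages transaction while the pallet is halted, before the per-call obsolescence checks run. A minimal, self-contained sketch of that reject-early shape, using `sp_runtime`'s transaction-validity types (the boolean parameters are hypothetical stand-ins for `ensure_not_halted().is_err()` and the proof-info checks, which the real code derives from storage and the decoded call):

```rust
use sp_runtime::transaction_validity::{
	InvalidTransaction, TransactionValidity, ValidTransaction,
};

// Illustrative only: the actual extension inspects the call and pallet state.
fn check_obsolete_call_sketch(
	pallet_halted: bool,
	brings_anything_new: bool,
) -> TransactionValidity {
	if pallet_halted {
		// Mirrors the `ensure_not_halted().is_err()` guard: reject every
		// messages transaction while the pallet is halted.
		return InvalidTransaction::Call.into()
	}
	if !brings_anything_new {
		// Obsolete proof: nothing new to deliver or confirm.
		return InvalidTransaction::Stale.into()
	}
	Ok(ValidTransaction::default())
}
```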
diff --git a/bridges/bin/runtime-common/src/priority_calculator.rs b/bridges/bin/runtime-common/src/priority_calculator.rs
index 3d53f9da8c20e..fd10344812517 100644
--- a/bridges/bin/runtime-common/src/priority_calculator.rs
+++ b/bridges/bin/runtime-common/src/priority_calculator.rs
@@ -38,7 +38,7 @@ where
 	PriorityBoostPerMessage: Get<TransactionPriority>,
 {
 	// we don't want any boost for transaction with single message => minus one
-	PriorityBoostPerMessage::get().saturating_mul(messages - 1)
+	PriorityBoostPerMessage::get().saturating_mul(messages.saturating_sub(1))
 }
 
 #[cfg(not(feature = "integrity-test"))]
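
The one-line change above matters for `messages == 0`: `messages - 1` would underflow (panicking in debug builds, wrapping in release), while `saturating_sub(1)` pins the result at zero. A small illustration with made-up numbers, not the pallet's actual values:

```rust
fn boost(per_message_boost: u64, messages: u64) -> u64 {
	// No boost for a single message, and no underflow for zero messages.
	per_message_boost.saturating_mul(messages.saturating_sub(1))
}

fn main() {
	assert_eq!(boost(100, 0), 0); // zero messages: saturates instead of underflowing
	assert_eq!(boost(100, 1), 0); // single message: no boost
	assert_eq!(boost(100, 5), 400);
}
```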
diff --git a/bridges/bin/runtime-common/src/refund_relayer_extension.rs b/bridges/bin/runtime-common/src/refund_relayer_extension.rs
index f0c2cbf44509b..6d8b211480858 100644
--- a/bridges/bin/runtime-common/src/refund_relayer_extension.rs
+++ b/bridges/bin/runtime-common/src/refund_relayer_extension.rs
@@ -24,8 +24,8 @@ use crate::messages_call_ext::{
 };
 use bp_messages::{LaneId, MessageNonce};
 use bp_relayers::{RewardsAccountOwner, RewardsAccountParams};
-use bp_runtime::{Parachain, ParachainIdOf, RangeInclusiveExt, StaticStrProvider};
-use codec::{Decode, Encode};
+use bp_runtime::{Chain, Parachain, ParachainIdOf, RangeInclusiveExt, StaticStrProvider};
+use codec::{Codec, Decode, Encode};
 use frame_support::{
 	dispatch::{CallableCallFor, DispatchInfo, PostDispatchInfo},
 	traits::IsSubType,
@@ -33,7 +33,8 @@ use frame_support::{
 	CloneNoBound, DefaultNoBound, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound,
 };
 use pallet_bridge_grandpa::{
-	CallSubType as GrandpaCallSubType, SubmitFinalityProofHelper, SubmitFinalityProofInfo,
+	CallSubType as GrandpaCallSubType, Config as GrandpaConfig, SubmitFinalityProofHelper,
+	SubmitFinalityProofInfo,
 };
 use pallet_bridge_messages::Config as MessagesConfig;
 use pallet_bridge_parachains::{
@@ -96,7 +97,7 @@ where
 /// coming from this lane.
 pub trait RefundableMessagesLaneId {
 	/// The instance of the bridge messages pallet.
-	type Instance;
+	type Instance: 'static;
 	/// The messages lane id.
 	type Id: Get<LaneId>;
 }
@@ -106,6 +107,7 @@ pub struct RefundableMessagesLane<Instance, Id>(PhantomData<(Instance, Id)>);
 
 impl<Instance, Id> RefundableMessagesLaneId for RefundableMessagesLane<Instance, Id>
 where
+	Instance: 'static,
 	Id: Get<LaneId>,
 {
 	type Instance = Instance;
@@ -165,7 +167,11 @@ pub enum CallInfo {
 		SubmitParachainHeadsInfo,
 		MessagesCallInfo,
 	),
+	/// Relay chain finality + message delivery/confirmation calls.
+	RelayFinalityAndMsgs(SubmitFinalityProofInfo<RelayBlockNumber>, MessagesCallInfo),
 	/// Parachain finality + message delivery/confirmation calls.
+	///
+	/// This variant is used only when bridging with a parachain.
 	ParachainFinalityAndMsgs(SubmitParachainHeadsInfo, MessagesCallInfo),
 	/// Standalone message delivery/confirmation call.
 	Msgs(MessagesCallInfo),
@@ -184,6 +190,7 @@ impl CallInfo {
 	fn submit_finality_proof_info(&self) -> Option<SubmitFinalityProofInfo<RelayBlockNumber>> {
 		match *self {
 			Self::AllFinalityAndMsgs(info, _, _) => Some(info),
+			Self::RelayFinalityAndMsgs(info, _) => Some(info),
 			_ => None,
 		}
 	}
@@ -201,6 +208,7 @@ impl CallInfo {
 	fn messages_call_info(&self) -> &MessagesCallInfo {
 		match self {
 			Self::AllFinalityAndMsgs(_, _, info) => info,
+			Self::RelayFinalityAndMsgs(_, info) => info,
 			Self::ParachainFinalityAndMsgs(_, info) => info,
 			Self::Msgs(info) => info,
 		}
@@ -209,7 +217,7 @@ impl CallInfo {
 
 /// The actions on relayer account that need to be performed because of his actions.
 #[derive(RuntimeDebug, PartialEq)]
-enum RelayerAccountAction<AccountId, Reward> {
+pub enum RelayerAccountAction<AccountId, Reward> {
 	/// Do nothing with relayer account.
 	None,
 	/// Reward the relayer.
@@ -218,121 +226,60 @@ enum RelayerAccountAction<AccountId, Reward> {
 	Slash(AccountId, RewardsAccountParams),
 }
 
-/// Signed extension that refunds a relayer for new messages coming from a parachain.
-///
-/// Also refunds relayer for successful finality delivery if it comes in batch (`utility.batchAll`)
-/// with message delivery transaction. Batch may deliver either both relay chain header and
-/// parachain head, or just parachain head. Corresponding headers must be used in messages
-/// proof verification.
-///
-/// Extension does not refund transaction tip due to security reasons.
-#[derive(
-	DefaultNoBound,
-	CloneNoBound,
-	Decode,
-	Encode,
-	EqNoBound,
-	PartialEqNoBound,
-	RuntimeDebugNoBound,
-	TypeInfo,
-)]
-#[scale_info(skip_type_params(Runtime, Para, Msgs, Refund, Priority, Id))]
-pub struct RefundBridgedParachainMessages<Runtime, Para, Msgs, Refund, Priority, Id>(
-	PhantomData<(
-		// runtime with `frame-utility`, `pallet-bridge-grandpa`, `pallet-bridge-parachains`,
-		// `pallet-bridge-messages` and `pallet-bridge-relayers` pallets deployed
-		Runtime,
-		// implementation of `RefundableParachainId` trait, which specifies the instance of
-		// the used `pallet-bridge-parachains` pallet and the bridged parachain id
-		Para,
-		// implementation of `RefundableMessagesLaneId` trait, which specifies the instance of
-		// the used `pallet-bridge-messages` pallet and the lane within this pallet
-		Msgs,
-		// implementation of the `RefundCalculator` trait, that is used to compute refund that
-		// we give to relayer for his transaction
-		Refund,
-		// getter for per-message `TransactionPriority` boost that we give to message
-		// delivery transactions
-		Priority,
-		// the runtime-unique identifier of this signed extension
-		Id,
-	)>,
-);
-
-impl<Runtime, Para, Msgs, Refund, Priority, Id>
-	RefundBridgedParachainMessages<Runtime, Para, Msgs, Refund, Priority, Id>
+/// Everything common among our refund signed extensions.
+pub trait RefundSignedExtension:
+	'static + Clone + Codec + sp_std::fmt::Debug + Default + Eq + PartialEq + Send + Sync + TypeInfo
 where
-	Self: 'static + Send + Sync,
-	Runtime: UtilityConfig<RuntimeCall = CallOf<Runtime>>
-		+ BoundedBridgeGrandpaConfig<Runtime::BridgesGrandpaPalletInstance>
-		+ ParachainsConfig<Para::Instance>
-		+ MessagesConfig<Msgs::Instance>
-		+ RelayersConfig,
-	Para: RefundableParachainId,
-	Msgs: RefundableMessagesLaneId,
-	Refund: RefundCalculator<Balance = Runtime::Reward>,
-	Priority: Get<TransactionPriority>,
-	Id: StaticStrProvider,
-	CallOf<Runtime>: Dispatchable<Info = DispatchInfo, PostInfo = PostDispatchInfo>
-		+ IsSubType<CallableCallFor<UtilityPallet<Runtime>, Runtime>>
-		+ GrandpaCallSubType<Runtime, Runtime::BridgesGrandpaPalletInstance>
-		+ ParachainsCallSubType<Runtime, Para::Instance>
-		+ MessagesCallSubType<Runtime, Msgs::Instance>,
+	<Self::Runtime as GrandpaConfig<Self::GrandpaInstance>>::BridgedChain:
+		Chain<BlockNumber = RelayBlockNumber>,
 {
-	fn expand_call<'a>(&self, call: &'a CallOf<Runtime>) -> Vec<&'a CallOf<Runtime>> {
-		match call.is_sub_type() {
-			Some(UtilityCall::<Runtime>::batch_all { ref calls }) if calls.len() <= 3 =>
-				calls.iter().collect(),
-			Some(_) => vec![],
-			None => vec![call],
-		}
-	}
-
+	/// This chain runtime.
+	type Runtime: UtilityConfig<RuntimeCall = CallOf<Self::Runtime>>
+		+ GrandpaConfig<Self::GrandpaInstance>
+		+ MessagesConfig<<Self::Msgs as RefundableMessagesLaneId>::Instance>
+		+ RelayersConfig;
+	/// The bridge GRANDPA pallet instance, used to track bridged chain state.
+	type GrandpaInstance: 'static;
+	/// The messages pallet instance and the lane within this pallet.
+	type Msgs: RefundableMessagesLaneId;
+	/// Refund amount calculator.
+	type Refund: RefundCalculator<Balance = <Self::Runtime as RelayersConfig>::Reward>;
+	/// Getter of the per-message `TransactionPriority` boost, given to message delivery
+	/// transactions.
+	type Priority: Get<TransactionPriority>;
+	/// The runtime-unique identifier of this signed extension.
+	type Id: StaticStrProvider;
+
+	/// Unpack the (possibly batched) runtime call into individual calls.
+	fn expand_call(call: &CallOf<Self::Runtime>) -> Vec<&CallOf<Self::Runtime>>;
+
+	/// Given a runtime call, check whether it has a supported format. Additionally, check
+	/// whether any of the (optionally batched) calls is obsolete, in which case the
+	/// transaction shall be rejected.
 	fn parse_and_check_for_obsolete_call(
-		&self,
-		call: &CallOf<Runtime>,
-	) -> Result<Option<CallInfo>, TransactionValidityError> {
-		let calls = self.expand_call(call);
-		let total_calls = calls.len();
-		let mut calls = calls.into_iter().map(Self::check_obsolete_call).rev();
-
-		let msgs_call = calls.next().transpose()?.and_then(|c| c.call_info_for(Msgs::Id::get()));
-		let para_finality_call = calls
-			.next()
-			.transpose()?
-			.and_then(|c| c.submit_parachain_heads_info_for(Para::Id::get()));
-		let relay_finality_call =
-			calls.next().transpose()?.and_then(|c| c.submit_finality_proof_info());
+		call: &CallOf<Self::Runtime>,
+	) -> Result<Option<CallInfo>, TransactionValidityError>;
 
-		Ok(match (total_calls, relay_finality_call, para_finality_call, msgs_call) {
-			(3, Some(relay_finality_call), Some(para_finality_call), Some(msgs_call)) => Some(
-				CallInfo::AllFinalityAndMsgs(relay_finality_call, para_finality_call, msgs_call),
-			),
-			(2, None, Some(para_finality_call), Some(msgs_call)) =>
-				Some(CallInfo::ParachainFinalityAndMsgs(para_finality_call, msgs_call)),
-			(1, None, None, Some(msgs_call)) => Some(CallInfo::Msgs(msgs_call)),
-			_ => None,
-		})
-	}
+	/// Check whether the parsed call is already obsolete.
+	fn check_obsolete_parsed_call(
+		call: &CallOf<Self::Runtime>,
+	) -> Result<&CallOf<Self::Runtime>, TransactionValidityError>;
 
-	fn check_obsolete_call(
-		call: &CallOf<Runtime>,
-	) -> Result<&CallOf<Runtime>, TransactionValidityError> {
-		call.check_obsolete_submit_finality_proof()?;
-		call.check_obsolete_submit_parachain_heads()?;
-		call.check_obsolete_call()?;
-		Ok(call)
-	}
+	/// Called from post-dispatch; shall perform any additional checks of the given call
+	/// result (on top of the relay chain finality and messages call checks).
+	fn additional_call_result_check(
+		relayer: &AccountIdOf<Self::Runtime>,
+		call_info: &CallInfo,
+	) -> bool;
 
 	/// Given post-dispatch information, analyze the outcome of relayer call and return
 	/// actions that need to be performed on relayer account.
 	fn analyze_call_result(
-		pre: Option<Option<PreDispatchData<Runtime::AccountId>>>,
+		pre: Option<Option<PreDispatchData<AccountIdOf<Self::Runtime>>>>,
 		info: &DispatchInfo,
 		post_info: &PostDispatchInfo,
 		len: usize,
 		result: &DispatchResult,
-	) -> RelayerAccountAction<AccountIdOf<Runtime>, Runtime::Reward> {
+	) -> RelayerAccountAction<AccountIdOf<Self::Runtime>, <Self::Runtime as RelayersConfig>::Reward>
+	{
 		let mut extra_weight = Weight::zero();
 		let mut extra_size = 0;
 
@@ -344,15 +291,18 @@ where
 
 		// now we know that the relayer either needs to be rewarded, or slashed
 		// => let's prepare the correspondent account that pays reward/receives slashed amount
-		let reward_account_params = RewardsAccountParams::new(
-			Msgs::Id::get(),
-			Runtime::BridgedChainId::get(),
-			if call_info.is_receive_messages_proof_call() {
-				RewardsAccountOwner::ThisChain
-			} else {
-				RewardsAccountOwner::BridgedChain
-			},
-		);
+		let reward_account_params =
+			RewardsAccountParams::new(
+				<Self::Msgs as RefundableMessagesLaneId>::Id::get(),
+				<Self::Runtime as MessagesConfig<
+					<Self::Msgs as RefundableMessagesLaneId>::Instance,
+				>>::BridgedChainId::get(),
+				if call_info.is_receive_messages_proof_call() {
+					RewardsAccountOwner::ThisChain
+				} else {
+					RewardsAccountOwner::BridgedChain
+				},
+			);
 
 		// prepare return value for the case if the call has failed or it has not caused
 		// expected side effects (e.g. not all messages have been accepted)
@@ -376,10 +326,9 @@ where
 		if let Err(e) = result {
 			log::trace!(
 				target: "runtime::bridge",
-				"{} from parachain {} via {:?}: relayer {:?} has submitted invalid messages transaction: {:?}",
-				Self::IDENTIFIER,
-				Para::Id::get(),
-				Msgs::Id::get(),
+				"{} via {:?}: relayer {:?} has submitted invalid messages transaction: {:?}",
+				Self::Id::STR,
+				<Self::Msgs as RefundableMessagesLaneId>::Id::get(),
 				relayer,
 				e,
 			);
@@ -388,19 +337,18 @@ where
 
 		// check if relay chain state has been updated
 		if let Some(finality_proof_info) = call_info.submit_finality_proof_info() {
-			if !SubmitFinalityProofHelper::<Runtime, Runtime::BridgesGrandpaPalletInstance>::was_successful(
+			if !SubmitFinalityProofHelper::<Self::Runtime, Self::GrandpaInstance>::was_successful(
 				finality_proof_info.block_number,
 			) {
 				// we only refund relayer if all calls have updated chain state
 				log::trace!(
 					target: "runtime::bridge",
-					"{} from parachain {} via {:?}: relayer {:?} has submitted invalid relay chain finality proof",
-					Self::IDENTIFIER,
-					Para::Id::get(),
-					Msgs::Id::get(),
+					"{} via {:?}: relayer {:?} has submitted invalid relay chain finality proof",
+					Self::Id::STR,
+					<Self::Msgs as RefundableMessagesLaneId>::Id::get(),
 					relayer,
 				);
-				return slash_relayer_if_delivery_result;
+				return slash_relayer_if_delivery_result
 			}
 
 			// there's a conflict between how bridge GRANDPA pallet works and a `utility.batchAll`
@@ -416,39 +364,25 @@ where
 			extra_size = finality_proof_info.extra_size;
 		}
 
-		// check if parachain state has been updated
-		if let Some(para_proof_info) = call_info.submit_parachain_heads_info() {
-			if !SubmitParachainHeadsHelper::<Runtime, Para::Instance>::was_successful(
-				para_proof_info,
-			) {
-				// we only refund relayer if all calls have updated chain state
-				log::trace!(
-					target: "runtime::bridge",
-					"{} from parachain {} via {:?}: relayer {:?} has submitted invalid parachain finality proof",
-					Self::IDENTIFIER,
-					Para::Id::get(),
-					Msgs::Id::get(),
-					relayer,
-				);
-				return slash_relayer_if_delivery_result
-			}
-		}
-
 		// Check if the `ReceiveMessagesProof` call delivered at least some of the messages that
 		// it contained. If this happens, we consider the transaction "helpful" and refund it.
 		let msgs_call_info = call_info.messages_call_info();
-		if !MessagesCallHelper::<Runtime, Msgs::Instance>::was_successful(msgs_call_info) {
+		if !MessagesCallHelper::<Self::Runtime, <Self::Msgs as RefundableMessagesLaneId>::Instance>::was_successful(msgs_call_info) {
 			log::trace!(
 				target: "runtime::bridge",
-				"{} from parachain {} via {:?}: relayer {:?} has submitted invalid messages call",
-				Self::IDENTIFIER,
-				Para::Id::get(),
-				Msgs::Id::get(),
+				"{} via {:?}: relayer {:?} has submitted invalid messages call",
+				Self::Id::STR,
+				<Self::Msgs as RefundableMessagesLaneId>::Id::get(),
 				relayer,
 			);
 			return slash_relayer_if_delivery_result
 		}
 
+		// run additional, extension-specific checks of the call result
+		if !Self::additional_call_result_check(&relayer, &call_info) {
+			return slash_relayer_if_delivery_result
+		}
+
 		// regarding the tip - refund that happens here (at this side of the bridge) isn't the whole
 		// relayer compensation. He'll receive some amount at the other side of the bridge. It shall
 		// (in theory) cover the tip there. Otherwise, if we'll be compensating tip here, some
@@ -464,14 +398,14 @@ where
 		// let's also replace the weight of slashing relayer with the weight of rewarding relayer
 		if call_info.is_receive_messages_proof_call() {
 			post_info_weight = post_info_weight.saturating_sub(
-				<Runtime as RelayersConfig>::WeightInfo::extra_weight_of_successful_receive_messages_proof_call(),
+				<Self::Runtime as RelayersConfig>::WeightInfo::extra_weight_of_successful_receive_messages_proof_call(),
 			);
 		}
 
 		// compute the relayer refund
 		let mut post_info = *post_info;
 		post_info.actual_weight = Some(post_info_weight);
-		let refund = Refund::compute_refund(info, &post_info, post_info_len, tip);
+		let refund = Self::Refund::compute_refund(info, &post_info, post_info_len, tip);
 
 		// we can finally reward relayer
 		RelayerAccountAction::Reward(relayer, reward_account_params, refund)
@@ -497,7 +431,11 @@ where
 		let bundled_messages = parsed_call.messages_call_info().bundled_messages().saturating_len();
 
 		// a quick check to avoid invalid high-priority transactions
-		if bundled_messages > Runtime::MaxUnconfirmedMessagesAtInboundLane::get() {
+		let max_unconfirmed_messages_in_confirmation_tx = <Self::Runtime as MessagesConfig<
+			<Self::Msgs as RefundableMessagesLaneId>::Instance,
+		>>::MaxUnconfirmedMessagesAtInboundLane::get(
+		);
+		if bundled_messages > max_unconfirmed_messages_in_confirmation_tx {
 			return None
 		}
 
@@ -505,31 +443,37 @@ where
 	}
 }
 
-impl<Runtime, Para, Msgs, Refund, Priority, Id> SignedExtension
-	for RefundBridgedParachainMessages<Runtime, Para, Msgs, Refund, Priority, Id>
+/// Adapter that allows implementing `sp_runtime::traits::SignedExtension` for any
+/// `RefundSignedExtension`.
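+///
+/// A minimal usage sketch (the provider name `MyRefundExtension` is illustrative, not part
+/// of this crate):
+///
+/// ```ignore
+/// type MyBridgeRefundExtension = RefundSignedExtensionAdapter<MyRefundExtension>;
+/// ```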
+#[derive(
+	DefaultNoBound,
+	CloneNoBound,
+	Decode,
+	Encode,
+	EqNoBound,
+	PartialEqNoBound,
+	RuntimeDebugNoBound,
+	TypeInfo,
+)]
+pub struct RefundSignedExtensionAdapter<T: RefundSignedExtension>(T)
 where
-	Self: 'static + Send + Sync,
-	Runtime: UtilityConfig<RuntimeCall = CallOf<Runtime>>
-		+ BoundedBridgeGrandpaConfig<Runtime::BridgesGrandpaPalletInstance>
-		+ ParachainsConfig<Para::Instance>
-		+ MessagesConfig<Msgs::Instance>
-		+ RelayersConfig,
-	Para: RefundableParachainId,
-	Msgs: RefundableMessagesLaneId,
-	Refund: RefundCalculator<Balance = Runtime::Reward>,
-	Priority: Get<TransactionPriority>,
-	Id: StaticStrProvider,
-	CallOf<Runtime>: Dispatchable<Info = DispatchInfo, PostInfo = PostDispatchInfo>
-		+ IsSubType<CallableCallFor<UtilityPallet<Runtime>, Runtime>>
-		+ GrandpaCallSubType<Runtime, Runtime::BridgesGrandpaPalletInstance>
-		+ ParachainsCallSubType<Runtime, Para::Instance>
-		+ MessagesCallSubType<Runtime, Msgs::Instance>,
+	<T::Runtime as GrandpaConfig<T::GrandpaInstance>>::BridgedChain:
+		Chain<BlockNumber = RelayBlockNumber>;
+
+impl<T: RefundSignedExtension> SignedExtension for RefundSignedExtensionAdapter<T>
+where
+	<T::Runtime as GrandpaConfig<T::GrandpaInstance>>::BridgedChain:
+		Chain<BlockNumber = RelayBlockNumber>,
+	CallOf<T::Runtime>: Dispatchable<Info = DispatchInfo, PostInfo = PostDispatchInfo>
+		+ IsSubType<CallableCallFor<UtilityPallet<T::Runtime>, T::Runtime>>
+		+ GrandpaCallSubType<T::Runtime, T::GrandpaInstance>
+		+ MessagesCallSubType<T::Runtime, <T::Msgs as RefundableMessagesLaneId>::Instance>,
 {
-	const IDENTIFIER: &'static str = Id::STR;
-	type AccountId = Runtime::AccountId;
-	type Call = CallOf<Runtime>;
+	const IDENTIFIER: &'static str = T::Id::STR;
+	type AccountId = AccountIdOf<T::Runtime>;
+	type Call = CallOf<T::Runtime>;
 	type AdditionalSigned = ();
-	type Pre = Option<PreDispatchData<Runtime::AccountId>>;
+	type Pre = Option<PreDispatchData<AccountIdOf<T::Runtime>>>;
 
 	fn additional_signed(&self) -> Result<(), TransactionValidityError> {
 		Ok(())
@@ -547,34 +491,32 @@ where
 		// we're not calling `validate` from `pre_dispatch` directly because of performance
 		// reasons, so if you're adding some code that may fail here, please check if it needs
 		// to be added to the `pre_dispatch` as well
-		let parsed_call = self.parse_and_check_for_obsolete_call(call)?;
+		let parsed_call = T::parse_and_check_for_obsolete_call(call)?;
 
 		// the following code just plays with transaction priority and never returns an error
 
 		// we only boost priority of presumably correct message delivery transactions
-		let bundled_messages = match Self::bundled_messages_for_priority_boost(parsed_call.as_ref())
-		{
+		let bundled_messages = match T::bundled_messages_for_priority_boost(parsed_call.as_ref()) {
 			Some(bundled_messages) => bundled_messages,
 			None => return Ok(Default::default()),
 		};
 
 		// we only boost priority if relayer has staked required balance
-		if !RelayersPallet::<Runtime>::is_registration_active(who) {
+		if !RelayersPallet::<T::Runtime>::is_registration_active(who) {
 			return Ok(Default::default())
 		}
 
 		// compute priority boost
 		let priority_boost =
-			crate::priority_calculator::compute_priority_boost::<Priority>(bundled_messages);
+			crate::priority_calculator::compute_priority_boost::<T::Priority>(bundled_messages);
 		let valid_transaction = ValidTransactionBuilder::default().priority(priority_boost);
 
 		log::trace!(
 			target: "runtime::bridge",
-			"{} from parachain {} via {:?} has boosted priority of message delivery transaction \
+			"{} via {:?} has boosted priority of message delivery transaction \
 			of relayer {:?}: {} messages -> {} priority",
 			Self::IDENTIFIER,
-			Para::Id::get(),
-			Msgs::Id::get(),
+			<T::Msgs as RefundableMessagesLaneId>::Id::get(),
 			who,
 			bundled_messages,
 			priority_boost,
@@ -591,15 +533,14 @@ where
 		_len: usize,
 	) -> Result<Self::Pre, TransactionValidityError> {
 		// this is a relevant piece of `validate` that we need here (in `pre_dispatch`)
-		let parsed_call = self.parse_and_check_for_obsolete_call(call)?;
+		let parsed_call = T::parse_and_check_for_obsolete_call(call)?;
 
 		Ok(parsed_call.map(|call_info| {
 			log::trace!(
 				target: "runtime::bridge",
-				"{} from parachain {} via {:?} parsed bridge transaction in pre-dispatch: {:?}",
+				"{} via {:?} parsed bridge transaction in pre-dispatch: {:?}",
 				Self::IDENTIFIER,
-				Para::Id::get(),
-				Msgs::Id::get(),
+				<T::Msgs as RefundableMessagesLaneId>::Id::get(),
 				call_info,
 			);
 			PreDispatchData { relayer: who.clone(), call_info }
@@ -613,12 +554,12 @@ where
 		len: usize,
 		result: &DispatchResult,
 	) -> Result<(), TransactionValidityError> {
-		let call_result = Self::analyze_call_result(pre, info, post_info, len, result);
+		let call_result = T::analyze_call_result(pre, info, post_info, len, result);
 
 		match call_result {
 			RelayerAccountAction::None => (),
 			RelayerAccountAction::Reward(relayer, reward_account, reward) => {
-				RelayersPallet::<Runtime>::register_relayer_reward(
+				RelayersPallet::<T::Runtime>::register_relayer_reward(
 					reward_account,
 					&relayer,
 					reward,
@@ -626,22 +567,263 @@ where
 
 				log::trace!(
 					target: "runtime::bridge",
-					"{} from parachain {} via {:?} has registered reward: {:?} for {:?}",
+					"{} via {:?} has registered reward: {:?} for {:?}",
 					Self::IDENTIFIER,
-					Para::Id::get(),
-					Msgs::Id::get(),
+					<T::Msgs as RefundableMessagesLaneId>::Id::get(),
 					reward,
 					relayer,
 				);
 			},
 			RelayerAccountAction::Slash(relayer, slash_account) =>
-				RelayersPallet::<Runtime>::slash_and_deregister(&relayer, slash_account),
+				RelayersPallet::<T::Runtime>::slash_and_deregister(&relayer, slash_account),
 		}
 
 		Ok(())
 	}
 }
 
+/// Signed extension that refunds a relayer for new messages coming from a parachain.
+///
+/// Also refunds relayer for successful finality delivery if it comes in batch (`utility.batchAll`)
+/// with message delivery transaction. Batch may deliver either both relay chain header and
+/// parachain head, or just parachain head. Corresponding headers must be used in messages
+/// proof verification.
+///
+/// Extension does not refund transaction tip due to security reasons.
+#[derive(
+	DefaultNoBound,
+	CloneNoBound,
+	Decode,
+	Encode,
+	EqNoBound,
+	PartialEqNoBound,
+	RuntimeDebugNoBound,
+	TypeInfo,
+)]
+#[scale_info(skip_type_params(Runtime, Para, Msgs, Refund, Priority, Id))]
+pub struct RefundBridgedParachainMessages<Runtime, Para, Msgs, Refund, Priority, Id>(
+	PhantomData<(
+		// runtime with `frame-utility`, `pallet-bridge-grandpa`, `pallet-bridge-parachains`,
+		// `pallet-bridge-messages` and `pallet-bridge-relayers` pallets deployed
+		Runtime,
+		// implementation of `RefundableParachainId` trait, which specifies the instance of
+		// the used `pallet-bridge-parachains` pallet and the bridged parachain id
+		Para,
+		// implementation of `RefundableMessagesLaneId` trait, which specifies the instance of
+		// the used `pallet-bridge-messages` pallet and the lane within this pallet
+		Msgs,
+		// implementation of the `RefundCalculator` trait, that is used to compute refund that
+		// we give to relayer for his transaction
+		Refund,
+		// getter for per-message `TransactionPriority` boost that we give to message
+		// delivery transactions
+		Priority,
+		// the runtime-unique identifier of this signed extension
+		Id,
+	)>,
+);
+
+impl<Runtime, Para, Msgs, Refund, Priority, Id> RefundSignedExtension
+	for RefundBridgedParachainMessages<Runtime, Para, Msgs, Refund, Priority, Id>
+where
+	Self: 'static + Send + Sync,
+	Runtime: UtilityConfig<RuntimeCall = CallOf<Runtime>>
+		+ BoundedBridgeGrandpaConfig<Runtime::BridgesGrandpaPalletInstance>
+		+ ParachainsConfig<Para::Instance>
+		+ MessagesConfig<Msgs::Instance>
+		+ RelayersConfig,
+	Para: RefundableParachainId,
+	Msgs: RefundableMessagesLaneId,
+	Refund: RefundCalculator<Balance = Runtime::Reward>,
+	Priority: Get<TransactionPriority>,
+	Id: StaticStrProvider,
+	CallOf<Runtime>: Dispatchable<Info = DispatchInfo, PostInfo = PostDispatchInfo>
+		+ IsSubType<CallableCallFor<UtilityPallet<Runtime>, Runtime>>
+		+ GrandpaCallSubType<Runtime, Runtime::BridgesGrandpaPalletInstance>
+		+ ParachainsCallSubType<Runtime, Para::Instance>
+		+ MessagesCallSubType<Runtime, Msgs::Instance>,
+{
+	type Runtime = Runtime;
+	type GrandpaInstance = Runtime::BridgesGrandpaPalletInstance;
+	type Msgs = Msgs;
+	type Refund = Refund;
+	type Priority = Priority;
+	type Id = Id;
+
+	fn expand_call(call: &CallOf<Runtime>) -> Vec<&CallOf<Runtime>> {
+		match call.is_sub_type() {
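+			// the batch is only expanded if it contains at most 3 calls: relay chain
+			// finality, parachain finality and the messages (delivery/confirmation) call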
+			Some(UtilityCall::<Runtime>::batch_all { ref calls }) if calls.len() <= 3 =>
+				calls.iter().collect(),
+			Some(_) => vec![],
+			None => vec![call],
+		}
+	}
+
+	fn parse_and_check_for_obsolete_call(
+		call: &CallOf<Runtime>,
+	) -> Result<Option<CallInfo>, TransactionValidityError> {
+		let calls = Self::expand_call(call);
+		let total_calls = calls.len();
+		let mut calls = calls.into_iter().map(Self::check_obsolete_parsed_call).rev();
+
+		let msgs_call = calls.next().transpose()?.and_then(|c| c.call_info_for(Msgs::Id::get()));
+		let para_finality_call = calls
+			.next()
+			.transpose()?
+			.and_then(|c| c.submit_parachain_heads_info_for(Para::Id::get()));
+		let relay_finality_call =
+			calls.next().transpose()?.and_then(|c| c.submit_finality_proof_info());
+
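+		// accepted shapes: [relay finality, para finality, msgs], [para finality, msgs]
+		// or a standalone msgs call; everything else is ignored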
+		Ok(match (total_calls, relay_finality_call, para_finality_call, msgs_call) {
+			(3, Some(relay_finality_call), Some(para_finality_call), Some(msgs_call)) => Some(
+				CallInfo::AllFinalityAndMsgs(relay_finality_call, para_finality_call, msgs_call),
+			),
+			(2, None, Some(para_finality_call), Some(msgs_call)) =>
+				Some(CallInfo::ParachainFinalityAndMsgs(para_finality_call, msgs_call)),
+			(1, None, None, Some(msgs_call)) => Some(CallInfo::Msgs(msgs_call)),
+			_ => None,
+		})
+	}
+
+	fn check_obsolete_parsed_call(
+		call: &CallOf<Runtime>,
+	) -> Result<&CallOf<Runtime>, TransactionValidityError> {
+		call.check_obsolete_submit_finality_proof()?;
+		call.check_obsolete_submit_parachain_heads()?;
+		call.check_obsolete_call()?;
+		Ok(call)
+	}
+
+	fn additional_call_result_check(relayer: &Runtime::AccountId, call_info: &CallInfo) -> bool {
+		// check if parachain state has been updated
+		if let Some(para_proof_info) = call_info.submit_parachain_heads_info() {
+			if !SubmitParachainHeadsHelper::<Runtime, Para::Instance>::was_successful(
+				para_proof_info,
+			) {
+				// we only refund relayer if all calls have updated chain state
+				log::trace!(
+					target: "runtime::bridge",
+					"{} from parachain {} via {:?}: relayer {:?} has submitted invalid parachain finality proof",
+					Id::STR,
+					Para::Id::get(),
+					Msgs::Id::get(),
+					relayer,
+				);
+				return false
+			}
+		}
+
+		true
+	}
+}
+
+/// Signed extension that refunds a relayer for new messages coming from a standalone (GRANDPA)
+/// chain.
+///
+/// Also refunds relayer for successful finality delivery if it comes in batch (`utility.batchAll`)
+/// with message delivery transaction. Batch may deliver the bridged chain header along with the
+/// messages call. The corresponding header must be used in messages proof verification.
+///
+/// Extension does not refund transaction tip due to security reasons.
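+///
+/// A hypothetical instantiation sketch (type names other than those defined in this file
+/// are illustrative):
+///
+/// ```ignore
+/// pub type BridgeRefundExtension = RefundSignedExtensionAdapter<
+///     RefundBridgedGrandpaMessages<
+///         Runtime,
+///         WithBridgedChainGrandpaInstance,
+///         RefundableMessagesLane<WithBridgedChainMessagesInstance, BridgeLane>,
+///         ActualFeeRefund<Runtime>,
+///         PriorityBoost,
+///         StrBridgeRefundExtension,
+///     >,
+/// >;
+/// ```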
+#[derive(
+	DefaultNoBound,
+	CloneNoBound,
+	Decode,
+	Encode,
+	EqNoBound,
+	PartialEqNoBound,
+	RuntimeDebugNoBound,
+	TypeInfo,
+)]
+#[scale_info(skip_type_params(Runtime, GrandpaInstance, Msgs, Refund, Priority, Id))]
+pub struct RefundBridgedGrandpaMessages<Runtime, GrandpaInstance, Msgs, Refund, Priority, Id>(
+	PhantomData<(
+		// runtime with `frame-utility`, `pallet-bridge-grandpa`,
+		// `pallet-bridge-messages` and `pallet-bridge-relayers` pallets deployed
+		Runtime,
+		// bridge GRANDPA pallet instance, used to track bridged chain state
+		GrandpaInstance,
+		// implementation of `RefundableMessagesLaneId` trait, which specifies the instance of
+		// the used `pallet-bridge-messages` pallet and the lane within this pallet
+		Msgs,
+		// implementation of the `RefundCalculator` trait, that is used to compute refund that
+		// we give to relayer for his transaction
+		Refund,
+		// getter for per-message `TransactionPriority` boost that we give to message
+		// delivery transactions
+		Priority,
+		// the runtime-unique identifier of this signed extension
+		Id,
+	)>,
+);
+
+impl<Runtime, GrandpaInstance, Msgs, Refund, Priority, Id> RefundSignedExtension
+	for RefundBridgedGrandpaMessages<Runtime, GrandpaInstance, Msgs, Refund, Priority, Id>
+where
+	Self: 'static + Send + Sync,
+	Runtime: UtilityConfig<RuntimeCall = CallOf<Runtime>>
+		+ BoundedBridgeGrandpaConfig<GrandpaInstance>
+		+ MessagesConfig<Msgs::Instance>
+		+ RelayersConfig,
+	GrandpaInstance: 'static,
+	Msgs: RefundableMessagesLaneId,
+	Refund: RefundCalculator<Balance = Runtime::Reward>,
+	Priority: Get<TransactionPriority>,
+	Id: StaticStrProvider,
+	CallOf<Runtime>: Dispatchable<Info = DispatchInfo, PostInfo = PostDispatchInfo>
+		+ IsSubType<CallableCallFor<UtilityPallet<Runtime>, Runtime>>
+		+ GrandpaCallSubType<Runtime, GrandpaInstance>
+		+ MessagesCallSubType<Runtime, Msgs::Instance>,
+{
+	type Runtime = Runtime;
+	type GrandpaInstance = GrandpaInstance;
+	type Msgs = Msgs;
+	type Refund = Refund;
+	type Priority = Priority;
+	type Id = Id;
+
+	fn expand_call(call: &CallOf<Runtime>) -> Vec<&CallOf<Runtime>> {
+		match call.is_sub_type() {
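+			// the batch is only expanded if it contains at most 2 calls: relay chain
+			// finality and the messages (delivery/confirmation) call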
+			Some(UtilityCall::<Runtime>::batch_all { ref calls }) if calls.len() <= 2 =>
+				calls.iter().collect(),
+			Some(_) => vec![],
+			None => vec![call],
+		}
+	}
+
+	fn parse_and_check_for_obsolete_call(
+		call: &CallOf<Runtime>,
+	) -> Result<Option<CallInfo>, TransactionValidityError> {
+		let calls = Self::expand_call(call);
+		let total_calls = calls.len();
+		let mut calls = calls.into_iter().map(Self::check_obsolete_parsed_call).rev();
+
+		let msgs_call = calls.next().transpose()?.and_then(|c| c.call_info_for(Msgs::Id::get()));
+		let relay_finality_call =
+			calls.next().transpose()?.and_then(|c| c.submit_finality_proof_info());
+
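+		// accepted shapes: [relay finality, msgs] or a standalone msgs call;
+		// everything else is ignored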
+		Ok(match (total_calls, relay_finality_call, msgs_call) {
+			(2, Some(relay_finality_call), Some(msgs_call)) =>
+				Some(CallInfo::RelayFinalityAndMsgs(relay_finality_call, msgs_call)),
+			(1, None, Some(msgs_call)) => Some(CallInfo::Msgs(msgs_call)),
+			_ => None,
+		})
+	}
+
+	fn check_obsolete_parsed_call(
+		call: &CallOf<Runtime>,
+	) -> Result<&CallOf<Runtime>, TransactionValidityError> {
+		call.check_obsolete_submit_finality_proof()?;
+		call.check_obsolete_call()?;
+		Ok(call)
+	}
+
+	fn additional_call_result_check(_relayer: &Runtime::AccountId, _call_info: &CallInfo) -> bool {
+		true
+	}
+}
+
 #[cfg(test)]
 mod tests {
 	use super::*;
@@ -655,19 +837,24 @@ mod tests {
 		},
 		mock::*,
 	};
-	use bp_messages::{InboundLaneData, MessageNonce, OutboundLaneData, UnrewardedRelayersState};
+	use bp_messages::{
+		DeliveredMessages, InboundLaneData, MessageNonce, MessagesOperatingMode, OutboundLaneData,
+		UnrewardedRelayer, UnrewardedRelayersState,
+	};
 	use bp_parachains::{BestParaHeadHash, ParaInfo};
 	use bp_polkadot_core::parachains::{ParaHeadsProof, ParaId};
-	use bp_runtime::HeaderId;
+	use bp_runtime::{BasicOperatingMode, HeaderId};
 	use bp_test_utils::{make_default_justification, test_keyring};
 	use frame_support::{
 		assert_storage_noop, parameter_types,
 		traits::{fungible::Mutate, ReservableCurrency},
 		weights::Weight,
 	};
-	use pallet_bridge_grandpa::{Call as GrandpaCall, StoredAuthoritySet};
-	use pallet_bridge_messages::Call as MessagesCall;
-	use pallet_bridge_parachains::{Call as ParachainsCall, RelayBlockHash};
+	use pallet_bridge_grandpa::{Call as GrandpaCall, Pallet as GrandpaPallet, StoredAuthoritySet};
+	use pallet_bridge_messages::{Call as MessagesCall, Pallet as MessagesPallet};
+	use pallet_bridge_parachains::{
+		Call as ParachainsCall, Pallet as ParachainsPallet, RelayBlockHash,
+	};
 	use sp_runtime::{
 		traits::{ConstU64, Header as HeaderT},
 		transaction_validity::{InvalidTransaction, ValidTransaction},
@@ -690,7 +877,17 @@ mod tests {
 	}
 
 	bp_runtime::generate_static_str_provider!(TestExtension);
-	type TestExtension = RefundBridgedParachainMessages<
+
+	type TestGrandpaExtensionProvider = RefundBridgedGrandpaMessages<
+		TestRuntime,
+		(),
+		RefundableMessagesLane<(), TestLaneId>,
+		ActualFeeRefund<TestRuntime>,
+		ConstU64<1>,
+		StrTestExtension,
+	>;
+	type TestGrandpaExtension = RefundSignedExtensionAdapter<TestGrandpaExtensionProvider>;
+	type TestExtensionProvider = RefundBridgedParachainMessages<
 		TestRuntime,
 		DefaultRefundableParachainId<(), TestParachain>,
 		RefundableMessagesLane<(), TestLaneId>,
@@ -698,6 +895,7 @@ mod tests {
 		ConstU64<1>,
 		StrTestExtension,
 	>;
+	type TestExtension = RefundSignedExtensionAdapter<TestExtensionProvider>;
 
 	fn initial_balance_of_relayer_account_at_this_chain() -> ThisChainBalance {
 		let test_stake: ThisChainBalance = TestStake::get();
@@ -825,25 +1023,49 @@ mod tests {
 		})
 	}
 
-	fn parachain_finality_and_delivery_batch_call(
-		parachain_head_at_relay_header_number: RelayBlockNumber,
+	fn parachain_finality_and_delivery_batch_call(
+		parachain_head_at_relay_header_number: RelayBlockNumber,
+		best_message: MessageNonce,
+	) -> RuntimeCall {
+		RuntimeCall::Utility(UtilityCall::batch_all {
+			calls: vec![
+				submit_parachain_head_call(parachain_head_at_relay_header_number),
+				message_delivery_call(best_message),
+			],
+		})
+	}
+
+	fn parachain_finality_and_confirmation_batch_call(
+		parachain_head_at_relay_header_number: RelayBlockNumber,
+		best_message: MessageNonce,
+	) -> RuntimeCall {
+		RuntimeCall::Utility(UtilityCall::batch_all {
+			calls: vec![
+				submit_parachain_head_call(parachain_head_at_relay_header_number),
+				message_confirmation_call(best_message),
+			],
+		})
+	}
+
+	fn relay_finality_and_delivery_batch_call(
+		relay_header_number: RelayBlockNumber,
 		best_message: MessageNonce,
 	) -> RuntimeCall {
 		RuntimeCall::Utility(UtilityCall::batch_all {
 			calls: vec![
-				submit_parachain_head_call(parachain_head_at_relay_header_number),
+				submit_relay_header_call(relay_header_number),
 				message_delivery_call(best_message),
 			],
 		})
 	}
 
-	fn parachain_finality_and_confirmation_batch_call(
-		parachain_head_at_relay_header_number: RelayBlockNumber,
+	fn relay_finality_and_confirmation_batch_call(
+		relay_header_number: RelayBlockNumber,
 		best_message: MessageNonce,
 	) -> RuntimeCall {
 		RuntimeCall::Utility(UtilityCall::batch_all {
 			calls: vec![
-				submit_parachain_head_call(parachain_head_at_relay_header_number),
+				submit_relay_header_call(relay_header_number),
 				message_confirmation_call(best_message),
 			],
 		})
@@ -931,6 +1153,50 @@ mod tests {
 		}
 	}
 
+	fn relay_finality_pre_dispatch_data() -> PreDispatchData<ThisChainAccountId> {
+		PreDispatchData {
+			relayer: relayer_account_at_this_chain(),
+			call_info: CallInfo::RelayFinalityAndMsgs(
+				SubmitFinalityProofInfo {
+					block_number: 200,
+					extra_weight: Weight::zero(),
+					extra_size: 0,
+				},
+				MessagesCallInfo::ReceiveMessagesProof(ReceiveMessagesProofInfo {
+					base: BaseMessagesProofInfo {
+						lane_id: TEST_LANE_ID,
+						bundled_range: 101..=200,
+						best_stored_nonce: 100,
+					},
+					unrewarded_relayers: UnrewardedRelayerOccupation {
+						free_relayer_slots: MaxUnrewardedRelayerEntriesAtInboundLane::get(),
+						free_message_slots: MaxUnconfirmedMessagesAtInboundLane::get(),
+					},
+				}),
+			),
+		}
+	}
+
+	fn relay_finality_confirmation_pre_dispatch_data() -> PreDispatchData<ThisChainAccountId> {
+		PreDispatchData {
+			relayer: relayer_account_at_this_chain(),
+			call_info: CallInfo::RelayFinalityAndMsgs(
+				SubmitFinalityProofInfo {
+					block_number: 200,
+					extra_weight: Weight::zero(),
+					extra_size: 0,
+				},
+				MessagesCallInfo::ReceiveMessagesDeliveryProof(ReceiveMessagesDeliveryProofInfo(
+					BaseMessagesProofInfo {
+						lane_id: TEST_LANE_ID,
+						bundled_range: 101..=200,
+						best_stored_nonce: 100,
+					},
+				)),
+			),
+		}
+	}
+
 	fn parachain_finality_pre_dispatch_data() -> PreDispatchData<ThisChainAccountId> {
 		PreDispatchData {
 			relayer: relayer_account_at_this_chain(),
@@ -1013,6 +1279,7 @@ mod tests {
 	) -> PreDispatchData<ThisChainAccountId> {
 		let msg_info = match pre_dispatch_data.call_info {
 			CallInfo::AllFinalityAndMsgs(_, _, ref mut info) => info,
+			CallInfo::RelayFinalityAndMsgs(_, ref mut info) => info,
 			CallInfo::ParachainFinalityAndMsgs(_, ref mut info) => info,
 			CallInfo::Msgs(ref mut info) => info,
 		};
@@ -1025,7 +1292,14 @@ mod tests {
 	}
 
 	fn run_validate(call: RuntimeCall) -> TransactionValidity {
-		let extension: TestExtension = RefundBridgedParachainMessages(PhantomData);
+		let extension: TestExtension =
+			RefundSignedExtensionAdapter(RefundBridgedParachainMessages(PhantomData));
+		extension.validate(&relayer_account_at_this_chain(), &call, &DispatchInfo::default(), 0)
+	}
+
+	fn run_grandpa_validate(call: RuntimeCall) -> TransactionValidity {
+		let extension: TestGrandpaExtension =
+			RefundSignedExtensionAdapter(RefundBridgedGrandpaMessages(PhantomData));
 		extension.validate(&relayer_account_at_this_chain(), &call, &DispatchInfo::default(), 0)
 	}
 
@@ -1039,7 +1313,16 @@ mod tests {
 	fn run_pre_dispatch(
 		call: RuntimeCall,
 	) -> Result<Option<PreDispatchData<ThisChainAccountId>>, TransactionValidityError> {
-		let extension: TestExtension = RefundBridgedParachainMessages(PhantomData);
+		let extension: TestExtension =
+			RefundSignedExtensionAdapter(RefundBridgedParachainMessages(PhantomData));
+		extension.pre_dispatch(&relayer_account_at_this_chain(), &call, &DispatchInfo::default(), 0)
+	}
+
+	fn run_grandpa_pre_dispatch(
+		call: RuntimeCall,
+	) -> Result<Option<PreDispatchData<ThisChainAccountId>>, TransactionValidityError> {
+		let extension: TestGrandpaExtension =
+			RefundSignedExtensionAdapter(RefundBridgedGrandpaMessages(PhantomData));
 		extension.pre_dispatch(&relayer_account_at_this_chain(), &call, &DispatchInfo::default(), 0)
 	}
 
@@ -1311,6 +1594,99 @@ mod tests {
 		});
 	}
 
+	#[test]
+	fn ext_rejects_batch_with_grandpa_finality_proof_when_grandpa_pallet_is_halted() {
+		run_test(|| {
+			initialize_environment(100, 100, 100);
+
+			GrandpaPallet::<TestRuntime, ()>::set_operating_mode(
+				RuntimeOrigin::root(),
+				BasicOperatingMode::Halted,
+			)
+			.unwrap();
+
+			assert_eq!(
+				run_pre_dispatch(all_finality_and_delivery_batch_call(200, 200, 200)),
+				Err(TransactionValidityError::Invalid(InvalidTransaction::Call)),
+			);
+			assert_eq!(
+				run_pre_dispatch(all_finality_and_confirmation_batch_call(200, 200, 200)),
+				Err(TransactionValidityError::Invalid(InvalidTransaction::Call)),
+			);
+		});
+	}
+
+	#[test]
+	fn ext_rejects_batch_with_parachain_finality_proof_when_parachains_pallet_is_halted() {
+		run_test(|| {
+			initialize_environment(100, 100, 100);
+
+			ParachainsPallet::<TestRuntime, ()>::set_operating_mode(
+				RuntimeOrigin::root(),
+				BasicOperatingMode::Halted,
+			)
+			.unwrap();
+
+			assert_eq!(
+				run_pre_dispatch(all_finality_and_delivery_batch_call(200, 200, 200)),
+				Err(TransactionValidityError::Invalid(InvalidTransaction::Call)),
+			);
+			assert_eq!(
+				run_pre_dispatch(all_finality_and_confirmation_batch_call(200, 200, 200)),
+				Err(TransactionValidityError::Invalid(InvalidTransaction::Call)),
+			);
+
+			assert_eq!(
+				run_pre_dispatch(parachain_finality_and_delivery_batch_call(200, 200)),
+				Err(TransactionValidityError::Invalid(InvalidTransaction::Call)),
+			);
+			assert_eq!(
+				run_pre_dispatch(parachain_finality_and_confirmation_batch_call(200, 200)),
+				Err(TransactionValidityError::Invalid(InvalidTransaction::Call)),
+			);
+		});
+	}
+
+	#[test]
+	fn ext_rejects_transaction_when_messages_pallet_is_halted() {
+		run_test(|| {
+			initialize_environment(100, 100, 100);
+
+			MessagesPallet::<TestRuntime, ()>::set_operating_mode(
+				RuntimeOrigin::root(),
+				MessagesOperatingMode::Basic(BasicOperatingMode::Halted),
+			)
+			.unwrap();
+
+			assert_eq!(
+				run_pre_dispatch(all_finality_and_delivery_batch_call(200, 200, 200)),
+				Err(TransactionValidityError::Invalid(InvalidTransaction::Call)),
+			);
+			assert_eq!(
+				run_pre_dispatch(all_finality_and_confirmation_batch_call(200, 200, 200)),
+				Err(TransactionValidityError::Invalid(InvalidTransaction::Call)),
+			);
+
+			assert_eq!(
+				run_pre_dispatch(parachain_finality_and_delivery_batch_call(200, 200)),
+				Err(TransactionValidityError::Invalid(InvalidTransaction::Call)),
+			);
+			assert_eq!(
+				run_pre_dispatch(parachain_finality_and_confirmation_batch_call(200, 200)),
+				Err(TransactionValidityError::Invalid(InvalidTransaction::Call)),
+			);
+
+			assert_eq!(
+				run_pre_dispatch(message_delivery_call(200)),
+				Err(TransactionValidityError::Invalid(InvalidTransaction::Call)),
+			);
+			assert_eq!(
+				run_pre_dispatch(message_confirmation_call(200)),
+				Err(TransactionValidityError::Invalid(InvalidTransaction::Call)),
+			);
+		});
+	}
+
 	#[test]
 	fn pre_dispatch_parses_batch_with_relay_chain_and_parachain_headers() {
 		run_test(|| {
@@ -1674,7 +2050,7 @@ mod tests {
 		pre_dispatch_data: PreDispatchData<ThisChainAccountId>,
 		dispatch_result: DispatchResult,
 	) -> RelayerAccountAction<ThisChainAccountId, ThisChainBalance> {
-		TestExtension::analyze_call_result(
+		TestExtensionProvider::analyze_call_result(
 			Some(Some(pre_dispatch_data)),
 			&dispatch_info(),
 			&post_dispatch_info(),
@@ -1737,4 +2113,209 @@ mod tests {
 			);
 		});
 	}
+
+	#[test]
+	fn grandpa_ext_only_parses_valid_batches() {
+		run_test(|| {
+			initialize_environment(100, 100, 100);
+
+			// relay + parachain + message delivery calls batch is ignored
+			assert_eq!(
+				TestGrandpaExtensionProvider::parse_and_check_for_obsolete_call(
+					&all_finality_and_delivery_batch_call(200, 200, 200)
+				),
+				Ok(None),
+			);
+
+			// relay + parachain + message confirmation calls batch is ignored
+			assert_eq!(
+				TestGrandpaExtensionProvider::parse_and_check_for_obsolete_call(
+					&all_finality_and_confirmation_batch_call(200, 200, 200)
+				),
+				Ok(None),
+			);
+
+			// parachain + message delivery call batch is ignored
+			assert_eq!(
+				TestGrandpaExtensionProvider::parse_and_check_for_obsolete_call(
+					&parachain_finality_and_delivery_batch_call(200, 200)
+				),
+				Ok(None),
+			);
+
+			// parachain + message confirmation call batch is ignored
+			assert_eq!(
+				TestGrandpaExtensionProvider::parse_and_check_for_obsolete_call(
+					&parachain_finality_and_confirmation_batch_call(200, 200)
+				),
+				Ok(None),
+			);
+
+			// relay + message delivery call batch is accepted
+			assert_eq!(
+				TestGrandpaExtensionProvider::parse_and_check_for_obsolete_call(
+					&relay_finality_and_delivery_batch_call(200, 200)
+				),
+				Ok(Some(relay_finality_pre_dispatch_data().call_info)),
+			);
+
+			// relay + message confirmation call batch is accepted
+			assert_eq!(
+				TestGrandpaExtensionProvider::parse_and_check_for_obsolete_call(
+					&relay_finality_and_confirmation_batch_call(200, 200)
+				),
+				Ok(Some(relay_finality_confirmation_pre_dispatch_data().call_info)),
+			);
+
+			// standalone message delivery call is accepted
+			assert_eq!(
+				TestGrandpaExtensionProvider::parse_and_check_for_obsolete_call(
+					&message_delivery_call(200)
+				),
+				Ok(Some(delivery_pre_dispatch_data().call_info)),
+			);
+
+			// standalone message confirmation call is accepted
+			assert_eq!(
+				TestGrandpaExtensionProvider::parse_and_check_for_obsolete_call(
+					&message_confirmation_call(200)
+				),
+				Ok(Some(confirmation_pre_dispatch_data().call_info)),
+			);
+		});
+	}
+
+	#[test]
+	fn grandpa_ext_rejects_batch_with_obsolete_relay_chain_header() {
+		run_test(|| {
+			initialize_environment(100, 100, 100);
+
+			assert_eq!(
+				run_grandpa_pre_dispatch(relay_finality_and_delivery_batch_call(100, 200)),
+				Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)),
+			);
+
+			assert_eq!(
+				run_grandpa_validate(relay_finality_and_delivery_batch_call(100, 200)),
+				Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)),
+			);
+		});
+	}
+
+	#[test]
+	fn grandpa_ext_rejects_calls_with_obsolete_messages() {
+		run_test(|| {
+			initialize_environment(100, 100, 100);
+
+			assert_eq!(
+				run_grandpa_pre_dispatch(relay_finality_and_delivery_batch_call(200, 100)),
+				Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)),
+			);
+			assert_eq!(
+				run_grandpa_pre_dispatch(relay_finality_and_confirmation_batch_call(200, 100)),
+				Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)),
+			);
+
+			assert_eq!(
+				run_grandpa_validate(relay_finality_and_delivery_batch_call(200, 100)),
+				Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)),
+			);
+			assert_eq!(
+				run_grandpa_validate(relay_finality_and_confirmation_batch_call(200, 100)),
+				Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)),
+			);
+
+			assert_eq!(
+				run_grandpa_pre_dispatch(message_delivery_call(100)),
+				Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)),
+			);
+			assert_eq!(
+				run_grandpa_pre_dispatch(message_confirmation_call(100)),
+				Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)),
+			);
+
+			assert_eq!(
+				run_grandpa_validate(message_delivery_call(100)),
+				Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)),
+			);
+			assert_eq!(
+				run_grandpa_validate(message_confirmation_call(100)),
+				Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)),
+			);
+		});
+	}
+
+	#[test]
+	fn grandpa_ext_accepts_calls_with_new_messages() {
+		run_test(|| {
+			initialize_environment(100, 100, 100);
+
+			assert_eq!(
+				run_grandpa_pre_dispatch(relay_finality_and_delivery_batch_call(200, 200)),
+				Ok(Some(relay_finality_pre_dispatch_data())),
+			);
+			assert_eq!(
+				run_grandpa_pre_dispatch(relay_finality_and_confirmation_batch_call(200, 200)),
+				Ok(Some(relay_finality_confirmation_pre_dispatch_data())),
+			);
+
+			assert_eq!(
+				run_grandpa_validate(relay_finality_and_delivery_batch_call(200, 200)),
+				Ok(Default::default()),
+			);
+			assert_eq!(
+				run_grandpa_validate(relay_finality_and_confirmation_batch_call(200, 200)),
+				Ok(Default::default()),
+			);
+
+			assert_eq!(
+				run_grandpa_pre_dispatch(message_delivery_call(200)),
+				Ok(Some(delivery_pre_dispatch_data())),
+			);
+			assert_eq!(
+				run_grandpa_pre_dispatch(message_confirmation_call(200)),
+				Ok(Some(confirmation_pre_dispatch_data())),
+			);
+
+			assert_eq!(run_grandpa_validate(message_delivery_call(200)), Ok(Default::default()),);
+			assert_eq!(
+				run_grandpa_validate(message_confirmation_call(200)),
+				Ok(Default::default()),
+			);
+		});
+	}
+
+	#[test]
+	fn does_not_panic_on_boosting_priority_of_empty_message_delivery_transaction() {
+		run_test(|| {
+			let best_delivered_message = MaxUnconfirmedMessagesAtInboundLane::get();
+			initialize_environment(100, 100, best_delivered_message);
+
+			// register relayer so it gets priority boost
+			BridgeRelayers::register(RuntimeOrigin::signed(relayer_account_at_this_chain()), 1000)
+				.unwrap();
+
+			// allow empty message delivery transactions
+			let lane_id = TestLaneId::get();
+			let in_lane_data = InboundLaneData {
+				last_confirmed_nonce: 0,
+				relayers: vec![UnrewardedRelayer {
+					relayer: relayer_account_at_bridged_chain(),
+					messages: DeliveredMessages { begin: 1, end: best_delivered_message },
+				}]
+				.into(),
+			};
+			pallet_bridge_messages::InboundLanes::<TestRuntime>::insert(lane_id, in_lane_data);
+
+			// now check that the priority of empty tx is the same as priority of 1-message tx
+			let priority_of_zero_messages_delivery =
+				run_validate(message_delivery_call(best_delivered_message)).unwrap().priority;
+			let priority_of_one_messages_delivery =
+				run_validate(message_delivery_call(best_delivered_message + 1))
+					.unwrap()
+					.priority;
+
+			assert_eq!(priority_of_zero_messages_delivery, priority_of_one_messages_delivery);
+		});
+	}
 }
diff --git a/bridges/modules/grandpa/src/call_ext.rs b/bridges/modules/grandpa/src/call_ext.rs
index e0648d5dd0f1d..f238064f92bca 100644
--- a/bridges/modules/grandpa/src/call_ext.rs
+++ b/bridges/modules/grandpa/src/call_ext.rs
@@ -16,7 +16,7 @@
 
 use crate::{weights::WeightInfo, BridgedBlockNumber, BridgedHeader, Config, Error, Pallet};
 use bp_header_chain::{justification::GrandpaJustification, ChainWithGrandpa};
-use bp_runtime::BlockNumberOf;
+use bp_runtime::{BlockNumberOf, OwnedBridgeModule};
 use codec::Encode;
 use frame_support::{dispatch::CallableCallFor, traits::IsSubType, weights::Weight};
 use sp_runtime::{
@@ -126,6 +126,10 @@ pub trait CallSubType<T: Config<I, RuntimeCall = Self>, I: 'static>:
 			_ => return Ok(ValidTransaction::default()),
 		};
 
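+		// reject the transaction if the pallet is halted - dispatching the call would fail anyway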
+		if Pallet::<T, I>::ensure_not_halted().is_err() {
+			return InvalidTransaction::Call.into()
+		}
+
 		match SubmitFinalityProofHelper::<T, I>::check_obsolete(finality_target.block_number) {
 			Ok(_) => Ok(ValidTransaction::default()),
 			Err(Error::<T, I>::OldHeader) => InvalidTransaction::Stale.into(),
@@ -192,10 +196,10 @@ mod tests {
 	use crate::{
 		call_ext::CallSubType,
 		mock::{run_test, test_header, RuntimeCall, TestBridgedChain, TestNumber, TestRuntime},
-		BestFinalized, Config, WeightInfo,
+		BestFinalized, Config, PalletOperatingMode, WeightInfo,
 	};
 	use bp_header_chain::ChainWithGrandpa;
-	use bp_runtime::HeaderId;
+	use bp_runtime::{BasicOperatingMode, HeaderId};
 	use bp_test_utils::{
 		make_default_justification, make_justification_for_header, JustificationGeneratorParams,
 	};
@@ -238,6 +242,17 @@ mod tests {
 		});
 	}
 
+	#[test]
+	fn extension_rejects_new_header_if_pallet_is_halted() {
+		run_test(|| {
+			// when pallet is halted => tx is rejected
+			sync_to_header_10();
+			PalletOperatingMode::<TestRuntime, ()>::put(BasicOperatingMode::Halted);
+
+			assert!(!validate_block_submit(15));
+		});
+	}
+
 	#[test]
 	fn extension_accepts_new_header() {
 		run_test(|| {
diff --git a/bridges/modules/parachains/src/call_ext.rs b/bridges/modules/parachains/src/call_ext.rs
index 99640dadc61f4..198ff11be4951 100644
--- a/bridges/modules/parachains/src/call_ext.rs
+++ b/bridges/modules/parachains/src/call_ext.rs
@@ -17,6 +17,7 @@
 use crate::{Config, Pallet, RelayBlockNumber};
 use bp_parachains::BestParaHeadHash;
 use bp_polkadot_core::parachains::{ParaHash, ParaId};
+use bp_runtime::OwnedBridgeModule;
 use frame_support::{dispatch::CallableCallFor, traits::IsSubType};
 use sp_runtime::{
 	transaction_validity::{InvalidTransaction, TransactionValidity, ValidTransaction},
@@ -141,6 +142,10 @@ pub trait CallSubType<T: Config<I, RuntimeCall = Self>, I: 'static>:
 			None => return Ok(ValidTransaction::default()),
 		};
 
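+		// reject the transaction if the pallet is halted - dispatching the call would fail anyway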
+		if Pallet::<T, I>::ensure_not_halted().is_err() {
+			return InvalidTransaction::Call.into()
+		}
+
 		if SubmitParachainHeadsHelper::<T, I>::is_obsolete(&update) {
 			return InvalidTransaction::Stale.into()
 		}
@@ -160,10 +165,11 @@ where
 mod tests {
 	use crate::{
 		mock::{run_test, RuntimeCall, TestRuntime},
-		CallSubType, ParaInfo, ParasInfo, RelayBlockNumber,
+		CallSubType, PalletOperatingMode, ParaInfo, ParasInfo, RelayBlockNumber,
 	};
 	use bp_parachains::BestParaHeadHash;
 	use bp_polkadot_core::parachains::{ParaHash, ParaHeadsProof, ParaId};
+	use bp_runtime::BasicOperatingMode;
 
 	fn validate_submit_parachain_heads(
 		num: RelayBlockNumber,
@@ -221,6 +227,17 @@ mod tests {
 		});
 	}
 
+	#[test]
+	fn extension_rejects_header_if_pallet_is_halted() {
+		run_test(|| {
+			// when pallet is halted => tx is rejected
+			sync_to_relay_header_10();
+			PalletOperatingMode::<TestRuntime, ()>::put(BasicOperatingMode::Halted);
+
+			assert!(!validate_submit_parachain_heads(15, vec![(ParaId(1), [2u8; 32].into())]));
+		});
+	}
+
 	#[test]
 	fn extension_accepts_new_header() {
 		run_test(|| {
diff --git a/bridges/primitives/chain-bridge-hub-kusama/src/lib.rs b/bridges/primitives/chain-bridge-hub-kusama/src/lib.rs
index 3a919648df47f..66e0dad05895c 100644
--- a/bridges/primitives/chain-bridge-hub-kusama/src/lib.rs
+++ b/bridges/primitives/chain-bridge-hub-kusama/src/lib.rs
@@ -29,7 +29,6 @@ use frame_support::{
 	sp_runtime::{MultiAddress, MultiSigner},
 };
 use sp_runtime::RuntimeDebug;
-use sp_std::prelude::Vec;
 
 /// BridgeHubKusama parachain.
 #[derive(RuntimeDebug)]
diff --git a/bridges/primitives/chain-bridge-hub-polkadot/src/lib.rs b/bridges/primitives/chain-bridge-hub-polkadot/src/lib.rs
index bf8d8e07c3a61..c3661c1adcada 100644
--- a/bridges/primitives/chain-bridge-hub-polkadot/src/lib.rs
+++ b/bridges/primitives/chain-bridge-hub-polkadot/src/lib.rs
@@ -26,7 +26,6 @@ use bp_runtime::{
 };
 use frame_support::dispatch::DispatchClass;
 use sp_runtime::RuntimeDebug;
-use sp_std::prelude::Vec;
 
 /// BridgeHubPolkadot parachain.
 #[derive(RuntimeDebug)]
diff --git a/bridges/primitives/chain-bridge-hub-rococo/src/lib.rs b/bridges/primitives/chain-bridge-hub-rococo/src/lib.rs
index b726c62ac42b3..a50bda23ac8d3 100644
--- a/bridges/primitives/chain-bridge-hub-rococo/src/lib.rs
+++ b/bridges/primitives/chain-bridge-hub-rococo/src/lib.rs
@@ -26,7 +26,7 @@ use bp_runtime::{
 };
 use frame_support::dispatch::DispatchClass;
 use sp_runtime::{MultiAddress, MultiSigner, RuntimeDebug};
-use sp_std::prelude::Vec;
+
 /// BridgeHubRococo parachain.
 #[derive(RuntimeDebug)]
 pub struct BridgeHubRococo;
diff --git a/bridges/primitives/chain-bridge-hub-wococo/src/lib.rs b/bridges/primitives/chain-bridge-hub-wococo/src/lib.rs
index 5e4758645d9ea..ce4600f5ff35d 100644
--- a/bridges/primitives/chain-bridge-hub-wococo/src/lib.rs
+++ b/bridges/primitives/chain-bridge-hub-wococo/src/lib.rs
@@ -26,7 +26,6 @@ use bp_runtime::{
 };
 use frame_support::dispatch::DispatchClass;
 use sp_runtime::RuntimeDebug;
-use sp_std::prelude::Vec;
 
 /// BridgeHubWococo parachain.
 #[derive(RuntimeDebug)]
diff --git a/bridges/primitives/chain-kusama/src/lib.rs b/bridges/primitives/chain-kusama/src/lib.rs
index 8c3fbd9c203e5..d5748aa132cea 100644
--- a/bridges/primitives/chain-kusama/src/lib.rs
+++ b/bridges/primitives/chain-kusama/src/lib.rs
@@ -23,7 +23,6 @@ pub use bp_polkadot_core::*;
 use bp_header_chain::ChainWithGrandpa;
 use bp_runtime::{decl_bridge_finality_runtime_apis, Chain};
 use frame_support::weights::Weight;
-use sp_std::prelude::Vec;
 
 /// Kusama Chain
 pub struct Kusama;
diff --git a/bridges/primitives/chain-polkadot-bulletin/Cargo.toml b/bridges/primitives/chain-polkadot-bulletin/Cargo.toml
new file mode 100644
index 0000000000000..4311aec47276e
--- /dev/null
+++ b/bridges/primitives/chain-polkadot-bulletin/Cargo.toml
@@ -0,0 +1,41 @@
+[package]
+name = "bp-polkadot-bulletin"
+description = "Primitives of Polkadot Bulletin chain runtime."
+version = "0.1.0"
+authors = ["Parity Technologies <admin@parity.io>"]
+edition = "2021"
+license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
+
+[dependencies]
+codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["derive"] }
+scale-info = { version = "2.9.0", default-features = false, features = ["derive"] }
+
+# Bridge Dependencies
+
+bp-header-chain = { path = "../header-chain", default-features = false }
+bp-messages = { path = "../messages", default-features = false }
+bp-polkadot-core = { path = "../polkadot-core", default-features = false }
+bp-runtime = { path = "../runtime", default-features = false }
+
+# Substrate Based Dependencies
+
+frame-support = { path = "../../../substrate/frame/support", default-features = false }
+frame-system = { path = "../../../substrate/frame/system", default-features = false }
+sp-api = { path = "../../../substrate/primitives/api", default-features = false }
+sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false }
+sp-std = { path = "../../../substrate/primitives/std", default-features = false }
+
+[features]
+default = [ "std" ]
+std = [
+	"bp-header-chain/std",
+	"bp-messages/std",
+	"bp-polkadot-core/std",
+	"bp-runtime/std",
+	"codec/std",
+	"frame-support/std",
+	"frame-system/std",
+	"sp-api/std",
+	"sp-runtime/std",
+	"sp-std/std",
+]
diff --git a/bridges/primitives/chain-polkadot-bulletin/src/lib.rs b/bridges/primitives/chain-polkadot-bulletin/src/lib.rs
new file mode 100644
index 0000000000000..fcc6e90eb1b29
--- /dev/null
+++ b/bridges/primitives/chain-polkadot-bulletin/src/lib.rs
@@ -0,0 +1,215 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Parity Bridges Common.
+
+// Parity Bridges Common is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity Bridges Common is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Polkadot Bulletin Chain primitives.
+
+#![warn(missing_docs)]
+#![cfg_attr(not(feature = "std"), no_std)]
+
+use bp_header_chain::ChainWithGrandpa;
+use bp_messages::MessageNonce;
+use bp_runtime::{
+	decl_bridge_finality_runtime_apis, decl_bridge_messages_runtime_apis,
+	extensions::{
+		CheckEra, CheckGenesis, CheckNonZeroSender, CheckNonce, CheckSpecVersion, CheckTxVersion,
+		CheckWeight, GenericSignedExtension, GenericSignedExtensionSchema,
+	},
+	Chain, TransactionEra,
+};
+use codec::{Decode, Encode};
+use frame_support::{
+	dispatch::DispatchClass,
+	parameter_types,
+	weights::{constants::WEIGHT_REF_TIME_PER_SECOND, Weight},
+};
+use frame_system::limits;
+use scale_info::TypeInfo;
+use sp_runtime::{traits::DispatchInfoOf, transaction_validity::TransactionValidityError, Perbill};
+
+// This chain reuses most of Polkadot primitives.
+pub use bp_polkadot_core::{
+	AccountAddress, AccountId, Balance, Block, BlockNumber, Hash, Hasher, Header, Nonce, Signature,
+	SignedBlock, UncheckedExtrinsic, AVERAGE_HEADER_SIZE_IN_JUSTIFICATION,
+	EXTRA_STORAGE_PROOF_SIZE, MAX_HEADER_SIZE, REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY,
+};
+
+/// Maximal number of GRANDPA authorities at Polkadot Bulletin chain.
+pub const MAX_AUTHORITIES_COUNT: u32 = 100;
+
+/// Name of the With-Polkadot Bulletin chain GRANDPA pallet instance that is deployed at bridged
+/// chains.
+pub const WITH_POLKADOT_BULLETIN_GRANDPA_PALLET_NAME: &str = "BridgePolkadotBulletinGrandpa";
+/// Name of the With-Polkadot Bulletin chain messages pallet instance that is deployed at bridged
+/// chains.
+pub const WITH_POLKADOT_BULLETIN_MESSAGES_PALLET_NAME: &str = "BridgePolkadotBulletinMessages";
+
+// There are fewer system operations on this chain (e.g. staking, governance, etc.). Use a higher
+// percentage of the block for data storage.
+const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(90);
+
+// Regarding the following constants - we are using the same values that are used at Cumulus
+// parachains. They are limited by the maximal transaction weight/size. Since block limits at the
+// Bulletin Chain are larger than at the Cumulus Bridge Hubs, we can safely reuse the same values.
+
+/// Maximal number of unrewarded relayer entries at the Polkadot Bulletin chain inbound lane.
+pub const MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX: MessageNonce = 1024;
+
+/// Maximal number of unconfirmed messages at the Polkadot Bulletin chain inbound lane.
+pub const MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX: MessageNonce = 4096;
+
+/// This signed extension is used to ensure that the chain transactions are signed by accounts
+/// that are allowed to act on this (permissioned) chain.
+pub type ValidateSigned = GenericSignedExtensionSchema<(), ()>;
+
+/// Signed extension schema, used by Polkadot Bulletin.
+pub type SignedExtensionSchema = GenericSignedExtension<(
+	(
+		CheckNonZeroSender,
+		CheckSpecVersion,
+		CheckTxVersion,
+		CheckGenesis<Hash>,
+		CheckEra<Hash>,
+		CheckNonce<Nonce>,
+		CheckWeight,
+	),
+	ValidateSigned,
+)>;
+
+/// Signed extension, used by Polkadot Bulletin.
+#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)]
+pub struct SignedExtension(SignedExtensionSchema);
+
+impl sp_runtime::traits::SignedExtension for SignedExtension {
+	const IDENTIFIER: &'static str = "Not needed.";
+	type AccountId = ();
+	type Call = ();
+	type AdditionalSigned =
+		<SignedExtensionSchema as sp_runtime::traits::SignedExtension>::AdditionalSigned;
+	type Pre = ();
+
+	fn additional_signed(&self) -> Result<Self::AdditionalSigned, TransactionValidityError> {
+		self.0.additional_signed()
+	}
+
+	fn pre_dispatch(
+		self,
+		_who: &Self::AccountId,
+		_call: &Self::Call,
+		_info: &DispatchInfoOf<Self::Call>,
+		_len: usize,
+	) -> Result<Self::Pre, TransactionValidityError> {
+		Ok(())
+	}
+}
+
+impl SignedExtension {
+	/// Creates a signed extension from its components.
+	pub fn from_params(
+		spec_version: u32,
+		transaction_version: u32,
+		era: TransactionEra<BlockNumber, Hash>,
+		genesis_hash: Hash,
+		nonce: Nonce,
+	) -> Self {
+		Self(GenericSignedExtension::new(
+			(
+				(
+					(),              // non-zero sender
+					(),              // spec version
+					(),              // tx version
+					(),              // genesis
+					era.frame_era(), // era
+					nonce.into(),    // nonce (compact encoding)
+					(),              // check weight
+				),
+				(),
+			),
+			Some((
+				(
+					(),
+					spec_version,
+					transaction_version,
+					genesis_hash,
+					era.signed_payload(genesis_hash),
+					(),
+					(),
+				),
+				(),
+			)),
+		))
+	}
+
+	/// Returns the transaction nonce.
+	pub fn nonce(&self) -> Nonce {
+		// `payload.0` is the tuple of common extensions; its sixth element
+		// (index 5) is `CheckNonce`, whose payload is the `Compact`-wrapped
+		// nonce, hence the trailing `.0`.
+		let common_payload = self.0.payload.0;
+		common_payload.5 .0
+	}
+}
+
+parameter_types! {
+	/// We allow for 2 seconds of compute with a 6 second average block time.
+	pub BlockWeights: limits::BlockWeights = limits::BlockWeights::with_sensible_defaults(
+		Weight::from_parts(2u64 * WEIGHT_REF_TIME_PER_SECOND, u64::MAX),
+		NORMAL_DISPATCH_RATIO,
+	);
+	// Note: Max transaction size is 8 MB. Set max block size to 10 MB to facilitate data storage.
+	// This is double the "normal" Relay Chain block length limit.
+	/// Maximal block length at the Polkadot Bulletin chain.
+	pub BlockLength: limits::BlockLength = limits::BlockLength::max_with_normal_ratio(
+		10 * 1024 * 1024,
+		NORMAL_DISPATCH_RATIO,
+	);
+}
+
+/// Polkadot Bulletin Chain declaration.
+pub struct PolkadotBulletin;
+
+impl Chain for PolkadotBulletin {
+	type BlockNumber = BlockNumber;
+	type Hash = Hash;
+	type Hasher = Hasher;
+	type Header = Header;
+
+	type AccountId = AccountId;
+	// The Bulletin Chain is a permissioned blockchain without any balances. Our `Chain` trait
+	// requires a balance type, which is then used by various parts of the bridge infrastructure.
+	// However, this code is optional and we are not planning to use it in our bridge.
+	type Balance = Balance;
+	type Nonce = Nonce;
+	type Signature = Signature;
+
+	fn max_extrinsic_size() -> u32 {
+		*BlockLength::get().max.get(DispatchClass::Normal)
+	}
+
+	fn max_extrinsic_weight() -> Weight {
+		BlockWeights::get()
+			.get(DispatchClass::Normal)
+			.max_extrinsic
+			.unwrap_or(Weight::MAX)
+	}
+}
+
+impl ChainWithGrandpa for PolkadotBulletin {
+	const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = WITH_POLKADOT_BULLETIN_GRANDPA_PALLET_NAME;
+	const MAX_AUTHORITIES_COUNT: u32 = MAX_AUTHORITIES_COUNT;
+	const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 =
+		REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY;
+	const MAX_HEADER_SIZE: u32 = MAX_HEADER_SIZE;
+	const AVERAGE_HEADER_SIZE_IN_JUSTIFICATION: u32 = AVERAGE_HEADER_SIZE_IN_JUSTIFICATION;
+}
+
+decl_bridge_finality_runtime_apis!(polkadot_bulletin, grandpa);
+decl_bridge_messages_runtime_apis!(polkadot_bulletin);
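A minimal usage sketch (illustrative only, not part of the patch) tying the pieces of this new primitives crate together. It assumes the crate is importable as `bp_polkadot_bulletin` and uses placeholder version numbers, genesis hash and nonce:

```rust
use bp_polkadot_bulletin::{PolkadotBulletin, SignedExtension};
use bp_runtime::{Chain, TransactionEra};

fn sketch() {
	// Build the signed extension for an immortal transaction. The spec and
	// transaction versions, genesis hash and nonce are placeholder values.
	let ext = SignedExtension::from_params(
		1,                        // spec_version
		1,                        // transaction_version
		TransactionEra::Immortal, // no mortality window
		Default::default(),       // genesis hash
		42,                       // nonce
	);
	assert_eq!(ext.nonce(), 42);

	// With a 10 MiB block and a 90% normal-dispatch ratio, a normal-class
	// extrinsic may be up to 9 MiB - above the 8 MB max transaction size
	// mentioned in the comments above.
	assert_eq!(PolkadotBulletin::max_extrinsic_size(), 9 * 1024 * 1024);
}
```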
diff --git a/bridges/primitives/chain-polkadot/src/lib.rs b/bridges/primitives/chain-polkadot/src/lib.rs
index d1d6f74543121..61c8ca927d807 100644
--- a/bridges/primitives/chain-polkadot/src/lib.rs
+++ b/bridges/primitives/chain-polkadot/src/lib.rs
@@ -23,7 +23,6 @@ pub use bp_polkadot_core::*;
 use bp_header_chain::ChainWithGrandpa;
 use bp_runtime::{decl_bridge_finality_runtime_apis, extensions::PrevalidateAttests, Chain};
 use frame_support::weights::Weight;
-use sp_std::prelude::Vec;
 
 /// Polkadot Chain
 pub struct Polkadot;
diff --git a/bridges/primitives/chain-rococo/src/lib.rs b/bridges/primitives/chain-rococo/src/lib.rs
index 1589d14ea5143..5436ad846468c 100644
--- a/bridges/primitives/chain-rococo/src/lib.rs
+++ b/bridges/primitives/chain-rococo/src/lib.rs
@@ -23,7 +23,6 @@ pub use bp_polkadot_core::*;
 use bp_header_chain::ChainWithGrandpa;
 use bp_runtime::{decl_bridge_finality_runtime_apis, Chain};
 use frame_support::{parameter_types, weights::Weight};
-use sp_std::prelude::Vec;
 
 /// Rococo Chain
 pub struct Rococo;
diff --git a/bridges/primitives/chain-wococo/src/lib.rs b/bridges/primitives/chain-wococo/src/lib.rs
index 5b5bde8269044..b1df65630beff 100644
--- a/bridges/primitives/chain-wococo/src/lib.rs
+++ b/bridges/primitives/chain-wococo/src/lib.rs
@@ -26,7 +26,6 @@ pub use bp_rococo::{
 use bp_header_chain::ChainWithGrandpa;
 use bp_runtime::{decl_bridge_finality_runtime_apis, Chain};
 use frame_support::weights::Weight;
-use sp_std::prelude::Vec;
 
 /// Wococo Chain
 pub struct Wococo;
diff --git a/bridges/primitives/header-chain/src/justification/verification/mod.rs b/bridges/primitives/header-chain/src/justification/verification/mod.rs
index bb8aaadf327ec..a66fc1e0d91d1 100644
--- a/bridges/primitives/header-chain/src/justification/verification/mod.rs
+++ b/bridges/primitives/header-chain/src/justification/verification/mod.rs
@@ -143,6 +143,7 @@ pub enum PrecommitError {
 }
 
 /// The context needed for validating GRANDPA finality proofs.
+#[derive(RuntimeDebug)]
 pub struct JustificationVerificationContext {
 	/// The authority set used to verify the justification.
 	pub voter_set: VoterSet<AuthorityId>,
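For context on the derive added above, a hedged sketch of what `RuntimeDebug` provides; the struct and test below are illustrative only:

```rust
use sp_runtime::RuntimeDebug;

// With the `std` feature, `RuntimeDebug` expands to an ordinary `Debug` impl;
// in `no_std` runtime builds it expands to a stub printing `<wasm:stripped>`,
// so the verification context can show up in `{:?}` logs without pulling
// formatting machinery into the runtime.
#[derive(RuntimeDebug)]
struct ContextSketch {
	voters: u32,
}

#[test]
fn renders_under_std() {
	assert_eq!(format!("{:?}", ContextSketch { voters: 3 }), "ContextSketch { voters: 3 }");
}
```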
diff --git a/bridges/primitives/runtime/src/chain.rs b/bridges/primitives/runtime/src/chain.rs
index 5caaebd42babc..e1809e145248f 100644
--- a/bridges/primitives/runtime/src/chain.rs
+++ b/bridges/primitives/runtime/src/chain.rs
@@ -311,7 +311,7 @@ macro_rules! decl_bridge_finality_runtime_apis {
 						$(
 							/// Returns the justifications accepted in the current block.
 							fn [<synced_headers_ $consensus:lower _info>](
-							) -> Vec<$justification_type>;
+							) -> sp_std::vec::Vec<$justification_type>;
 						)?
 					}
 				}
@@ -360,10 +360,10 @@ macro_rules! decl_bridge_messages_runtime_apis {
 						/// If some (or all) messages are missing from the storage, they'll also be
 						/// missing from the resulting vector. The vector is ordered by the nonce.
 						fn message_details(
-							lane: LaneId,
-							begin: MessageNonce,
-							end: MessageNonce,
-						) -> Vec<OutboundMessageDetails>;
+							lane: bp_messages::LaneId,
+							begin: bp_messages::MessageNonce,
+							end: bp_messages::MessageNonce,
+						) -> sp_std::vec::Vec<bp_messages::OutboundMessageDetails>;
 					}
 
 					/// Inbound message lane API for messages sent by this chain.
@@ -376,9 +376,9 @@ macro_rules! decl_bridge_messages_runtime_apis {
 					pub trait [<From $chain:camel InboundLaneApi>] {
 						/// Return details of given inbound messages.
 						fn message_details(
-							lane: LaneId,
-							messages: Vec<(MessagePayload, OutboundMessageDetails)>,
-						) -> Vec<InboundMessageDetails>;
+							lane: bp_messages::LaneId,
+							messages: sp_std::vec::Vec<(bp_messages::MessagePayload, bp_messages::OutboundMessageDetails)>,
+						) -> sp_std::vec::Vec<bp_messages::InboundMessageDetails>;
 					}
 				}
 			}
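The fully qualified paths above (together with the `use sp_std::prelude::Vec;` removals in the chain crates earlier in this diff) follow from how `macro_rules!` expansion works. A small sketch of the idea, assuming the calling crate depends on `sp_std`:

```rust
// `macro_rules!` bodies are expanded at the call site, so an unqualified `Vec`
// only compiles if every caller happens to import it. Fully qualifying the
// types makes the generated runtime APIs self-contained.
macro_rules! decl_api_sketch {
	() => {
		pub fn synced_headers_sketch() -> sp_std::vec::Vec<u32> {
			sp_std::vec::Vec::new()
		}
	};
}

// Expands cleanly even in a module with no `Vec` import in scope.
decl_api_sketch!();
```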
diff --git a/bridges/primitives/runtime/src/lib.rs b/bridges/primitives/runtime/src/lib.rs
index ece782e352bc2..7f4a1a030b145 100644
--- a/bridges/primitives/runtime/src/lib.rs
+++ b/bridges/primitives/runtime/src/lib.rs
@@ -74,6 +74,9 @@ pub const MILLAU_CHAIN_ID: ChainId = *b"mlau";
 /// Polkadot chain id.
 pub const POLKADOT_CHAIN_ID: ChainId = *b"pdot";
 
+/// Polkadot Bulletin chain id.
+pub const POLKADOT_BULLETIN_CHAIN_ID: ChainId = *b"pdbc";
+
 /// Kusama chain id.
 pub const KUSAMA_CHAIN_ID: ChainId = *b"ksma";
 
diff --git a/cumulus/.gitattributes b/cumulus/.gitattributes
deleted file mode 100644
index 2ea1ab2d6b9cf..0000000000000
--- a/cumulus/.gitattributes
+++ /dev/null
@@ -1,2 +0,0 @@
-/.gitlab-ci.yml filter=ci-prettier
-/scripts/ci/gitlab/pipeline/*.yml filter=ci-prettier
diff --git a/cumulus/.github/dependabot.yml b/cumulus/.github/dependabot.yml
deleted file mode 100644
index 349a34690d4eb..0000000000000
--- a/cumulus/.github/dependabot.yml
+++ /dev/null
@@ -1,31 +0,0 @@
-version: 2
-updates:
-  - package-ecosystem: "cargo"
-    directory: "/"
-    labels: ["A2-insubstantial", "B0-silent", "C1-low"]
-    # Handle updates for crates from github.com/paritytech/substrate manually.
-    ignore:
-      - dependency-name: "substrate-*"
-      - dependency-name: "sc-*"
-      - dependency-name: "sp-*"
-      - dependency-name: "frame-*"
-      - dependency-name: "fork-tree"
-      - dependency-name: "frame-remote-externalities"
-      - dependency-name: "pallet-*"
-      - dependency-name: "beefy-*"
-      - dependency-name: "try-runtime-*"
-      - dependency-name: "test-runner"
-      - dependency-name: "generate-bags"
-      - dependency-name: "sub-tokens"
-      - dependency-name: "polkadot-*"
-      - dependency-name: "xcm*"
-      - dependency-name: "kusama-*"
-      - dependency-name: "westend-*"
-      - dependency-name: "rococo-*"
-    schedule:
-      interval: "daily"
-  - package-ecosystem: github-actions
-    directory: '/'
-    labels: ["A2-insubstantial", "B0-silent", "C1-low", "E2-dependencies"]
-    schedule:
-      interval: daily
diff --git a/cumulus/.github/pr-custom-review.yml b/cumulus/.github/pr-custom-review.yml
deleted file mode 100644
index fc26ee677f065..0000000000000
--- a/cumulus/.github/pr-custom-review.yml
+++ /dev/null
@@ -1,48 +0,0 @@
-# 🔒 PROTECTED: Changes to locks-review-team should be approved by the current locks-review-team
-locks-review-team: cumulus-locks-review
-team-leads-team: polkadot-review
-action-review-team: ci
-
-rules:
-  - name: Runtime files
-    check_type: changed_files
-    condition: ^parachains/runtimes/assets/(asset-hub-kusama|asset-hub-polkadot)/src/[^/]+\.rs$|^parachains/runtimes/bridge-hubs/(bridge-hub-kusama|bridge-hub-polkadot)/src/[^/]+\.rs$|^parachains/runtimes/collectives/collectives-polkadot/src/[^/]+\.rs$|^parachains/common/src/[^/]+\.rs$
-    all_distinct:
-      - min_approvals: 1
-        teams:
-          - cumulus-locks-review
-      - min_approvals: 1
-        teams:
-          - polkadot-review
-
-  - name: Core developers
-    check_type: changed_files
-    condition:
-      include: .*
-      # excluding files from 'Runtime files' and 'CI files' rules and `Bridges subtree files`
-      exclude: ^parachains/runtimes/assets/(asset-hub-kusama|asset-hub-polkadot)/src/[^/]+\.rs$|^parachains/runtimes/bridge-hubs/(bridge-hub-kusama|bridge-hub-polkadot)/src/[^/]+\.rs$|^parachains/runtimes/collectives/collectives-polkadot/src/[^/]+\.rs$|^parachains/common/src/[^/]+\.rs$|^\.gitlab-ci\.yml|^scripts/ci/.*|^\.github/.*
-    min_approvals: 2
-    teams:
-      - core-devs
-
-  # if there are any changes in the bridges subtree (in case of backport changes back to bridges repo)
-  - name: Bridges subtree files
-    check_type: changed_files
-    condition: ^bridges/.*
-    min_approvals: 1
-    teams:
-      - bridges-core
-
-  - name: CI files
-    check_type: changed_files
-    condition:
-      include: ^\.gitlab-ci\.yml|^scripts/ci/.*|^\.github/.*
-      exclude: ^scripts/ci/gitlab/pipeline/zombienet.yml$
-    min_approvals: 2
-    teams:
-      - ci
-      - release-engineering
-
-prevent-review-request:
-  teams:
-    - core-devs
diff --git a/cumulus/.github/workflows/check-D-labels.yml b/cumulus/.github/workflows/check-D-labels.yml
deleted file mode 100644
index 9106272093107..0000000000000
--- a/cumulus/.github/workflows/check-D-labels.yml
+++ /dev/null
@@ -1,47 +0,0 @@
-name: Check D labels
-
-on:
-  pull_request:
-    types: [labeled, opened, synchronize, unlabeled]
-    paths:
-      - primitives/**
-
-jobs:
-  check-labels:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Pull image
-        env:
-          IMAGE: paritytech/ruled_labels:0.4.0
-        run: docker pull $IMAGE
-
-      - name: Check labels
-        env:
-          IMAGE: paritytech/ruled_labels:0.4.0
-          MOUNT: /work
-          GITHUB_PR: ${{ github.event.pull_request.number }}
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-          API_BASE: https://api.github.com/repos
-          REPO: ${{ github.repository }}
-          RULES_PATH: labels/ruled_labels
-          CHECK_SPECS: specs_cumulus.yaml
-        run: |
-          echo "REPO: ${REPO}"
-          echo "GITHUB_PR: ${GITHUB_PR}"
-          # Clone repo with labels specs
-          git clone https://github.com/paritytech/labels
-          # Fetch the labels for the PR under test
-          labels=$( curl -H "Authorization: token ${GITHUB_TOKEN}" -s "$API_BASE/${REPO}/pulls/${GITHUB_PR}" | jq '.labels | .[] | .name' | tr "\n" ",")
-
-          if [ -z "${labels}" ]; then
-            docker run --rm -i -v $PWD/${RULES_PATH}/:$MOUNT $IMAGE check $MOUNT/$CHECK_SPECS --tags audit --no-label
-          fi
-
-          labels_args=${labels: :-1}
-          printf "Checking labels: %s\n" "${labels_args}"
-
-          # Prevent the shell from splitting labels with spaces
-          IFS=","
-
-          # --dev is more useful to debug mode to debug
-          docker run --rm -i -v $PWD/${RULES_PATH}/:$MOUNT $IMAGE check $MOUNT/$CHECK_SPECS --labels ${labels_args} --dev --tags audit
diff --git a/cumulus/.github/workflows/check-labels.yml b/cumulus/.github/workflows/check-labels.yml
deleted file mode 100644
index 004271d7788ad..0000000000000
--- a/cumulus/.github/workflows/check-labels.yml
+++ /dev/null
@@ -1,45 +0,0 @@
-name: Check labels
-
-on:
-  pull_request:
-    types: [labeled, opened, synchronize, unlabeled]
-
-jobs:
-  check-labels:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Pull image
-        env:
-          IMAGE: paritytech/ruled_labels:0.4.0
-        run: docker pull $IMAGE
-
-      - name: Check labels
-        env:
-          IMAGE: paritytech/ruled_labels:0.4.0
-          MOUNT: /work
-          GITHUB_PR: ${{ github.event.pull_request.number }}
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-          API_BASE: https://api.github.com/repos
-          REPO: ${{ github.repository }}
-          RULES_PATH: labels/ruled_labels
-          CHECK_SPECS: specs_cumulus.yaml
-        run: |
-          echo "REPO: ${REPO}"
-          echo "GITHUB_PR: ${GITHUB_PR}"
-          # Clone repo with labels specs
-          git clone https://github.com/paritytech/labels
-          # Fetch the labels for the PR under test
-          labels=$( curl -H "Authorization: token ${GITHUB_TOKEN}" -s "$API_BASE/${REPO}/pulls/${GITHUB_PR}" | jq '.labels | .[] | .name' | tr "\n" ",")
-
-          if [ -z "${labels}" ]; then
-            docker run --rm -i -v $PWD/${RULES_PATH}/:$MOUNT $IMAGE check $MOUNT/$CHECK_SPECS --tags audit --no-label
-          fi
-
-          labels_args=${labels: :-1}
-          printf "Checking labels: %s\n" "${labels_args}"
-
-          # Prevent the shell from splitting labels with spaces
-          IFS=","
-
-          # --dev is more useful to debug mode to debug
-          docker run --rm -i -v $PWD/${RULES_PATH}/:$MOUNT $IMAGE check $MOUNT/$CHECK_SPECS --labels ${labels_args} --dev --tags PR
diff --git a/cumulus/.github/workflows/docs.yml b/cumulus/.github/workflows/docs.yml
deleted file mode 100644
index 6aab3f27be6b4..0000000000000
--- a/cumulus/.github/workflows/docs.yml
+++ /dev/null
@@ -1,39 +0,0 @@
-name: Publish Rust Docs
-
-on:
-  push:
-    branches:
-      - master
-
-jobs:
-  deploy-docs:
-    name: Deploy docs
-    runs-on: ubuntu-latest
-
-    steps:
-      - name: Install tooling
-        run: |
-          sudo apt-get install -y protobuf-compiler
-          protoc --version
-
-      - name: Checkout repository
-        uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
-
-      - name: Rust versions
-        run:  rustup show
-
-      - name: Rust cache
-        uses: Swatinem/rust-cache@e207df5d269b42b69c8bc5101da26f7d31feddb4 # v2.6.2
-
-      - name: Build rustdocs
-        run:  SKIP_WASM_BUILD=1 cargo doc --all --no-deps
-
-      - name: Make index.html
-        run:  echo "<meta http-equiv=refresh content=0;url=cumulus_client_collator/index.html>" > ./target/doc/index.html
-
-      - name: Deploy documentation
-        uses: peaceiris/actions-gh-pages@373f7f263a76c20808c831209c920827a82a2847 # v3.9.3
-        with:
-          github_token: ${{ secrets.GITHUB_TOKEN }}
-          publish_branch: gh-pages
-          publish_dir: ./target/doc
diff --git a/cumulus/.github/workflows/fmt-check.yml b/cumulus/.github/workflows/fmt-check.yml
deleted file mode 100644
index 7571c51116be8..0000000000000
--- a/cumulus/.github/workflows/fmt-check.yml
+++ /dev/null
@@ -1,22 +0,0 @@
-name: Rustfmt check
-
-on:
-  push:
-    branches:
-      - master
-  pull_request:
-    types: [opened, synchronize, reopened, ready_for_review]
-
-jobs:
-  quick_check:
-    strategy:
-      matrix:
-        os: ["ubuntu-latest"]
-    runs-on: ${{ matrix.os }}
-    container:
-      image: paritytech/ci-linux:production
-    steps:
-      - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
-
-      - name: Cargo fmt
-        run: cargo +nightly fmt --all -- --check
diff --git a/cumulus/.github/workflows/pr-custom-review.yml b/cumulus/.github/workflows/pr-custom-review.yml
deleted file mode 100644
index 8e40c9ee72989..0000000000000
--- a/cumulus/.github/workflows/pr-custom-review.yml
+++ /dev/null
@@ -1,42 +0,0 @@
-name: Assign reviewers
-
-on:
-  pull_request:
-    branches:
-      - master
-      - main
-    types:
-      - opened
-      - reopened
-      - synchronize
-      - review_requested
-      - review_request_removed
-      - ready_for_review
-      - converted_to_draft
-  pull_request_review:
-
-jobs:
-  pr-custom-review:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Skip if pull request is in Draft
-        # `if: github.event.pull_request.draft == true` should be kept here, at
-        # the step level, rather than at the job level. The latter is not
-        # recommended because when the PR is moved from "Draft" to "Ready to
-        # review" the workflow will immediately be passing (since it was skipped),
-        # even though it hasn't actually ran, since it takes a few seconds for
-        # the workflow to start. This is also disclosed in:
-        # https://github.community/t/dont-run-actions-on-draft-pull-requests/16817/17
-        # That scenario would open an opportunity for the check to be bypassed:
-        # 1. Get your PR approved
-        # 2. Move it to Draft
-        # 3. Push whatever commits you want
-        # 4. Move it to "Ready for review"; now the workflow is passing (it was
-        #    skipped) and "Check reviews" is also passing (it won't be updated
-        #    until the workflow is finished)
-        if: github.event.pull_request.draft == true
-        run: exit 1
-      - name: pr-custom-review
-        uses: paritytech/pr-custom-review@action-v3
-        with:
-          checks-reviews-api: http://pcr.parity-prod.parity.io/api/v1/check_reviews
diff --git a/cumulus/.github/workflows/release-01_branch-check.yml b/cumulus/.github/workflows/release-01_branch-check.yml
deleted file mode 100644
index afcd4580f176f..0000000000000
--- a/cumulus/.github/workflows/release-01_branch-check.yml
+++ /dev/null
@@ -1,22 +0,0 @@
-name: Release branch check
-on:
-  push:
-    branches:
-      - release-**v[0-9]+.[0-9]+.[0-9]+ # client
-      - release-**v[0-9]+               # runtimes
-      - polkadot-v[0-9]+.[0-9]+.[0-9]+  # cumulus code
-
-  workflow_dispatch:
-
-jobs:
-  check_branch:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout sources
-        uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
-        with:
-          fetch-depth: 0
-
-      - name: Run check
-        shell: bash
-        run: ./scripts/ci/github/check-rel-br
diff --git a/cumulus/.github/workflows/release-10_rc-automation.yml b/cumulus/.github/workflows/release-10_rc-automation.yml
deleted file mode 100644
index d1795faef096e..0000000000000
--- a/cumulus/.github/workflows/release-10_rc-automation.yml
+++ /dev/null
@@ -1,87 +0,0 @@
-name: Release - RC automation
-on:
-  push:
-    branches:
-      - release-v[0-9]+.[0-9]+.[0-9]+
-      - release-parachains-v[0-9]+
-  workflow_dispatch:
-
-jobs:
-  tag_rc:
-    runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        channel:
-          - name: 'RelEng: Cumulus Release Coordination'
-            room: '!NAEMyPAHWOiOQHsvus:parity.io'
-            pre-releases: true
-    steps:
-      - name: Checkout sources
-        uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
-        with:
-          fetch-depth: 0
-      - id: compute_tag
-        name: Compute next rc tag
-        shell: bash
-        run: |
-          # Get last rc tag if exists, else set it to {version}-rc1
-          version=${GITHUB_REF#refs/heads/release-}
-          echo "$version"
-          echo "version=$version" >> $GITHUB_OUTPUT
-          git tag -l
-          last_rc=$(git tag -l "$version-rc*" | sort -V | tail -n 1)
-          if [ -n "$last_rc" ]; then
-            suffix=$(echo "$last_rc" | grep -Eo '[0-9]+$')
-            echo $suffix
-            ((suffix++))
-            echo $suffix
-            echo "new_tag=$version-rc$suffix" >> $GITHUB_OUTPUT
-            echo "first_rc=false" >> $GITHUB_OUTPUT
-          else
-            echo "new_tag=$version-rc1" >> $GITHUB_OUTPUT
-            echo "first_rc=true" >> $GITHUB_OUTPUT
-          fi
-
-      - name: Apply new tag
-        uses: tvdias/github-tagger@ed7350546e3e503b5e942dffd65bc8751a95e49d # v0.0.2
-        with:
-          # We can't use the normal GITHUB_TOKEN for the following reason:
-          # https://docs.github.com/en/actions/reference/events-that-trigger-workflows#triggering-new-workflows-using-a-personal-access-token
-          # RELEASE_BRANCH_TOKEN requires public_repo OAuth scope
-          repo-token: "${{ secrets.RELEASE_BRANCH_TOKEN }}"
-          tag: ${{ steps.compute_tag.outputs.new_tag }}
-
-      - id: create-issue-checklist-client
-        uses: JasonEtco/create-an-issue@e27dddc79c92bc6e4562f268fffa5ed752639abd # v2.9.1
-        # Only create the issue if it's the first release candidate
-        if: steps.compute_tag.outputs.first_rc == 'true'
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-          VERSION: ${{ steps.compute_tag.outputs.version }}
-        with:
-          assignees: EgorPopelyaev, coderobe, chevdor
-          filename: .github/ISSUE_TEMPLATE/release-client.md
-
-      - id: create-issue-checklist-runtime
-        uses: JasonEtco/create-an-issue@e27dddc79c92bc6e4562f268fffa5ed752639abd # v2.9.1
-        # Only create the issue if it's the first release candidate
-        if: steps.compute_tag.outputs.first_rc == 'true'
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-          VERSION: ${{ steps.compute_tag.outputs.version }}
-        with:
-          assignees: EgorPopelyaev, coderobe, chevdor
-          filename: .github/ISSUE_TEMPLATE/release-runtime.md
-
-      - name: Matrix notification to ${{ matrix.channel.name }}
-        uses: s3krit/matrix-message-action@70ad3fb812ee0e45ff8999d6af11cafad11a6ecf # v0.0.3
-        if: steps.create-issue-checklist-client.outputs.url != '' && steps.create-issue-checklist-runtime.outputs.url != ''
-        with:
-          room_id: ${{ matrix.channel.room }}
-          access_token: ${{ secrets.RELEASENOTES_MATRIX_V2_ACCESS_TOKEN }}
-          server: "m.parity.io"
-          message: |
-            The Release Process for Cumulus ${{ steps.compute_tag.outputs.version }} has been started.<br/>
-            Tracking issues:
-              - client: ${{ steps.create-issue-checklist-client.outputs.url }}"
-              - runtime: ${{ steps.create-issue-checklist-runtime.outputs.url }}"
diff --git a/cumulus/.github/workflows/release-20_extrinsic-ordering-check-from-bin.yml b/cumulus/.github/workflows/release-20_extrinsic-ordering-check-from-bin.yml
deleted file mode 100644
index d902e57ac9e7f..0000000000000
--- a/cumulus/.github/workflows/release-20_extrinsic-ordering-check-from-bin.yml
+++ /dev/null
@@ -1,86 +0,0 @@
-# This workflow performs the Extrinsic Ordering Check on demand using a binary
-
-name: Release - Extrinsic Ordering Check from Binary
-on:
-  workflow_dispatch:
-    inputs:
-      reference_url:
-        description: The WebSocket url of the reference node
-        default: wss://kusama-asset-hub-rpc.polkadot.io
-        required: true
-      binary_url:
-        description: A url to a Linux binary for the node containing the runtime to test
-        default: https://releases.parity.io/cumulus/polkadot-v0.9.21/polkadot-parachain
-        required: true
-      chain:
-        description: The name of the chain under test. Usually, you would pass a local chain
-        default: asset-hub-kusama-local
-        required: true
-
-jobs:
-  check:
-    name: Run check
-    runs-on: ubuntu-latest
-    env:
-      CHAIN: ${{github.event.inputs.chain}}
-      BIN: node-bin
-      BIN_PATH: ./tmp/$BIN
-      BIN_URL: ${{github.event.inputs.binary_url}}
-      REF_URL: ${{github.event.inputs.reference_url}}
-
-    steps:
-      - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
-
-      - name: Fetch binary
-        run: |
-          echo Creating a temp dir to download and run binary
-          mkdir -p tmp
-          echo Fetching $BIN_URL
-          wget $BIN_URL -O $BIN_PATH
-          chmod a+x $BIN_PATH
-          $BIN_PATH --version
-
-      - name: Start local node
-        run: |
-          echo Running on $CHAIN
-          $BIN_PATH --chain=$CHAIN -- --chain polkadot-local &
-
-      - name: Prepare output
-        run: |
-          VERSION=$($BIN_PATH --version)
-          echo "Metadata comparison:" >> output.txt
-          echo "Date: $(date)" >> output.txt
-          echo "Reference: $REF_URL" >> output.txt
-          echo "Target version: $VERSION" >> output.txt
-          echo "Chain: $CHAIN" >> output.txt
-          echo "----------------------------------------------------------------------" >> output.txt
-
-      - name: Pull polkadot-js-tools image
-        run: docker pull jacogr/polkadot-js-tools
-
-      - name: Compare the metadata
-        run: |
-          CMD="docker run --pull always --network host jacogr/polkadot-js-tools metadata $REF_URL ws://localhost:9944"
-          echo -e "Running:\n$CMD"
-          $CMD >> output.txt
-          sed -z -i 's/\n\n/\n/g' output.txt
-          cat output.txt | egrep -n -i ''
-          SUMMARY=$(./scripts/ci/github/extrinsic-ordering-filter.sh output.txt)
-          echo -e $SUMMARY
-          echo -e $SUMMARY >> output.txt
-
-      - name: Show result
-        run: |
-          cat output.txt
-
-      - name: Stop our local node
-        run: |
-          pkill $BIN
-        continue-on-error: true
-
-      - name: Save output as artifact
-        uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
-        with:
-          name: ${{ env.CHAIN }}
-          path: |
-            output.txt
diff --git a/cumulus/.github/workflows/release-21_extrinsic-ordering-check-from-two.yml b/cumulus/.github/workflows/release-21_extrinsic-ordering-check-from-two.yml
deleted file mode 100644
index 93c0050ff6f25..0000000000000
--- a/cumulus/.github/workflows/release-21_extrinsic-ordering-check-from-two.yml
+++ /dev/null
@@ -1,120 +0,0 @@
-# This workflow performs the Extrinsic Ordering Check on demand using two reference binaries
-
-name: Release - Extrinsic API Check with reference bins
-on:
-  workflow_dispatch:
-    inputs:
-      reference_binary_url:
-        description: A url to a Linux binary for the node containing the reference runtime to test against
-        default: https://releases.parity.io/cumulus/v0.9.230/polkadot-parachain
-        required: true
-      binary_url:
-        description: A url to a Linux binary for the node containing the runtime to test
-        default: https://releases.parity.io/cumulus/v0.9.270-rc7/polkadot-parachain
-        required: true
-
-jobs:
-  check:
-    name: Run check
-    runs-on: ubuntu-latest
-    timeout-minutes: 10
-    env:
-      REF_URL: ${{github.event.inputs.reference_binary_url}}
-      BIN_REF: polkadot-parachain-ref
-      BIN_URL: ${{github.event.inputs.binary_url}}
-      BIN_BASE: polkadot-parachain
-      TMP: ./tmp
-    strategy:
-      fail-fast: false
-      matrix:
-        include:
-          - runtime: asset-hub-kusama
-            local: asset-hub-kusama-local
-            relay: kusama-local
-          - runtime: asset-hub-polkadot
-            local: asset-hub-polkadot-local
-            relay: polkadot-local
-          - runtime: asset-hub-westend
-            local: asset-hub-westend-local
-            relay: polkadot-local
-          - runtime: contracts-rococo
-            local: contracts-rococo-local
-            relay: polkadot-local
-
-    steps:
-      - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
-
-      - name: Create tmp dir
-        run: |
-          mkdir -p $TMP
-          pwd
-
-      - name: Fetch reference binary for ${{ matrix.runtime }}
-        run: |
-          echo Fetching $REF_URL
-          curl $REF_URL -o $TMP/$BIN_REF
-          chmod a+x  $TMP/$BIN_REF
-          $TMP/$BIN_REF --version
-
-      - name: Fetch test binary for ${{ matrix.runtime }}
-        run: |
-          echo Fetching $BIN_URL
-          curl $BIN_URL -o $TMP/$BIN_BASE
-          chmod a+x  $TMP/$BIN_BASE
-          $TMP/$BIN_BASE --version
-
-      - name: Start local reference node for ${{ matrix.runtime }}
-        run: |
-          echo Running reference on ${{ matrix.local }}
-          $TMP/$BIN_REF --chain=${{ matrix.local }} --ws-port=9954 --tmp -- --chain ${{ matrix.relay }} &
-          sleep 15
-
-      - name: Start local test node for ${{ matrix.runtime }}
-        run: |
-          echo Running test on ${{ matrix.local }}
-          $TMP/$BIN_BASE --chain=${{ matrix.local }} --ws-port=9944 --tmp -- --chain ${{ matrix.relay }} &
-          sleep 15
-
-      - name: Prepare output
-        run: |
-          REF_VERSION=$($TMP/$BIN_REF --version)
-          BIN_VERSION=$($TMP/$BIN_BASE --version)
-          echo "Metadata comparison:" >> output.txt
-          echo "Date: $(date)" >> output.txt
-          echo "Ref. binary: $REF_URL" >> output.txt
-          echo "Test binary: $BIN_URL" >> output.txt
-          echo "Ref. version: $REF_VERSION" >> output.txt
-          echo "Test version: $BIN_VERSION" >> output.txt
-          echo "Chain: ${{ matrix.local }}" >> output.txt
-          echo "Relay: ${{ matrix.relay }}" >> output.txt
-          echo "----------------------------------------------------------------------" >> output.txt
-
-      - name: Pull polkadot-js-tools image
-        run: docker pull jacogr/polkadot-js-tools
-
-      - name: Compare the metadata
-        run: |
-          CMD="docker run --pull always --network host jacogr/polkadot-js-tools metadata ws://localhost:9954 ws://localhost:9944"
-          echo -e "Running:\n$CMD"
-          $CMD >> output.txt
-          sed -z -i 's/\n\n/\n/g' output.txt
-          cat output.txt | egrep -n -i ''
-          SUMMARY=$(./scripts/ci/github/extrinsic-ordering-filter.sh output.txt)
-          echo -e $SUMMARY
-          echo -e $SUMMARY >> output.txt
-
-      - name: Show result
-        run: |
-          cat output.txt
-
-      - name: Save output as artifact
-        uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
-        with:
-          name: ${{ matrix.runtime }}
-          path: |
-            output.txt
-
-      - name: Stop our local nodes
-        run: |
-          pkill $BIN_REF || true
-          pkill $BIN_BASE || true
diff --git a/cumulus/.github/workflows/release-30_create-draft.yml b/cumulus/.github/workflows/release-30_create-draft.yml
deleted file mode 100644
index 2d11dfe18cec8..0000000000000
--- a/cumulus/.github/workflows/release-30_create-draft.yml
+++ /dev/null
@@ -1,311 +0,0 @@
-name: Release - Create draft
-
-on:
-  workflow_dispatch:
-    inputs:
-      ref1:
-        description: The 'from' tag to use for the diff
-        default: parachains-v9.0.0
-        required: true
-      ref2:
-        description: The 'to' tag to use for the diff
-        default: release-parachains-v10.0.0
-        required: true
-      release_type:
-        description: Pass "client" for client releases, leave empty otherwise
-        required: false
-      pre_release:
-        description: For pre-releases
-        default: "true"
-        required: true
-      notification:
-        description: Whether or not to notify over Matrix
-        default: "true"
-        required: true
-
-jobs:
-  get-rust-versions:
-    runs-on: ubuntu-latest
-    container:
-      image: paritytech/ci-linux:production
-    outputs:
-      rustc-stable: ${{ steps.get-rust-versions.outputs.stable }}
-      rustc-nightly: ${{ steps.get-rust-versions.outputs.nightly }}
-    steps:
-      - id: get-rust-versions
-        run: |
-          echo "stable=$(rustc +stable --version)" >> $GITHUB_OUTPUT
-          echo "nightly=$(rustc +nightly --version)" >> $GITHUB_OUTPUT
-
-  # We do not skip the entire job for client builds (although we don't need it)
-  # because it is a dep of the next job. However we skip the time consuming steps.
-  build-runtimes:
-    runs-on: ubuntu-latest
-    strategy:
-       matrix:
-        include:
-          - category: assets
-            runtime: asset-hub-kusama
-          - category: assets
-            runtime: asset-hub-polkadot
-          - category: assets
-            runtime: asset-hub-westend
-          - category: bridge-hubs
-            runtime: bridge-hub-polkadot
-          - category: bridge-hubs
-            runtime: bridge-hub-kusama
-          - category: bridge-hubs
-            runtime: bridge-hub-rococo
-          - category: collectives
-            runtime: collectives-polkadot
-          - category: contracts
-            runtime: contracts-rococo
-          - category: starters
-            runtime: seedling
-          - category: starters
-            runtime: shell
-          - category: testing
-            runtime: rococo-parachain
-    steps:
-      - name: Checkout sources
-        uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
-        with:
-          ref: ${{ github.event.inputs.ref2 }}
-
-      - name: Cache target dir
-        if: ${{ github.event.inputs.release_type != 'client' }}
-        uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1
-        with:
-          path: "${{ github.workspace }}/runtime/${{ matrix.runtime }}/target"
-          key: srtool-target-${{ matrix.runtime }}-${{ github.sha }}
-          restore-keys: |
-            srtool-target-${{ matrix.runtime }}-
-            srtool-target-
-
-      - name: Build ${{ matrix.runtime }} runtime
-        if: ${{ github.event.inputs.release_type != 'client' }}
-        id: srtool_build
-        uses: chevdor/srtool-actions@v0.7.0
-        with:
-          image: paritytech/srtool
-          chain: ${{ matrix.runtime }}
-          runtime_dir: parachains/runtimes/${{ matrix.category }}/${{ matrix.runtime }}
-
-      - name: Store srtool digest to disk
-        if: ${{ github.event.inputs.release_type != 'client' }}
-        run: |
-          echo '${{ steps.srtool_build.outputs.json }}' | \
-            jq > ${{ matrix.runtime }}-srtool-digest.json
-
-      - name: Upload ${{ matrix.runtime }} srtool json
-        if: ${{ github.event.inputs.release_type != 'client' }}
-        uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
-        with:
-          name: ${{ matrix.runtime }}-srtool-json
-          path: ${{ matrix.runtime }}-srtool-digest.json
-
-      - name: Upload ${{ matrix.runtime }} runtime
-        if: ${{ github.event.inputs.release_type != 'client' }}
-        uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
-        with:
-          name: ${{ matrix.runtime }}-runtime
-          path: |
-            ${{ steps.srtool_build.outputs.wasm_compressed }}
-
-  publish-draft-release:
-    runs-on: ubuntu-latest
-    needs: ["get-rust-versions", "build-runtimes"]
-    outputs:
-      release_url: ${{ steps.create-release.outputs.html_url }}
-      asset_upload_url: ${{ steps.create-release.outputs.upload_url }}
-    steps:
-      - name: Checkout sources
-        uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
-        with:
-          fetch-depth: 0
-          path: cumulus
-          ref: ${{ github.event.inputs.ref2 }}
-
-      - uses: ruby/setup-ruby@250fcd6a742febb1123a77a841497ccaa8b9e939 # v1.152.0
-        with:
-          ruby-version: 3.0.0
-
-      - name: Download srtool json output
-        uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2
-
-      - name: Prepare tooling
-        run: |
-          cd cumulus/scripts/ci/changelog
-          gem install bundler changelogerator:0.9.1
-          bundle install
-          changelogerator --help
-
-          URL=https://github.com/chevdor/tera-cli/releases/download/v0.2.1/tera-cli_linux_amd64.deb
-          wget $URL -O tera.deb
-          sudo dpkg -i tera.deb
-          tera --version
-
-      - name: Generate release notes
-        env:
-          RUSTC_STABLE: ${{ needs.get-rust-versions.outputs.rustc-stable }}
-          RUSTC_NIGHTLY: ${{ needs.get-rust-versions.outputs.rustc-nightly }}
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-          NO_CACHE: 1
-          DEBUG: 1
-          SHELL_DIGEST: ${{ github.workspace}}/shell-srtool-json/shell-srtool-digest.json
-          ASSET_HUB_WESTEND_DIGEST: ${{ github.workspace}}/asset-hub-westend-srtool-json/asset-hub-westend-srtool-digest.json
-          ASSET_HUB_KUSAMA_DIGEST: ${{ github.workspace}}/asset-hub-kusama-srtool-json/asset-hub-kusama-srtool-digest.json
-          ASSET_HUB_POLKADOT_DIGEST: ${{ github.workspace}}/asset-hub-polkadot-srtool-json/asset-hub-polkadot-srtool-digest.json
-          BRIDGE_HUB_ROCOCO_DIGEST: ${{ github.workspace}}/bridge-hub-rococo-srtool-json/bridge-hub-rococo-srtool-digest.json
-          BRIDGE_HUB_KUSAMA_DIGEST: ${{ github.workspace}}/bridge-hub-kusama-srtool-json/bridge-hub-kusama-srtool-digest.json
-          BRIDGE_HUB_POLKADOT_DIGEST: ${{ github.workspace}}/bridge-hub-polkadot-srtool-json/bridge-hub-polkadot-srtool-digest.json
-          COLLECTIVES_POLKADOT_DIGEST: ${{ github.workspace}}/collectives-polkadot-srtool-json/collectives-polkadot-srtool-digest.json
-          ROCOCO_PARA_DIGEST: ${{ github.workspace}}/rococo-parachain-srtool-json/rococo-parachain-srtool-digest.json
-          CANVAS_KUSAMA_DIGEST: ${{ github.workspace}}/contracts-rococo-srtool-json/contracts-rococo-srtool-digest.json
-          REF1: ${{ github.event.inputs.ref1 }}
-          REF2: ${{ github.event.inputs.ref2 }}
-          PRE_RELEASE: ${{ github.event.inputs.pre_release }}
-          RELEASE_TYPE: ${{ github.event.inputs.release_type }}
-        run: |
-          find ${{env.GITHUB_WORKSPACE}} -type f -name "*-srtool-digest.json"
-
-          if [ "$RELEASE_TYPE" != "client" ]; then
-            ls -al $SHELL_DIGEST || true
-            ls -al $ASSET_HUB_WESTEND_DIGEST || true
-            ls -al $ASSET_HUB_KUSAMA_DIGEST || true
-            ls -al $ASSET_HUB_POLKADOT_DIGEST || true
-            ls -al $BRIDGE_HUB_ROCOCO_DIGEST || true
-            ls -al $BRIDGE_HUB_KUSAMA_DIGEST || true
-            ls -al $BRIDGE_HUB_POLKADOT_DIGEST || true
-            ls -al $COLLECTIVES_POLKADOT_DIGEST || true
-            ls -al $ROCOCO_PARA_DIGEST || true
-            ls -al $CANVAS_KUSAMA_DIGEST || true
-          fi
-
-          echo "The diff will be computed from $REF1 to $REF2"
-          cd cumulus/scripts/ci/changelog
-          ./bin/changelog $REF1 $REF2 release-notes.md
-          ls -al {release-notes.md,context.json} || true
-
-      - name: Archive srtool json
-        if: ${{ github.event.inputs.release_type != 'client' }}
-        uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
-        with:
-          name: srtool-json
-          path: |
-            **/*-srtool-digest.json
-
-      - name: Archive context artifact
-        uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
-        with:
-          name: release-notes-context
-          path: |
-            cumulus/scripts/ci/changelog/context.json
-
-      - name: Create draft release
-        id: create-release
-        uses: actions/create-release@0cb9c9b65d5d1901c1f53e5e66eaf4afd303e70e # v1.1.4
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          body_path: ./cumulus/scripts/ci/changelog/release-notes.md
-          tag_name: ${{ github.event.inputs.ref2 }}
-          release_name: ${{ github.event.inputs.ref2 }}
-          draft: true
-
-  publish-runtimes:
-    if: ${{ github.event.inputs.release_type != 'client' }}
-    runs-on: ubuntu-latest
-    needs: ["publish-draft-release"]
-    env:
-      RUNTIME_DIR: parachains/runtimes
-    strategy:
-       matrix:
-        include:
-          - category: assets
-            runtime: asset-hub-kusama
-          - category: assets
-            runtime: asset-hub-polkadot
-          - category: assets
-            runtime: asset-hub-westend
-          - category: bridge-hubs
-            runtime: bridge-hub-polkadot
-          - category: bridge-hubs
-            runtime: bridge-hub-kusama
-          - category: bridge-hubs
-            runtime: bridge-hub-rococo
-          - category: collectives
-            runtime: collectives-polkadot
-          - category: contracts
-            runtime: contracts-rococo
-          - category: starters
-            runtime: seedling
-          - category: starters
-            runtime: shell
-          - category: testing
-            runtime: rococo-parachain
-    steps:
-      - name: Checkout sources
-        uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
-        with:
-          ref: ${{ github.event.inputs.ref2 }}
-
-      - name: Download artifacts
-        uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2
-
-      - uses: ruby/setup-ruby@250fcd6a742febb1123a77a841497ccaa8b9e939 # v1.152.0
-        with:
-          ruby-version: 3.0.0
-
-      - name: Get runtime version for ${{ matrix.runtime }}
-        id: get-runtime-ver
-        run: |
-          echo "require './scripts/ci/github/runtime-version.rb'" > script.rb
-          echo "puts get_runtime(runtime: \"${{ matrix.runtime }}\", runtime_dir: \"$RUNTIME_DIR/${{ matrix.category }}\")" >> script.rb
-
-          echo "Current folder: $PWD"
-          ls "$RUNTIME_DIR/${{ matrix.category }}/${{ matrix.runtime }}"
-          runtime_ver=$(ruby script.rb)
-          echo "Found version: >$runtime_ver<"
-          echo "runtime_ver=$runtime_ver" >> $GITHUB_OUTPUT
-
-      - name: Fix runtime name
-        id: fix-runtime-path
-        run: |
-          cd "${{ matrix.runtime }}-runtime/"
-          mv "$(sed -E 's/- */_/g' <<< ${{ matrix.runtime }})_runtime.compact.compressed.wasm" "${{ matrix.runtime }}_runtime.compact.compressed.wasm" || true
-
-      - name: Upload compressed ${{ matrix.runtime }} wasm
-        uses: actions/upload-release-asset@e8f9f06c4b078e705bd2ea027f0926603fc9b4d5 # v1.0.2
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ needs.publish-draft-release.outputs.asset_upload_url }}
-          asset_path: "${{ matrix.runtime }}-runtime/${{ matrix.runtime }}_runtime.compact.compressed.wasm"
-          asset_name: ${{ matrix.runtime }}_runtime-v${{ steps.get-runtime-ver.outputs.runtime_ver }}.compact.compressed.wasm
-          asset_content_type: application/wasm
-
-  post_to_matrix:
-    if: ${{ github.event.inputs.notification == 'true' }}
-    runs-on: ubuntu-latest
-    needs: publish-draft-release
-    strategy:
-      matrix:
-        channel:
-          - name: 'RelEng: Cumulus Release Coordination'
-            room: '!NAEMyPAHWOiOQHsvus:parity.io'
-            pre-releases: true
-    steps:
-      - name: Matrix notification to ${{ matrix.channel.name }}
-        uses: s3krit/matrix-message-action@70ad3fb812ee0e45ff8999d6af11cafad11a6ecf # v0.0.3
-        with:
-          room_id: ${{ matrix.channel.room }}
-          access_token: ${{ secrets.RELEASENOTES_MATRIX_V2_ACCESS_TOKEN }}
-          server: "m.parity.io"
-          message: |
-            **New draft for ${{ github.repository }}**: ${{ github.event.inputs.ref2 }}<br/>
-
-            Draft release created: [draft](${{ needs.publish-draft-release.outputs.release_url }})
-
-            NOTE: The link above will no longer be valid if the draft is edited. You can then use the following link:
-            [${{ github.server_url }}/${{ github.repository }}/releases](${{ github.server_url }}/${{ github.repository }}/releases)
diff --git a/cumulus/.github/workflows/release-99_bot-announce.yml b/cumulus/.github/workflows/release-99_bot-announce.yml
deleted file mode 100644
index 5c2604924c4c0..0000000000000
--- a/cumulus/.github/workflows/release-99_bot-announce.yml
+++ /dev/null
@@ -1,39 +0,0 @@
-name: Release - Pushes release notes to a Matrix room
-on:
-  release:
-    types:
-      - published
-
-jobs:
-  ping_matrix:
-    runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        channel:
-          - name: 'RelEng: Cumulus Release Coordination'
-            room: '!NAEMyPAHWOiOQHsvus:parity.io'
-            pre-releases: true
-          - name: 'Ledger <> Polkadot Coordination'
-            room: '!EoIhaKfGPmFOBrNSHT:web3.foundation'
-            pre-release: true
-          - name: 'General: Rust, Polkadot, Substrate'
-            room: '!aJymqQYtCjjqImFLSb:parity.io'
-            pre-release: false
-          - name: 'Team: DevOps'
-            room: '!lUslSijLMgNcEKcAiE:parity.io'
-            pre-release: true
-
-    steps:
-      - name: Matrix notification to ${{ matrix.channel.name }}
-        uses: s3krit/matrix-message-action@70ad3fb812ee0e45ff8999d6af11cafad11a6ecf # v0.0.3
-        with:
-          room_id: ${{ matrix.channel.room }}
-          access_token: ${{ secrets.RELEASENOTES_MATRIX_V2_ACCESS_TOKEN }}
-          server: "m.parity.io"
-          message: |
-            A (pre)release has been ${{github.event.action}} in **${{github.event.repository.full_name}}:**<br/>
-            Release version: [${{github.event.release.tag_name}}](${{github.event.release.html_url}})
-
-            -----
-
-            ${{github.event.release.body}}
diff --git a/cumulus/.github/workflows/srtool.yml b/cumulus/.github/workflows/srtool.yml
deleted file mode 100644
index ae473b4813709..0000000000000
--- a/cumulus/.github/workflows/srtool.yml
+++ /dev/null
@@ -1,122 +0,0 @@
-name: Srtool build
-
-env:
-  SUBWASM_VERSION: 0.20.0
-
-on:
-  push:
-    tags:
-      - "*"
-
-    # paths-ignore:
-    #   - "docker"
-    #   - "docs"
-    #   - "scripts"
-    #   - "test"
-    #   - "client"
-    paths:
-      - parachains/runtimes/**/*
-
-    branches:
-      - "release*"
-
-  schedule:
-    - cron: "00 02 * * 1" # 2AM weekly on monday
-
-  workflow_dispatch:
-
-jobs:
-  srtool:
-    runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        include:
-          - category: assets
-            runtime: asset-hub-kusama
-          - category: assets
-            runtime: asset-hub-polkadot
-          - category: assets
-            runtime: asset-hub-westend
-          - category: bridge-hubs
-            runtime: bridge-hub-polkadot
-          - category: bridge-hubs
-            runtime: bridge-hub-kusama
-          - category: bridge-hubs
-            runtime: bridge-hub-rococo
-          - category: collectives
-            runtime: collectives-polkadot
-          - category: contracts
-            runtime: contracts-rococo
-          - category: starters
-            runtime: seedling
-          - category: starters
-            runtime: shell
-          - category: testing
-            runtime: rococo-parachain
-    steps:
-      - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
-        with:
-          fetch-depth: 0
-
-      - name: Srtool build
-        id: srtool_build
-        uses: chevdor/srtool-actions@v0.7.0
-        with:
-          chain: ${{ matrix.runtime }}
-          runtime_dir: parachains/runtimes/${{ matrix.category }}/${{ matrix.runtime }}
-
-      - name: Summary
-        run: |
-          echo '${{ steps.srtool_build.outputs.json }}' | jq > ${{ matrix.runtime }}-srtool-digest.json
-          cat ${{ matrix.runtime }}-srtool-digest.json
-          echo "Compact Runtime: ${{ steps.srtool_build.outputs.wasm }}"
-          echo "Compressed Runtime: ${{ steps.srtool_build.outputs.wasm_compressed }}"
-
-      # it takes a while to build the runtime, so let's save the artifact as soon as we have it
-      - name: Archive Artifacts for ${{ matrix.runtime }}
-        uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
-        with:
-          name: ${{ matrix.runtime }}-runtime
-          path: |
-            ${{ steps.srtool_build.outputs.wasm }}
-            ${{ steps.srtool_build.outputs.wasm_compressed }}
-            ${{ matrix.runtime }}-srtool-digest.json
-
-      # We now get extra information thanks to subwasm
-      - name: Install subwasm
-        run: |
-          wget https://github.com/chevdor/subwasm/releases/download/v${{ env.SUBWASM_VERSION }}/subwasm_linux_amd64_v${{ env.SUBWASM_VERSION }}.deb
-          sudo dpkg -i subwasm_linux_amd64_v${{ env.SUBWASM_VERSION }}.deb
-          subwasm --version
-
-      - name: Show Runtime information
-        shell: bash
-        run: |
-          subwasm info ${{ steps.srtool_build.outputs.wasm }}
-          subwasm info ${{ steps.srtool_build.outputs.wasm_compressed }}
-          subwasm --json info ${{ steps.srtool_build.outputs.wasm }} > ${{ matrix.runtime }}-info.json
-          subwasm --json info ${{ steps.srtool_build.outputs.wasm_compressed }} > ${{ matrix.runtime }}-compressed-info.json
-
-      - name: Extract the metadata
-        shell: bash
-        run: |
-          subwasm meta ${{ steps.srtool_build.outputs.wasm }}
-          subwasm --json meta ${{ steps.srtool_build.outputs.wasm }} > ${{ matrix.runtime }}-metadata.json
-
-      - name: Check the metadata diff
-        shell: bash
-        # the following subwasm call will error for chains that are not known and/or live, that includes shell for instance
-        run: |
-          subwasm diff ${{ steps.srtool_build.outputs.wasm }} --chain-b ${{ matrix.runtime }} || \
-            echo "Subwasm call failed, check the logs. This is likely because ${{ matrix.runtime }} is not known by subwasm" | \
-            tee ${{ matrix.runtime }}-diff.txt
-
-      - name: Archive Subwasm results
-        uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
-        with:
-          name: ${{ matrix.runtime }}-runtime
-          path: |
-            ${{ matrix.runtime }}-info.json
-            ${{ matrix.runtime }}-compressed-info.json
-            ${{ matrix.runtime }}-metadata.json
-            ${{ matrix.runtime }}-diff.txt
diff --git a/cumulus/.gitlab-ci.yml b/cumulus/.gitlab-ci.yml
deleted file mode 100644
index f032901c6f471..0000000000000
--- a/cumulus/.gitlab-ci.yml
+++ /dev/null
@@ -1,201 +0,0 @@
-# .gitlab-ci.yml
-#
-# cumulus
-#
-# pipelines can be triggered manually in the web
-
-stages:
-  - test
-  - build
-  # used for manual job run for regenerate weights for release-* branches (not needed anymore, just leave it here for a while as PlanB)
-  - benchmarks-build
-  # used for manual job run for regenerate weights for release-* branches (not needed anymore, just leave it here for a while as PlanB)
-  - benchmarks-run
-  - publish
-  - integration-tests
-  - zombienet
-  - short-benchmarks
-
-default:
-  interruptible: true
-  retry:
-    max: 2
-    when:
-      - runner_system_failure
-      - unknown_failure
-      - api_failure
-
-variables:
-  GIT_STRATEGY: fetch
-  GIT_DEPTH: 100
-  CARGO_INCREMENTAL: 0
-  CI_IMAGE: !reference [.ci-unified, variables, CI_IMAGE]  
-  DOCKER_OS: "debian:stretch"
-  ARCH: "x86_64"
-  ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.55"
-  BUILDAH_IMAGE: "quay.io/buildah/stable:v1.29"
-  BUILDAH_COMMAND: "buildah --storage-driver overlay2"
-
-.common-before-script:
-  before_script:
-    - !reference [.job-switcher, before_script]
-    - !reference [.timestamp, before_script]
-
-.collect-artifacts:
-  artifacts:
-    name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}"
-    when: on_success
-    expire_in: 1 days
-    paths:
-      - ./artifacts/
-
-# collecting vars for pipeline stopper
-# they will be used if the job fails
-.pipeline-stopper-vars:
-  before_script:
-    - echo "FAILED_JOB_URL=${CI_JOB_URL}" > pipeline-stopper.env
-    - echo "FAILED_JOB_NAME=${CI_JOB_NAME}" >> pipeline-stopper.env
-    - echo "FAILED_JOB_NAME=${CI_JOB_NAME}" >> pipeline-stopper.env
-    - echo "PR_NUM=${CI_COMMIT_REF_NAME}" >> pipeline-stopper.env
-
-.pipeline-stopper-artifacts:
-  artifacts:
-    reports:
-      dotenv: pipeline-stopper.env
-
-.common-refs:
-  # these jobs run always*
-  rules:
-    - if: $CI_PIPELINE_SOURCE == "schedule"
-    - if: $CI_COMMIT_REF_NAME == "master"
-    - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs
-    - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1
-    - if: $CI_COMMIT_REF_NAME =~ /^release-parachains-v[0-9].*$/ # i.e. release-parachains-v1.0, release-parachains-v2.1rc1, release-parachains-v3000
-    - if: $CI_COMMIT_REF_NAME =~ /^polkadot-v[0-9]+\.[0-9]+.*$/ # i.e. polkadot-v1.0.99, polkadot-v2.1rc1
-
-.pr-refs:
-  # these jobs run always*
-  rules:
-    - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs
-
-.publish-refs:
-  rules:
-    - if: $CI_COMMIT_REF_NAME == "master"
-    - if: $CI_PIPELINE_SOURCE == "schedule"
-    - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1
-
-# run benchmarks manually only on release-parachains-v* branch
-.benchmarks-manual-refs:
-  rules:
-    - if: $CI_COMMIT_REF_NAME =~ /^release-parachains-v[0-9].*$/ # i.e. release-parachains-v1.0, release-parachains-v2.1rc1, release-parachains-v3000
-      when: manual
-
-# run benchmarks only on release-parachains-v* branch
-.benchmarks-refs:
-  rules:
-    - if: $CI_COMMIT_REF_NAME =~ /^release-parachains-v[0-9].*$/ # i.e. release-parachains-v1.0, release-parachains-v2.1rc1, release-parachains-v3000
-
-.zombienet-refs:
-  rules:
-    - if: $CI_PIPELINE_SOURCE == "pipeline"
-      when: never
-    - if: $CI_PIPELINE_SOURCE == "schedule"
-      when: never
-    - if: $CI_COMMIT_REF_NAME == "master"
-    - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs
-
-.job-switcher:
-  before_script:
-    - if echo "$CI_DISABLED_JOBS" | grep -xF "$CI_JOB_NAME"; then echo "The job has been cancelled in CI settings"; exit 0; fi
-
-.docker-env:
-  image: "${CI_IMAGE}"
-  before_script:
-    - !reference [.common-before-script, before_script]
-    - rustup show
-    - cargo --version
-    - bash --version
-  tags:
-    - linux-docker-vm-c2
-
-.kubernetes-env:
-  image: "${CI_IMAGE}"
-  before_script:
-    - !reference [.common-before-script, before_script]
-  tags:
-    - kubernetes-parity-build
-
-.git-commit-push:
-  script:
-    - git status
-    # Set git config
-    - rm -rf .git/config
-    - git config --global user.email "${GITHUB_EMAIL}"
-    - git config --global user.name "${GITHUB_USER}"
-    - git config remote.origin.url "https://${GITHUB_USER}:${GITHUB_TOKEN}@github.com/paritytech/${CI_PROJECT_NAME}.git"
-    - git config remote.origin.fetch "+refs/heads/*:refs/remotes/origin/*"
-    # push results to github
-    - git checkout -b $BRANCHNAME
-    - git add parachains/*
-    - git commit -m "[benchmarks] pr with weights"
-    - git push origin $BRANCHNAME
-
-include:
-  # test jobs
-  - scripts/ci/gitlab/pipeline/test.yml
-  # # build jobs
-  - scripts/ci/gitlab/pipeline/build.yml
-  # short-benchmarks jobs
-  - scripts/ci/gitlab/pipeline/short-benchmarks.yml
-  # # benchmarks jobs
-  # # used for manually regenerating weights for release-* branches (not needed anymore; kept here for a while as Plan B)
-  - scripts/ci/gitlab/pipeline/benchmarks.yml
-  # # publish jobs
-  - scripts/ci/gitlab/pipeline/publish.yml
-  # zombienet jobs
-  - scripts/ci/gitlab/pipeline/zombienet.yml
-  # timestamp handler
-  - project: parity/infrastructure/ci_cd/shared
-    ref: main
-    file: /common/timestamp.yml
-  - project: parity/infrastructure/ci_cd/shared
-    ref: main
-    file: /common/ci-unified.yml  
-
-
-#### stage:                        .post
-
-# This job cancels the whole pipeline if any of the provided jobs fail.
-# In a DAG, every chain of jobs executes independently of the others. The `fail_fast` principle suggests
-# failing the pipeline as soon as possible to shorten the feedback loop.
-cancel-pipeline:
-  stage: .post
-  needs:
-    - job: test-linux-stable
-  rules:
-    - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs
-      when: on_failure
-  variables:
-    PROJECT_ID: "${CI_PROJECT_ID}"
-    PROJECT_NAME: "${CI_PROJECT_NAME}"
-    PIPELINE_ID: "${CI_PIPELINE_ID}"
-    FAILED_JOB_URL: "${FAILED_JOB_URL}"
-    FAILED_JOB_NAME: "${FAILED_JOB_NAME}"
-    PR_NUM: "${PR_NUM}"
-  trigger:
-    project: "parity/infrastructure/ci_cd/pipeline-stopper"
-    branch: "as-improve"
-
-remove-cancel-pipeline-message:
-  stage: .post
-  rules:
-    - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs
-  variables:
-    PROJECT_ID: "${CI_PROJECT_ID}"
-    PROJECT_NAME: "${CI_PROJECT_NAME}"
-    PIPELINE_ID: "${CI_PIPELINE_ID}"
-    FAILED_JOB_URL: "https://gitlab.com"
-    FAILED_JOB_NAME: "nope"
-    PR_NUM: "${CI_COMMIT_REF_NAME}"
-  trigger:
-    project: "parity/infrastructure/ci_cd/pipeline-stopper"
diff --git a/cumulus/client/cli/Cargo.toml b/cumulus/client/cli/Cargo.toml
index c45b669fc6d1e..5dd18f0c156d1 100644
--- a/cumulus/client/cli/Cargo.toml
+++ b/cumulus/client/cli/Cargo.toml
@@ -5,7 +5,7 @@ authors.workspace = true
 edition.workspace = true
 
 [dependencies]
-clap = { version = "4.4.4", features = ["derive"] }
+clap = { version = "4.4.6", features = ["derive"] }
 codec = { package = "parity-scale-codec", version = "3.0.0" }
 url = "2.4.0"
 
diff --git a/cumulus/pallets/parachain-system/proc-macro/Cargo.toml b/cumulus/pallets/parachain-system/proc-macro/Cargo.toml
index a1510847fc2f5..cb5d9904c7cf3 100644
--- a/cumulus/pallets/parachain-system/proc-macro/Cargo.toml
+++ b/cumulus/pallets/parachain-system/proc-macro/Cargo.toml
@@ -9,7 +9,7 @@ description = "Proc macros provided by the parachain-system pallet"
 proc-macro = true
 
 [dependencies]
-syn = "2.0.37"
+syn = "2.0.38"
 proc-macro2 = "1.0.64"
 quote = "1.0.33"
 proc-macro-crate = "1.3.1"
diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs
index a8f0a49223f53..eaf15768e290d 100644
--- a/cumulus/pallets/parachain-system/src/lib.rs
+++ b/cumulus/pallets/parachain-system/src/lib.rs
@@ -245,10 +245,10 @@ pub mod pallet {
 			<UpgradeRestrictionSignal<T>>::kill();
 			let relay_upgrade_go_ahead = <UpgradeGoAhead<T>>::take();
 
-			assert!(
-				<ValidationData<T>>::exists(),
-				"set_validation_data inherent needs to be present in every block!"
-			);
+			let vfp = <ValidationData<T>>::get()
+				.expect("set_validation_data inherent needs to be present in every block!");
+
+			LastRelayChainBlockNumber::<T>::put(vfp.relay_parent_number);
 
 			let host_config = match Self::host_configuration() {
 				Some(ok) => ok,
@@ -380,8 +380,7 @@ pub mod pallet {
 				let ancestor = Ancestor::new_unchecked(used_bandwidth, consumed_go_ahead_signal);
 
 				let watermark = HrmpWatermark::<T>::get();
-				let watermark_update =
-					HrmpWatermarkUpdate::new(watermark, LastRelayChainBlockNumber::<T>::get());
+				let watermark_update = HrmpWatermarkUpdate::new(watermark, vfp.relay_parent_number);
 
 				aggregated_segment
 					.append(&ancestor, watermark_update, &total_bandwidth_out)
@@ -460,6 +459,9 @@ pub mod pallet {
 				4 + hrmp_max_message_num_per_candidate as u64,
 			);
 
+			// Weight for updating the last relay chain block number in `on_finalize`.
+			weight += T::DbWeight::get().reads_writes(1, 1);
+
 			// Weight for adjusting the unincluded segment in `on_finalize`.
 			weight += T::DbWeight::get().reads_writes(6, 3);
 
@@ -515,7 +517,6 @@ pub mod pallet {
 				vfp.relay_parent_number,
 				LastRelayChainBlockNumber::<T>::get(),
 			);
-			LastRelayChainBlockNumber::<T>::put(vfp.relay_parent_number);
 
 			let relay_state_proof = RelayChainStateProof::new(
 				T::SelfParaId::get(),
@@ -756,6 +757,8 @@ pub mod pallet {
 	pub(super) type DidSetValidationCode<T: Config> = StorageValue<_, bool, ValueQuery>;
 
 	/// The relay chain block number associated with the last parachain block.
+	///
+	/// This is updated in `on_finalize`.
 	#[pallet::storage]
 	pub(super) type LastRelayChainBlockNumber<T: Config> =
 		StorageValue<_, RelayChainBlockNumber, ValueQuery>;
@@ -1501,6 +1504,12 @@ impl<T: Config> Pallet<T> {
 		Self::deposit_event(Event::UpwardMessageSent { message_hash: Some(hash) });
 		Ok((0, hash))
 	}
+
+	/// Get the relay chain block number which was used as an anchor for the last block in this
+	/// chain.
+	pub fn last_relay_block_number() -> RelayChainBlockNumber {
+		LastRelayChainBlockNumber::<T>::get()
+	}
 }
 
 impl<T: Config> UpwardMessageSender for Pallet<T> {
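Editor's note: the new getter is the only public read path for this storage item. A minimal usage sketch, assuming a downstream helper in the same runtime (the function name and context are illustrative; only `last_relay_block_number` itself comes from the diff):

```rust
use cumulus_pallet_parachain_system::{Config, Pallet};

/// Hypothetical downstream helper; only the getter is from this diff.
fn anchor_of_last_block<T: Config>() -> u32 {
	// `on_finalize` now keeps `LastRelayChainBlockNumber` in sync with the
	// relay parent of the block being finalized, so this read returns the
	// anchor of the most recently built parachain block.
	Pallet::<T>::last_relay_block_number()
}
```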
diff --git a/cumulus/pallets/xcmp-queue/src/lib.rs b/cumulus/pallets/xcmp-queue/src/lib.rs
index 1cb92f5951869..7ee07a7beb0a7 100644
--- a/cumulus/pallets/xcmp-queue/src/lib.rs
+++ b/cumulus/pallets/xcmp-queue/src/lib.rs
@@ -1129,7 +1129,7 @@ impl<T: Config> XcmpMessageSource for Pallet<T> {
 		let pruned = old_statuses_len - statuses.len();
 		// removing an item from status implies a message being sent, so the result messages must
 		// be no less than the pruned channels.
-		statuses.rotate_left(result.len() - pruned);
+		statuses.rotate_left(result.len().saturating_sub(pruned));
 
 		<OutboundXcmpStatus<T>>::put(statuses);
 
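Editor's note: the `saturating_sub` change above is defensive. The adjacent comment claims `result.len() >= pruned`, but if that invariant were ever broken, the old plain subtraction would underflow. A standalone sketch of the difference, with illustrative values:

```rust
fn rotation_offset(result_len: usize, pruned: usize) -> usize {
	// Clamps at zero instead of panicking (debug) or wrapping (release)
	// when `pruned` exceeds `result_len`.
	result_len.saturating_sub(pruned)
}

fn main() {
	assert_eq!(rotation_offset(3, 1), 2); // normal case
	assert_eq!(rotation_offset(0, 2), 0); // broken invariant: no-op, no underflow
}
```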
diff --git a/cumulus/parachain-template/node/Cargo.toml b/cumulus/parachain-template/node/Cargo.toml
index 114b25d126115..e73c7b507262e 100644
--- a/cumulus/parachain-template/node/Cargo.toml
+++ b/cumulus/parachain-template/node/Cargo.toml
@@ -11,7 +11,7 @@ build = "build.rs"
 publish = false
 
 [dependencies]
-clap = { version = "4.4.4", features = ["derive"] }
+clap = { version = "4.4.6", features = ["derive"] }
 log = "0.4.20"
 codec = { package = "parity-scale-codec", version = "3.0.0" }
 serde = { version = "1.0.188", features = ["derive"] }
diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/Cargo.toml
index af9776cbcd999..bf141dafebf2c 100644
--- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/Cargo.toml
+++ b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/Cargo.toml
@@ -18,17 +18,24 @@ frame-system = { path = "../../../../../../substrate/frame/system", default-feat
 pallet-balances = { path = "../../../../../../substrate/frame/balances", default-features = false}
 pallet-assets = { path = "../../../../../../substrate/frame/assets", default-features = false}
 pallet-asset-conversion = { path = "../../../../../../substrate/frame/asset-conversion", default-features = false}
+pallet-treasury = { path = "../../../../../../substrate/frame/treasury", default-features = false}
+pallet-asset-rate = { path = "../../../../../../substrate/frame/asset-rate", default-features = false}
 
 # Polkadot
 polkadot-core-primitives = { path = "../../../../../../polkadot/core-primitives", default-features = false}
 polkadot-parachain-primitives = { path = "../../../../../../polkadot/parachain", default-features = false}
+polkadot-runtime-common = { path = "../../../../../../polkadot/runtime/common" }
 polkadot-runtime-parachains = { path = "../../../../../../polkadot/runtime/parachains" }
 xcm = { package = "staging-xcm", path = "../../../../../../polkadot/xcm", default-features = false}
+xcm-builder = { package = "staging-xcm-builder",  path = "../../../../../../polkadot/xcm/xcm-builder", default-features = false}
+xcm-executor = { package = "staging-xcm-executor",  path = "../../../../../../polkadot/xcm/xcm-executor", default-features = false}
 pallet-xcm = { path = "../../../../../../polkadot/xcm/pallet-xcm", default-features = false}
 
 # Cumulus
 parachains-common = { path = "../../../../common" }
 asset-hub-westend-runtime = { path = "../../../../runtimes/assets/asset-hub-westend" }
+cumulus-pallet-dmp-queue = { default-features = false, path = "../../../../../pallets/dmp-queue" }
+cumulus-pallet-parachain-system = { default-features = false, path = "../../../../../pallets/parachain-system" }
 
 # Local
 xcm-emulator = { path = "../../../../../xcm/xcm-emulator", default-features = false}
@@ -37,15 +44,21 @@ integration-tests-common = { path = "../../common", default-features = false}
 [features]
 runtime-benchmarks = [
 	"asset-hub-westend-runtime/runtime-benchmarks",
+	"cumulus-pallet-parachain-system/runtime-benchmarks",
 	"frame-support/runtime-benchmarks",
 	"frame-system/runtime-benchmarks",
 	"integration-tests-common/runtime-benchmarks",
 	"pallet-asset-conversion/runtime-benchmarks",
+	"pallet-asset-rate/runtime-benchmarks",
 	"pallet-assets/runtime-benchmarks",
 	"pallet-balances/runtime-benchmarks",
+	"pallet-treasury/runtime-benchmarks",
 	"pallet-xcm/runtime-benchmarks",
 	"parachains-common/runtime-benchmarks",
 	"polkadot-parachain-primitives/runtime-benchmarks",
+	"polkadot-runtime-common/runtime-benchmarks",
 	"polkadot-runtime-parachains/runtime-benchmarks",
 	"sp-runtime/runtime-benchmarks",
+	"xcm-builder/runtime-benchmarks",
+	"xcm-executor/runtime-benchmarks",
 ]
diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/mod.rs b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/mod.rs
index b3841af0e6c38..0c9de89c5f98f 100644
--- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/mod.rs
+++ b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/mod.rs
@@ -18,3 +18,4 @@ mod send;
 mod set_xcm_versions;
 mod swap;
 mod teleport;
+mod treasury;
diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/reserve_transfer.rs b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/reserve_transfer.rs
index 51fac43be1255..8f8b7a7dde777 100644
--- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/reserve_transfer.rs
+++ b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/reserve_transfer.rs
@@ -35,11 +35,8 @@ fn relay_origin_assertions(t: RelayToSystemParaTest) {
 	);
 }
 
-fn system_para_dest_assertions_incomplete(_t: RelayToSystemParaTest) {
-	AssetHubWestend::assert_dmp_queue_incomplete(
-		Some(Weight::from_parts(1_000_000_000, 0)),
-		Some(Error::UntrustedReserveLocation),
-	);
+fn system_para_dest_assertions(_t: RelayToSystemParaTest) {
+	AssetHubWestend::assert_dmp_queue_error(Error::WeightNotComputable);
 }
 
 fn system_para_to_relay_assertions(_t: SystemParaToRelayTest) {
@@ -178,7 +175,7 @@ fn limited_reserve_transfer_native_asset_from_relay_to_system_para_fails() {
 	let receiver_balance_before = test.receiver.balance;
 
 	test.set_assertion::<Westend>(relay_origin_assertions);
-	test.set_assertion::<AssetHubWestend>(system_para_dest_assertions_incomplete);
+	test.set_assertion::<AssetHubWestend>(system_para_dest_assertions);
 	test.set_dispatchable::<Westend>(relay_limited_reserve_transfer_assets);
 	test.assert();
 
@@ -237,7 +234,7 @@ fn reserve_transfer_native_asset_from_relay_to_system_para_fails() {
 	let receiver_balance_before = test.receiver.balance;
 
 	test.set_assertion::<Westend>(relay_origin_assertions);
-	test.set_assertion::<AssetHubWestend>(system_para_dest_assertions_incomplete);
+	test.set_assertion::<AssetHubWestend>(system_para_dest_assertions);
 	test.set_dispatchable::<Westend>(relay_reserve_transfer_assets);
 	test.assert();
 
@@ -352,6 +349,7 @@ fn limited_reserve_transfer_asset_from_system_para_to_para() {
 		ASSET_MIN_BALANCE,
 		true,
 		AssetHubWestendSender::get(),
+		Some(Weight::from_parts(1_019_445_000, 200_000)),
 		ASSET_MIN_BALANCE * 1000000,
 	);
 
@@ -387,6 +385,7 @@ fn reserve_transfer_asset_from_system_para_to_para() {
 		ASSET_MIN_BALANCE,
 		true,
 		AssetHubWestendSender::get(),
+		Some(Weight::from_parts(1_019_445_000, 200_000)),
 		ASSET_MIN_BALANCE * 1000000,
 	);
 
diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/send.rs b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/send.rs
index 424d222bef381..e603af685bb5c 100644
--- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/send.rs
+++ b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/send.rs
@@ -16,52 +16,16 @@
 use crate::*;
 
 /// Relay Chain should be able to execute `Transact` instructions in System Parachain
-/// when `OriginKind::Superuser` and signer is `sudo`
+/// with `OriginKind::Superuser`.
 #[test]
-fn send_transact_sudo_from_relay_to_system_para_works() {
-	// Init tests variables
-	let root_origin = <Westend as Chain>::RuntimeOrigin::root();
-	let system_para_destination = Westend::child_location_of(AssetHubWestend::para_id()).into();
-	let asset_owner: AccountId = AssetHubWestendSender::get().into();
-	let xcm = AssetHubWestend::force_create_asset_xcm(
-		OriginKind::Superuser,
+fn send_transact_as_superuser_from_relay_to_system_para_works() {
+	AssetHubWestend::force_create_asset_from_relay_as_root(
 		ASSET_ID,
-		asset_owner.clone(),
+		ASSET_MIN_BALANCE,
 		true,
-		1000,
-	);
-	// Send XCM message from Relay Chain
-	Westend::execute_with(|| {
-		assert_ok!(<Westend as WestendPallet>::XcmPallet::send(
-			root_origin,
-			bx!(system_para_destination),
-			bx!(xcm),
-		));
-
-		Westend::assert_xcm_pallet_sent();
-	});
-
-	// Receive XCM message in Assets Parachain
-	AssetHubWestend::execute_with(|| {
-		type RuntimeEvent = <AssetHubWestend as Chain>::RuntimeEvent;
-
-		AssetHubWestend::assert_dmp_queue_complete(Some(Weight::from_parts(
-			1_019_445_000,
-			200_000,
-		)));
-
-		assert_expected_events!(
-			AssetHubWestend,
-			vec![
-				RuntimeEvent::Assets(pallet_assets::Event::ForceCreated { asset_id, owner }) => {
-					asset_id: *asset_id == ASSET_ID,
-					owner: *owner == asset_owner,
-				},
-			]
-		);
-
-		assert!(<AssetHubWestend as AssetHubWestendPallet>::Assets::asset_exists(ASSET_ID));
-	});
+		AssetHubWestendSender::get().into(),
+		Some(Weight::from_parts(1_019_445_000, 200_000)),
+	)
 }
 
 /// Parachain should be able to send XCM paying its fee with sufficient asset
@@ -78,6 +42,7 @@ fn send_xcm_from_para_to_system_para_paying_fee_with_assets_works() {
 		ASSET_MIN_BALANCE,
 		true,
 		para_sovereign_account.clone(),
+		Some(Weight::from_parts(1_019_445_000, 200_000)),
 		ASSET_MIN_BALANCE * 1000000000,
 	);
 
@@ -119,8 +84,8 @@ fn send_xcm_from_para_to_system_para_paying_fee_with_assets_works() {
 		type RuntimeEvent = <AssetHubWestend as Chain>::RuntimeEvent;
 
 		AssetHubWestend::assert_xcmp_queue_success(Some(Weight::from_parts(
-			2_176_414_000,
-			203_593,
+			16_290_336_000,
+			562_893,
 		)));
 
 		assert_expected_events!(
diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/set_xcm_versions.rs b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/set_xcm_versions.rs
index 2720095aac00d..2133d5e5fb7c7 100644
--- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/set_xcm_versions.rs
+++ b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/set_xcm_versions.rs
@@ -47,32 +47,22 @@ fn relay_sets_system_para_xcm_supported_version() {
 #[test]
 fn system_para_sets_relay_xcm_supported_version() {
 	// Init test variables
-	let sudo_origin = <Westend as Chain>::RuntimeOrigin::root();
 	let parent_location = AssetHubWestend::parent_location();
-	let system_para_destination: VersionedMultiLocation =
-		Westend::child_location_of(AssetHubWestend::para_id()).into();
-	let call = <AssetHubWestend as Chain>::RuntimeCall::PolkadotXcm(pallet_xcm::Call::<
-		<AssetHubWestend as Chain>::Runtime,
-	>::force_xcm_version {
-		location: bx!(parent_location),
-		version: XCM_V3,
-	})
-	.encode()
-	.into();
-	let origin_kind = OriginKind::Superuser;
-
-	let xcm = xcm_transact_unpaid_execution(call, origin_kind);
-
-	// System Parachain sets supported version for Relay Chain throught it
-	Westend::execute_with(|| {
-		assert_ok!(<Westend as WestendPallet>::XcmPallet::send(
-			sudo_origin,
-			bx!(system_para_destination),
-			bx!(xcm),
-		));
+	let force_xcm_version_call =
+		<AssetHubWestend as Chain>::RuntimeCall::PolkadotXcm(pallet_xcm::Call::<
+			<AssetHubWestend as Chain>::Runtime,
+		>::force_xcm_version {
+			location: bx!(parent_location),
+			version: XCM_V3,
+		})
+		.encode()
+		.into();
 
-		Westend::assert_xcm_pallet_sent();
-	});
+	// The Relay Chain instructs the System Parachain to set the supported XCM version for its parent (the Relay Chain)
+	Westend::send_unpaid_transact_to_parachain_as_root(
+		AssetHubWestend::para_id(),
+		force_xcm_version_call,
+	);
 
 	// System Parachain receive the XCM message
 	AssetHubWestend::execute_with(|| {
diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/teleport.rs
index 8de73a7420c6b..d94fd4b97d9fc 100644
--- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/teleport.rs
+++ b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/teleport.rs
@@ -97,7 +97,7 @@ fn para_origin_assertions(t: SystemParaToRelayTest) {
 fn para_dest_assertions(t: RelayToSystemParaTest) {
 	type RuntimeEvent = <AssetHubWestend as Chain>::RuntimeEvent;
 
-	AssetHubWestend::assert_dmp_queue_complete(Some(Weight::from_parts(164_733_000, 0)));
+	AssetHubWestend::assert_dmp_queue_complete(Some(Weight::from_parts(164_793_000, 3593)));
 
 	assert_expected_events!(
 		AssetHubWestend,
@@ -142,16 +142,15 @@ fn system_para_limited_teleport_assets(t: SystemParaToRelayTest) -> DispatchResu
 	)
 }
 
-// TODO: Uncomment when https://github.com/paritytech/polkadot/pull/7424 is merged
-// fn system_para_teleport_assets(t: SystemParaToRelayTest) -> DispatchResult {
-// 	<AssetHubWestend as AssetHubWestendPallet>::PolkadotXcm::teleport_assets(
-// 		t.signed_origin,
-// 		bx!(t.args.dest),
-// 		bx!(t.args.beneficiary),
-// 		bx!(t.args.assets),
-// 		t.args.fee_asset_item,
-// 	)
-// }
+fn system_para_teleport_assets(t: SystemParaToRelayTest) -> DispatchResult {
+	<AssetHubWestend as AssetHubWestendPallet>::PolkadotXcm::teleport_assets(
+		t.signed_origin,
+		bx!(t.args.dest.into()),
+		bx!(t.args.beneficiary.into()),
+		bx!(t.args.assets.into()),
+		t.args.fee_asset_item,
+	)
+}
 
 /// Limited Teleport of native asset from Relay Chain to the System Parachain should work
 #[test]
@@ -286,78 +285,75 @@ fn teleport_native_assets_from_relay_to_system_para_works() {
 	assert!(receiver_balance_after > receiver_balance_before);
 }
 
-// TODO: Uncomment when https://github.com/paritytech/polkadot/pull/7424 is merged
-
-// Right now it is failing in the Relay Chain with a
-// `messageQueue.ProcessingFailed` event `error: Unsupported`.
-// The reason is the `Weigher` in `pallet_xcm` is not properly calculating the `remote_weight`
-// and it cause an `Overweight` error in `AllowTopLevelPaidExecutionFrom` barrier
-
-// /// Teleport of native asset from System Parachains to the Relay Chain
-// /// should work when there is enough balance in Relay Chain's `CheckAccount`
-// #[test]
-// fn teleport_native_assets_back_from_system_para_to_relay_works() {
-// 	// Dependency - Relay Chain's `CheckAccount` should have enough balance
-// 	teleport_native_assets_from_relay_to_system_para_works();
-
-// 	// Init values for Relay Chain
-// 	let amount_to_send: Balance = ASSET_HUB_WESTEND_ED * 1000;
-// 	let test_args = TestContext {
-// 		sender: AssetHubWestendSender::get(),
-// 		receiver: WestendReceiver::get(),
-// 		args: get_para_dispatch_args(amount_to_send),
-// 	};
-
-// 	let mut test = SystemParaToRelayTest::new(test_args);
-
-// 	let sender_balance_before = test.sender.balance;
-// 	let receiver_balance_before = test.receiver.balance;
-
-// 	test.set_assertion::<AssetHubWestend>(para_origin_assertions);
-// 	test.set_assertion::<Westend>(relay_dest_assertions);
-// 	test.set_dispatchable::<AssetHubWestend>(system_para_teleport_assets);
-// 	test.assert();
-
-// 	let sender_balance_after = test.sender.balance;
-// 	let receiver_balance_after = test.receiver.balance;
-
-// 	// Sender's balance is reduced
-// 	assert_eq!(sender_balance_before - amount_to_send, sender_balance_after);
-// 	// Receiver's balance is increased
-// 	assert!(receiver_balance_after > receiver_balance_before);
-// }
-
-// /// Teleport of native asset from System Parachain to Relay Chain
-// /// shouldn't work when there is not enough balance in Relay Chain's `CheckAccount`
-// #[test]
-// fn teleport_native_assets_from_system_para_to_relay_fails() {
-// 	// Init values for Relay Chain
-// 	let amount_to_send: Balance = ASSET_HUB_WESTEND_ED * 1000;
-//  let assets = (Parent, amount_to_send).into();
-//
-// 	let test_args = TestContext {
-// 		sender: AssetHubWestendSender::get(),
-// 		receiver: WestendReceiver::get(),
-// 		args: system_para_test_args(amount_to_send),
-//      assets,
-//      None
-// 	};
-
-// 	let mut test = SystemParaToRelayTest::new(test_args);
-
-// 	let sender_balance_before = test.sender.balance;
-// 	let receiver_balance_before = test.receiver.balance;
-
-// 	test.set_assertion::<AssetHubWestend>(para_origin_assertions);
-// 	test.set_assertion::<Westend>(relay_dest_assertions);
-// 	test.set_dispatchable::<AssetHubWestend>(system_para_teleport_assets);
-// 	test.assert();
-
-// 	let sender_balance_after = test.sender.balance;
-// 	let receiver_balance_after = test.receiver.balance;
-
-// 	// Sender's balance is reduced
-// 	assert_eq!(sender_balance_before - amount_to_send, sender_balance_after);
-// 	// Receiver's balance does not change
-// 	assert_eq!(receiver_balance_after, receiver_balance_before);
-// }
+/// Teleport of native asset from a System Parachain to the Relay Chain
+/// should work when there is enough balance in Relay Chain's `CheckAccount`
+#[test]
+fn teleport_native_assets_back_from_system_para_to_relay_works() {
+	// Dependency - Relay Chain's `CheckAccount` should have enough balance
+	teleport_native_assets_from_relay_to_system_para_works();
+
+	// Init values for Relay Chain
+	let amount_to_send: Balance = ASSET_HUB_WESTEND_ED * 1000;
+	let destination = AssetHubWestend::parent_location();
+	let beneficiary_id = WestendReceiver::get();
+	let assets = (Parent, amount_to_send).into();
+
+	let test_args = TestContext {
+		sender: AssetHubWestendSender::get(),
+		receiver: WestendReceiver::get(),
+		args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None),
+	};
+
+	let mut test = SystemParaToRelayTest::new(test_args);
+
+	let sender_balance_before = test.sender.balance;
+	let receiver_balance_before = test.receiver.balance;
+
+	test.set_assertion::<AssetHubWestend>(para_origin_assertions);
+	test.set_assertion::<Westend>(relay_dest_assertions);
+	test.set_dispatchable::<AssetHubWestend>(system_para_teleport_assets);
+	test.assert();
+
+	let sender_balance_after = test.sender.balance;
+	let receiver_balance_after = test.receiver.balance;
+
+	// Sender's balance is reduced
+	assert_eq!(sender_balance_before - amount_to_send, sender_balance_after);
+	// Receiver's balance is increased
+	assert!(receiver_balance_after > receiver_balance_before);
+}
+
+/// Teleport of native asset from System Parachain to Relay Chain
+/// shouldn't work when there is not enough balance in Relay Chain's `CheckAccount`
+#[test]
+fn teleport_native_assets_from_system_para_to_relay_fails() {
+	// Init values for Relay Chain
+	let amount_to_send: Balance = ASSET_HUB_WESTEND_ED * 1000;
+	let destination = AssetHubWestend::parent_location();
+	let beneficiary_id = WestendReceiver::get();
+	let assets = (Parent, amount_to_send).into();
+
+	let test_args = TestContext {
+		sender: AssetHubWestendSender::get(),
+		receiver: WestendReceiver::get(),
+		args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None),
+	};
+
+	let mut test = SystemParaToRelayTest::new(test_args);
+
+	let sender_balance_before = test.sender.balance;
+	let receiver_balance_before = test.receiver.balance;
+
+	test.set_assertion::<AssetHubWestend>(para_origin_assertions);
+	test.set_assertion::<Westend>(relay_dest_assertions_fail);
+	test.set_dispatchable::<AssetHubWestend>(system_para_teleport_assets);
+	test.assert();
+
+	let sender_balance_after = test.sender.balance;
+	let receiver_balance_after = test.receiver.balance;
+
+	// Sender's balance is reduced
+	assert_eq!(sender_balance_before - amount_to_send, sender_balance_after);
+	// Receiver's balance does not change
+	assert_eq!(receiver_balance_after, receiver_balance_before);
+}
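Editor's note: both re-enabled teleport tests follow the same harness flow: build a `TestContext`, register per-chain assertions, register the dispatchable, then run. A toy, self-contained rendition of that flow (the real `TestContext`/`SystemParaToRelayTest` types live in `xcm-emulator`; this only mirrors the call sequence):

```rust
// Toy model of the xcm-emulator harness: dispatch first, then run
// every registered assertion.
struct Test {
	assertions: Vec<fn()>,
	dispatchable: Option<fn() -> Result<(), &'static str>>,
}

impl Test {
	fn new() -> Self {
		Self { assertions: Vec::new(), dispatchable: None }
	}
	fn set_assertion(&mut self, f: fn()) {
		self.assertions.push(f);
	}
	fn set_dispatchable(&mut self, f: fn() -> Result<(), &'static str>) {
		self.dispatchable = Some(f);
	}
	fn assert(&self) {
		self.dispatchable.expect("dispatchable set")().expect("dispatch succeeded");
		for check in &self.assertions {
			check();
		}
	}
}

fn main() {
	let mut test = Test::new();
	test.set_assertion(|| assert_eq!(1 + 1, 2));
	test.set_dispatchable(|| Ok(()));
	test.assert();
}
```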
diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/treasury.rs b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/treasury.rs
new file mode 100644
index 0000000000000..cf06f58682da2
--- /dev/null
+++ b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/treasury.rs
@@ -0,0 +1,126 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate::*;
+use frame_support::traits::fungibles::{Create, Inspect, Mutate};
+use integration_tests_common::constants::accounts::{ALICE, BOB};
+use polkadot_runtime_common::impls::VersionedLocatableAsset;
+use xcm_executor::traits::ConvertLocation;
+
+#[test]
+fn create_and_claim_treasury_spend() {
+	const ASSET_ID: u32 = 1984;
+	const SPEND_AMOUNT: u128 = 1_000_000;
+	// the Relay Chain treasury pallet location, from the point of view of a parachain.
+	let treasury_location: MultiLocation = MultiLocation::new(1, PalletInstance(37));
+	// the treasury's derived account on the parachain.
+	let treasury_account =
+		asset_hub_westend_runtime::xcm_config::LocationToAccountId::convert_location(
+			&treasury_location,
+		)
+		.unwrap();
+	let asset_hub_location = MultiLocation::new(0, Parachain(AssetHubWestend::para_id().into()));
+	let root = <Westend as Chain>::RuntimeOrigin::root();
+	// asset kind to be spent from the treasury.
+	let asset_kind = VersionedLocatableAsset::V3 {
+		location: asset_hub_location,
+		asset_id: AssetId::Concrete((PalletInstance(50), GeneralIndex(ASSET_ID.into())).into()),
+	};
+	// treasury spend beneficiary.
+	let alice: AccountId = Westend::account_id_of(ALICE);
+	let bob: AccountId = Westend::account_id_of(BOB);
+	let bob_signed = <Westend as Chain>::RuntimeOrigin::signed(bob.clone());
+
+	AssetHubWestend::execute_with(|| {
+		type Assets = <AssetHubWestend as AssetHubWestendPallet>::Assets;
+
+		// create an asset class and mint some assets to the treasury account.
+		assert_ok!(<Assets as Create<_>>::create(
+			ASSET_ID,
+			treasury_account.clone(),
+			true,
+			SPEND_AMOUNT / 2
+		));
+		assert_ok!(<Assets as Mutate<_>>::mint_into(ASSET_ID, &treasury_account, SPEND_AMOUNT * 4));
+		// beneficiary has zero balance.
+		assert_eq!(<Assets as Inspect<_>>::balance(ASSET_ID, &alice,), 0u128,);
+	});
+
+	Westend::execute_with(|| {
+		type RuntimeEvent = <Westend as Chain>::RuntimeEvent;
+		type Treasury = <Westend as WestendPallet>::Treasury;
+		type AssetRate = <Westend as WestendPallet>::AssetRate;
+
+		// create a conversion rate from `asset_kind` to the native currency.
+		assert_ok!(AssetRate::create(root.clone(), Box::new(asset_kind.clone()), 2.into()));
+
+		// create and approve a treasury spend.
+		assert_ok!(Treasury::spend(
+			root,
+			Box::new(asset_kind),
+			SPEND_AMOUNT,
+			Box::new(MultiLocation::new(0, Into::<[u8; 32]>::into(alice.clone())).into()),
+			None,
+		));
+		// claim the spend.
+		assert_ok!(Treasury::payout(bob_signed.clone(), 0));
+
+		assert_expected_events!(
+			Westend,
+			vec![
+				RuntimeEvent::Treasury(pallet_treasury::Event::Paid { .. }) => {},
+			]
+		);
+	});
+
+	AssetHubWestend::execute_with(|| {
+		type RuntimeEvent = <AssetHubWestend as Chain>::RuntimeEvent;
+		type Assets = <AssetHubWestend as AssetHubWestendPallet>::Assets;
+
+		// assert events triggered by xcm pay program
+		// 1. treasury asset transferred to spend beneficiary
+		// 2. response to Relay Chain treasury pallet instance sent back
+		// 3. XCM program completed
+		assert_expected_events!(
+			AssetHubWestend,
+			vec![
+				RuntimeEvent::Assets(pallet_assets::Event::Transferred { asset_id: id, from, to, amount }) => {
+					id: id == &ASSET_ID,
+					from: from == &treasury_account,
+					to: to == &alice,
+					amount: amount == &SPEND_AMOUNT,
+				},
+				RuntimeEvent::ParachainSystem(cumulus_pallet_parachain_system::Event::UpwardMessageSent { .. }) => {},
+				RuntimeEvent::DmpQueue(cumulus_pallet_dmp_queue::Event::ExecutedDownward { outcome: Outcome::Complete(..), .. }) => {},
+			]
+		);
+		// beneficiary received the assets from the treasury.
+		assert_eq!(<Assets as Inspect<_>>::balance(ASSET_ID, &alice,), SPEND_AMOUNT,);
+	});
+
+	Westend::execute_with(|| {
+		type RuntimeEvent = <Westend as Chain>::RuntimeEvent;
+		type Treasury = <Westend as WestendPallet>::Treasury;
+
+		// check the payment status to ensure the response from the AssetHub was received.
+		assert_ok!(Treasury::check_status(bob_signed, 0));
+		assert_expected_events!(
+			Westend,
+			vec![
+				RuntimeEvent::Treasury(pallet_treasury::Event::SpendProcessed { .. }) => {},
+			]
+		);
+	});
+}
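Editor's note: the spend is addressed with `VersionedLocatableAsset`, a chain location paired with an asset id interpreted on that chain. A sketch with this test's concrete values, assuming the same crates the test imports (1000 is AssetHubWestend's para id and 50 its `pallet_assets` instance; reading the ids this way is my interpretation):

```rust
use polkadot_runtime_common::impls::VersionedLocatableAsset;
use xcm::v3::{
	AssetId,
	Junction::{GeneralIndex, PalletInstance, Parachain},
	MultiLocation,
};

fn asset_kind() -> VersionedLocatableAsset {
	VersionedLocatableAsset::V3 {
		// where the asset lives, as seen from the Relay Chain: para 1000
		location: MultiLocation::new(0, Parachain(1000)),
		// which asset on that chain: `pallet_assets` instance 50, asset 1984
		asset_id: AssetId::Concrete((PalletInstance(50), GeneralIndex(1984)).into()),
	}
}
```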
diff --git a/cumulus/parachains/integration-tests/emulated/common/src/impls.rs b/cumulus/parachains/integration-tests/emulated/common/src/impls.rs
index 024ae65c51ee4..bb4c9d102e98a 100644
--- a/cumulus/parachains/integration-tests/emulated/common/src/impls.rs
+++ b/cumulus/parachains/integration-tests/emulated/common/src/impls.rs
@@ -54,7 +54,7 @@ pub use polkadot_runtime_parachains::{
 	inclusion::{AggregateMessageOrigin, UmpQueueId},
 };
 pub use xcm::{
-	prelude::{OriginKind, Outcome, VersionedXcm, Weight},
+	prelude::{MultiLocation, OriginKind, Outcome, VersionedXcm, Weight},
 	v3::Error,
 	DoubleEncoded,
 };
@@ -80,21 +80,11 @@ impl From<u32> for LaneIdWrapper {
 type BridgeHubRococoRuntime = <BridgeHubRococo as Chain>::Runtime;
 type BridgeHubWococoRuntime = <BridgeHubWococo as Chain>::Runtime;
 
-// TODO: uncomment when https://github.com/paritytech/polkadot-sdk/pull/1352 is merged
-// type BridgeHubPolkadotRuntime = <BridgeHubPolkadot as Chain>::Runtime;
-// type BridgeHubKusamaRuntime = <BridgeHubKusama as Chain>::Runtime;
-
 pub type RococoWococoMessageHandler =
 	BridgeHubMessageHandler<BridgeHubRococoRuntime, BridgeHubWococoRuntime, Instance2>;
 pub type WococoRococoMessageHandler =
 	BridgeHubMessageHandler<BridgeHubWococoRuntime, BridgeHubRococoRuntime, Instance2>;
 
-// TODO: uncomment when https://github.com/paritytech/polkadot-sdk/pull/1352 is merged
-// pub type PolkadotKusamaMessageHandler
-//	= BridgeHubMessageHandler<BridgeHubPolkadotRuntime, BridgeHubKusamaRuntime, Instance1>;
-// pub type KusamaPolkadotMessageHandler
-//	= BridgeHubMessageHandler<BridgeHubKusamaRuntime, BridgeHubPolkadoRuntime, Instance1>;
-
 impl<S, T, I> BridgeMessageHandler for BridgeHubMessageHandler<S, T, I>
 where
 	S: Config<Instance1>,
@@ -356,6 +346,37 @@ macro_rules! impl_hrmp_channels_helpers_for_relay_chain {
 	};
 }
 
+#[macro_export]
+macro_rules! impl_send_transact_helpers_for_relay_chain {
+	( $chain:ident ) => {
+		$crate::impls::paste::paste! {
+			impl $chain {
+				/// A root origin (as governance) sends `xcm::Transact` with `UnpaidExecution` and an encoded `call` to a child parachain.
+				pub fn send_unpaid_transact_to_parachain_as_root(
+					recipient: $crate::impls::ParaId,
+					call: $crate::impls::DoubleEncoded<()>
+				) {
+					use $crate::impls::{bx, Chain, RelayChain};
+
+					<Self as $crate::impls::TestExt>::execute_with(|| {
+						let root_origin = <Self as Chain>::RuntimeOrigin::root();
+						let destination: $crate::impls::MultiLocation = <Self as RelayChain>::child_location_of(recipient);
+						let xcm = $crate::impls::xcm_transact_unpaid_execution(call, $crate::impls::OriginKind::Superuser);
+
+						// Send XCM `Transact`
+						$crate::impls::assert_ok!(<Self as [<$chain Pallet>]>::XcmPallet::send(
+							root_origin,
+							bx!(destination.into()),
+							bx!(xcm),
+						));
+						Self::assert_xcm_pallet_sent();
+					});
+				}
+			}
+		}
+	};
+}
+
 #[macro_export]
 macro_rules! impl_accounts_helpers_for_parachain {
 	( $chain:ident ) => {
@@ -503,6 +524,22 @@ macro_rules! impl_assert_events_helpers_for_parachain {
 					);
 				}
 
+				/// Asserts an XCM from the Relay Chain is executed with an error.
+				pub fn assert_dmp_queue_error(
+					expected_error: $crate::impls::Error,
+				) {
+					$crate::impls::assert_expected_events!(
+						Self,
+						vec![
+							[<$chain RuntimeEvent>]::DmpQueue($crate::impls::cumulus_pallet_dmp_queue::Event::ExecutedDownward {
+								outcome: $crate::impls::Outcome::Error(error), ..
+							}) => {
+								error: *error == expected_error,
+							},
+						]
+					);
+				}
+
 				/// Asserts a XCM from another Parachain is completely executed
 				pub fn assert_xcmp_queue_success(expected_weight: Option<$crate::impls::Weight>) {
 					$crate::impls::assert_expected_events!(
@@ -600,53 +637,58 @@ macro_rules! impl_assets_helpers_for_parachain {
 					min_balance: u128,
 					is_sufficient: bool,
 					asset_owner: $crate::impls::AccountId,
+					dmp_weight_threshold: Option<$crate::impls::Weight>,
 					amount_to_mint: u128,
 				) {
-					use $crate::impls::{bx, Chain, RelayChain, Parachain, Inspect, TestExt};
-					// Init values for Relay Chain
-					let root_origin = <$relay_chain as Chain>::RuntimeOrigin::root();
-					let destination = <$relay_chain>::child_location_of(<$chain>::para_id());
-					let xcm = Self::force_create_asset_xcm(
-						$crate::impls::OriginKind::Superuser,
+					use $crate::impls::Chain;
+
+					// Force create asset
+					Self::force_create_asset_from_relay_as_root(
 						id,
-						asset_owner.clone(),
-						is_sufficient,
 						min_balance,
+						is_sufficient,
+						asset_owner.clone(),
+						dmp_weight_threshold
 					);
 
-					<$relay_chain>::execute_with(|| {
-						$crate::impls::assert_ok!(<$relay_chain as [<$relay_chain Pallet>]>::XcmPallet::send(
-							root_origin,
-							bx!(destination.into()),
-							bx!(xcm),
-						));
+					// Mint asset for System Parachain's sender
+					let signed_origin = <Self as Chain>::RuntimeOrigin::signed(asset_owner.clone());
+					Self::mint_asset(signed_origin, id, asset_owner, amount_to_mint);
+				}
 
-						<$relay_chain>::assert_xcm_pallet_sent();
-					});
+				/// Relay Chain sends a `Transact` instruction with `force_create_asset` to a Parachain with an `Assets` instance of `pallet_assets`.
+				pub fn force_create_asset_from_relay_as_root(
+					id: u32,
+					min_balance: u128,
+					is_sufficient: bool,
+					asset_owner: $crate::impls::AccountId,
+					dmp_weight_threshold: Option<$crate::impls::Weight>,
+				) {
+					use $crate::impls::{Parachain, Inspect, TestExt};
 
-					Self::execute_with(|| {
-						Self::assert_dmp_queue_complete(Some($crate::impls::Weight::from_parts(1_019_445_000, 200_000)));
+					<$relay_chain>::send_unpaid_transact_to_parachain_as_root(
+						Self::para_id(),
+						Self::force_create_asset_call(id, asset_owner.clone(), is_sufficient, min_balance),
+					);
 
+					// Receive XCM message in Assets Parachain
+					Self::execute_with(|| {
 						type RuntimeEvent = <$chain as $crate::impls::Chain>::RuntimeEvent;
 
+						Self::assert_dmp_queue_complete(dmp_weight_threshold);
+
 						$crate::impls::assert_expected_events!(
 							Self,
 							vec![
-								// Asset has been created
 								RuntimeEvent::Assets($crate::impls::pallet_assets::Event::ForceCreated { asset_id, owner }) => {
 									asset_id: *asset_id == id,
-									owner: *owner == asset_owner.clone(),
+									owner: *owner == asset_owner,
 								},
 							]
 						);
 
 						assert!(<Self as [<$chain Pallet>]>::Assets::asset_exists(id.into()));
 					});
-
-					let signed_origin = <Self as Chain>::RuntimeOrigin::signed(asset_owner.clone());
-
-					// Mint asset for System Parachain's sender
-					Self::mint_asset(signed_origin, id, asset_owner, amount_to_mint);
 				}
 			}
 		}
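Editor's note: the macro-generated helper collapses the previous three-step pattern (build the destination, wrap the call in an unpaid `Transact`, send and assert). Its call shape, excerpted from the updated `set_xcm_versions.rs` hunk earlier in this diff:

```rust
// `force_xcm_version_call` is any SCALE-encoded call for the destination
// runtime; the helper wraps it in `UnpaidExecution` + `Transact` and sends
// it from a root origin.
Westend::send_unpaid_transact_to_parachain_as_root(
	AssetHubWestend::para_id(),
	force_xcm_version_call,
);
```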
diff --git a/cumulus/parachains/integration-tests/emulated/common/src/lib.rs b/cumulus/parachains/integration-tests/emulated/common/src/lib.rs
index 565c889a443b1..f8fe8831d3c74 100644
--- a/cumulus/parachains/integration-tests/emulated/common/src/lib.rs
+++ b/cumulus/parachains/integration-tests/emulated/common/src/lib.rs
@@ -46,6 +46,8 @@ decl_test_relay_chains! {
 			XcmPallet: westend_runtime::XcmPallet,
 			Sudo: westend_runtime::Sudo,
 			Balances: westend_runtime::Balances,
+			Treasury: westend_runtime::Treasury,
+			AssetRate: westend_runtime::AssetRate,
 		}
 	},
 	#[api_version(8)]
@@ -248,30 +250,22 @@ decl_test_bridges! {
 		target = BridgeHubRococo,
 		handler = WococoRococoMessageHandler
 	}
-	// TODO: uncomment when https://github.com/paritytech/polkadot-sdk/pull/1352 is merged
-	// pub struct PolkadotKusamaMockBridge {
-	// 	source = BridgeHubPolkadot,
-	// 	target = BridgeHubKusama,
-	//  handler = PolkadotKusamaMessageHandler
-	// },
-	// pub struct KusamaPolkadotMockBridge {
-	// 	source = BridgeHubKusama,
-	// 	target = BridgeHubPolkadot,
-	// 	handler = KusamaPolkadotMessageHandler
-	// }
 }
 
 // Westend implementation
 impl_accounts_helpers_for_relay_chain!(Westend);
 impl_assert_events_helpers_for_relay_chain!(Westend);
+impl_send_transact_helpers_for_relay_chain!(Westend);
 
 // Rococo implementation
 impl_accounts_helpers_for_relay_chain!(Rococo);
 impl_assert_events_helpers_for_relay_chain!(Rococo);
+impl_send_transact_helpers_for_relay_chain!(Rococo);
 
 // Wococo implementation
 impl_accounts_helpers_for_relay_chain!(Wococo);
 impl_assert_events_helpers_for_relay_chain!(Wococo);
+impl_send_transact_helpers_for_relay_chain!(Wococo);
 
 // AssetHubWestend implementation
 impl_accounts_helpers_for_parachain!(AssetHubWestend);
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/xcm/mod.rs
index 9aff4902d15ba..ce6e92065156e 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/xcm/mod.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/xcm/mod.rs
@@ -61,16 +61,8 @@ impl<Call> XcmWeightInfo<Call> for AssetHubKusamaXcmWeight<Call> {
 	fn withdraw_asset(assets: &MultiAssets) -> Weight {
 		assets.weigh_multi_assets(XcmFungibleWeight::<Runtime>::withdraw_asset())
 	}
-	// Currently there is no trusted reserve (`IsReserve = ()`),
-	// but we need this hack for `pallet_xcm::reserve_transfer_assets`
-	// (TODO) fix https://github.com/paritytech/polkadot/pull/7424
-	// (TODO) fix https://github.com/paritytech/polkadot/pull/7546
-	fn reserve_asset_deposited(_assets: &MultiAssets) -> Weight {
-		// TODO: if we change `IsReserve = ...` then use this line...
-		// TODO: or if remote weight estimation is fixed, then remove
-		// TODO: hardcoded - fix https://github.com/paritytech/cumulus/issues/1974
-		let hardcoded_weight = Weight::from_parts(1_000_000_000_u64, 0);
-		hardcoded_weight.min(XcmFungibleWeight::<Runtime>::reserve_asset_deposited())
+	fn reserve_asset_deposited(assets: &MultiAssets) -> Weight {
+		assets.weigh_multi_assets(XcmFungibleWeight::<Runtime>::reserve_asset_deposited())
 	}
 	fn receive_teleported_asset(assets: &MultiAssets) -> Weight {
 		assets.weigh_multi_assets(XcmFungibleWeight::<Runtime>::receive_teleported_asset())
@@ -127,10 +119,7 @@ impl<Call> XcmWeightInfo<Call> for AssetHubKusamaXcmWeight<Call> {
 	}
 
 	fn deposit_asset(assets: &MultiAssetFilter, _dest: &MultiLocation) -> Weight {
-		// Hardcoded till the XCM pallet is fixed
-		let hardcoded_weight = Weight::from_parts(1_000_000_000_u64, 0);
-		let weight = assets.weigh_multi_assets(XcmFungibleWeight::<Runtime>::deposit_asset());
-		hardcoded_weight.min(weight)
+		assets.weigh_multi_assets(XcmFungibleWeight::<Runtime>::deposit_asset())
 	}
 	fn deposit_reserve_asset(
 		assets: &MultiAssetFilter,
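Editor's note: dropping the hardcoded 1s ceiling means these instructions are now priced purely per asset. A rough standalone model of what `weigh_multi_assets` does for a definite asset list (the real helper lives in the runtime's XCM weight glue; this only illustrates the scaling, under my reading):

```rust
// Simplified model: each definite asset costs one benchmarked unit weight.
#[derive(Clone, Copy, Debug, PartialEq)]
struct Weight {
	ref_time: u64,
	proof_size: u64,
}

impl Weight {
	const fn from_parts(ref_time: u64, proof_size: u64) -> Self {
		Self { ref_time, proof_size }
	}
	fn saturating_mul(self, n: u64) -> Self {
		Self {
			ref_time: self.ref_time.saturating_mul(n),
			proof_size: self.proof_size.saturating_mul(n),
		}
	}
}

fn weigh_multi_assets(unit: Weight, assets_len: u64) -> Weight {
	unit.saturating_mul(assets_len)
}

fn main() {
	// illustrative unit weight, taken from the regenerated `withdraw_asset`
	let unit = Weight::from_parts(26_312_000, 3_593);
	assert_eq!(weigh_multi_assets(unit, 2), Weight::from_parts(52_624_000, 7_186));
}
```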
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs
index 6e663039b0c2b..9b8611fd6637a 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs
@@ -17,28 +17,26 @@
 //! Autogenerated weights for `pallet_xcm_benchmarks::fungible`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2023-09-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
-//! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: Some("asset-hub-kusama-dev"), DB CACHE: 1024
+//! HOSTNAME: `runner-nbnwcyh-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: Compiled, CHAIN: Some("asset-hub-kusama-dev"), DB CACHE: 1024
 
 // Executed Command:
-// ./target/production/polkadot-parachain
+// target/production/polkadot-parachain
 // benchmark
 // pallet
-// --template=./templates/xcm-bench-template.hbs
-// --chain=asset-hub-kusama-dev
-// --wasm-execution=compiled
-// --pallet=pallet_xcm_benchmarks::fungible
-// --no-storage-info
-// --no-median-slopes
-// --no-min-squares
-// --extrinsic=*
 // --steps=50
 // --repeat=20
-// --json
-// --header=./file_header.txt
-// --output=./parachains/runtimes/assets/asset-hub-kusama/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs
+// --extrinsic=*
+// --wasm-execution=compiled
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
+// --pallet=pallet_xcm_benchmarks::fungible
+// --chain=asset-hub-kusama-dev
+// --header=./cumulus/file_header.txt
+// --template=./cumulus/templates/xcm-bench-template.hbs
+// --output=./cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/xcm/
 
 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
@@ -56,8 +54,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `101`
 		//  Estimated: `3593`
-		// Minimum execution time: 26_104_000 picoseconds.
-		Weight::from_parts(26_722_000, 3593)
+		// Minimum execution time: 25_602_000 picoseconds.
+		Weight::from_parts(26_312_000, 3593)
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -67,8 +65,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `101`
 		//  Estimated: `6196`
-		// Minimum execution time: 52_259_000 picoseconds.
-		Weight::from_parts(53_854_000, 6196)
+		// Minimum execution time: 51_173_000 picoseconds.
+		Weight::from_parts(52_221_000, 6196)
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
@@ -88,10 +86,10 @@ impl<T: frame_system::Config> WeightInfo<T> {
 	// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	pub fn transfer_reserve_asset() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `210`
+		//  Measured:  `246`
 		//  Estimated: `6196`
-		// Minimum execution time: 77_248_000 picoseconds.
-		Weight::from_parts(80_354_000, 6196)
+		// Minimum execution time: 74_651_000 picoseconds.
+		Weight::from_parts(76_500_000, 6196)
 			.saturating_add(T::DbWeight::get().reads(8))
 			.saturating_add(T::DbWeight::get().writes(4))
 	}
@@ -101,8 +99,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 500_000_000_000 picoseconds.
-		Weight::from_parts(500_000_000_000, 0)
+		// Minimum execution time: 18_446_744_073_709_551_000 picoseconds.
+		Weight::from_parts(18_446_744_073_709_551_000, 0)
 	}
 	// Storage: `ParachainInfo::ParachainId` (r:1 w:0)
 	// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
@@ -118,10 +116,10 @@ impl<T: frame_system::Config> WeightInfo<T> {
 	// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	pub fn initiate_reserve_withdraw() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `109`
-		//  Estimated: `3574`
-		// Minimum execution time: 482_070_000 picoseconds.
-		Weight::from_parts(490_269_000, 3574)
+		//  Measured:  `145`
+		//  Estimated: `3610`
+		// Minimum execution time: 458_666_000 picoseconds.
+		Weight::from_parts(470_470_000, 3610)
 			.saturating_add(T::DbWeight::get().reads(6))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
@@ -129,8 +127,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 3_970_000 picoseconds.
-		Weight::from_parts(4_056_000, 0)
+		// Minimum execution time: 3_701_000 picoseconds.
+		Weight::from_parts(3_887_000, 0)
 	}
 	// Storage: `System::Account` (r:1 w:1)
 	// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
@@ -138,8 +136,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `3593`
-		// Minimum execution time: 26_324_000 picoseconds.
-		Weight::from_parts(26_985_000, 3593)
+		// Minimum execution time: 25_709_000 picoseconds.
+		Weight::from_parts(26_320_000, 3593)
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -159,10 +157,10 @@ impl<T: frame_system::Config> WeightInfo<T> {
 	// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	pub fn deposit_reserve_asset() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `109`
-		//  Estimated: `3593`
-		// Minimum execution time: 52_814_000 picoseconds.
-		Weight::from_parts(54_666_000, 3593)
+		//  Measured:  `145`
+		//  Estimated: `3610`
+		// Minimum execution time: 51_663_000 picoseconds.
+		Weight::from_parts(52_538_000, 3610)
 			.saturating_add(T::DbWeight::get().reads(7))
 			.saturating_add(T::DbWeight::get().writes(3))
 	}
@@ -180,10 +178,10 @@ impl<T: frame_system::Config> WeightInfo<T> {
 	// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	pub fn initiate_teleport() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `109`
-		//  Estimated: `3574`
-		// Minimum execution time: 33_044_000 picoseconds.
-		Weight::from_parts(33_849_000, 3574)
+		//  Measured:  `145`
+		//  Estimated: `3610`
+		// Minimum execution time: 31_972_000 picoseconds.
+		Weight::from_parts(32_834_000, 3610)
 			.saturating_add(T::DbWeight::get().reads(6))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/xcm/mod.rs
index 55fed809e2b75..eb140c4bf3238 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/xcm/mod.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/xcm/mod.rs
@@ -61,16 +61,8 @@ impl<Call> XcmWeightInfo<Call> for AssetHubPolkadotXcmWeight<Call> {
 	fn withdraw_asset(assets: &MultiAssets) -> Weight {
 		assets.weigh_multi_assets(XcmFungibleWeight::<Runtime>::withdraw_asset())
 	}
-	// Currently there is no trusted reserve (`IsReserve = ()`),
-	// but we need this hack for `pallet_xcm::reserve_transfer_assets`
-	// (TODO) fix https://github.com/paritytech/polkadot/pull/7424
-	// (TODO) fix https://github.com/paritytech/polkadot/pull/7546
-	fn reserve_asset_deposited(_assets: &MultiAssets) -> Weight {
-		// TODO: if we change `IsReserve = ...` then use this line...
-		// TODO: or if remote weight estimation is fixed, then remove
-		// TODO: hardcoded - fix https://github.com/paritytech/cumulus/issues/1974
-		let hardcoded_weight = Weight::from_parts(1_000_000_000_u64, 0);
-		hardcoded_weight.min(XcmFungibleWeight::<Runtime>::reserve_asset_deposited())
+	fn reserve_asset_deposited(assets: &MultiAssets) -> Weight {
+		assets.weigh_multi_assets(XcmFungibleWeight::<Runtime>::reserve_asset_deposited())
 	}
 	fn receive_teleported_asset(assets: &MultiAssets) -> Weight {
 		assets.weigh_multi_assets(XcmFungibleWeight::<Runtime>::receive_teleported_asset())
@@ -127,10 +119,7 @@ impl<Call> XcmWeightInfo<Call> for AssetHubPolkadotXcmWeight<Call> {
 	}
 
 	fn deposit_asset(assets: &MultiAssetFilter, _dest: &MultiLocation) -> Weight {
-		// Hardcoded till the XCM pallet is fixed
-		let hardcoded_weight = Weight::from_parts(1_000_000_000_u64, 0);
-		let weight = assets.weigh_multi_assets(XcmFungibleWeight::<Runtime>::deposit_asset());
-		hardcoded_weight.min(weight)
+		assets.weigh_multi_assets(XcmFungibleWeight::<Runtime>::deposit_asset())
 	}
 	fn deposit_reserve_asset(
 		assets: &MultiAssetFilter,
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs
index 4f64ea3fa1bb3..96d86ec423f20 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs
@@ -17,28 +17,26 @@
 //! Autogenerated weights for `pallet_xcm_benchmarks::fungible`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2023-09-27, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
-//! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: Some("asset-hub-polkadot-dev"), DB CACHE: 1024
+//! HOSTNAME: `runner-nbnwcyh-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: Compiled, CHAIN: Some("asset-hub-polkadot-dev"), DB CACHE: 1024
 
 // Executed Command:
-// ./target/production/polkadot-parachain
+// target/production/polkadot-parachain
 // benchmark
 // pallet
-// --template=./templates/xcm-bench-template.hbs
-// --chain=asset-hub-polkadot-dev
-// --wasm-execution=compiled
-// --pallet=pallet_xcm_benchmarks::fungible
-// --no-storage-info
-// --no-median-slopes
-// --no-min-squares
-// --extrinsic=*
 // --steps=50
 // --repeat=20
-// --json
-// --header=./file_header.txt
-// --output=./parachains/runtimes/assets/asset-hub-polkadot/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs
+// --extrinsic=*
+// --wasm-execution=compiled
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
+// --pallet=pallet_xcm_benchmarks::fungible
+// --chain=asset-hub-polkadot-dev
+// --header=./cumulus/file_header.txt
+// --template=./cumulus/templates/xcm-bench-template.hbs
+// --output=./cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/xcm/
 
 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
@@ -56,8 +54,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `101`
 		//  Estimated: `3593`
-		// Minimum execution time: 26_090_000 picoseconds.
-		Weight::from_parts(27_006_000, 3593)
+		// Minimum execution time: 25_903_000 picoseconds.
+		Weight::from_parts(26_768_000, 3593)
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -67,8 +65,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `101`
 		//  Estimated: `6196`
-		// Minimum execution time: 50_699_000 picoseconds.
-		Weight::from_parts(51_888_000, 6196)
+		// Minimum execution time: 51_042_000 picoseconds.
+		Weight::from_parts(51_939_000, 6196)
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
@@ -90,8 +88,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `176`
 		//  Estimated: `6196`
-		// Minimum execution time: 72_130_000 picoseconds.
-		Weight::from_parts(73_994_000, 6196)
+		// Minimum execution time: 74_626_000 picoseconds.
+		Weight::from_parts(75_963_000, 6196)
 			.saturating_add(T::DbWeight::get().reads(8))
 			.saturating_add(T::DbWeight::get().writes(4))
 	}
@@ -101,8 +99,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 500_000_000_000 picoseconds.
-		Weight::from_parts(500_000_000_000, 0)
+		// Minimum execution time: 18_446_744_073_709_551_000 picoseconds.
+		Weight::from_parts(18_446_744_073_709_551_000, 0)
 	}
 	// Storage: `ParachainInfo::ParachainId` (r:1 w:0)
 	// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
@@ -120,8 +118,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `75`
 		//  Estimated: `3540`
-		// Minimum execution time: 477_183_000 picoseconds.
-		Weight::from_parts(488_156_000, 3540)
+		// Minimum execution time: 480_030_000 picoseconds.
+		Weight::from_parts(486_039_000, 3540)
 			.saturating_add(T::DbWeight::get().reads(6))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
@@ -129,8 +127,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 3_966_000 picoseconds.
-		Weight::from_parts(4_129_000, 0)
+		// Minimum execution time: 3_936_000 picoseconds.
+		Weight::from_parts(4_033_000, 0)
 	}
 	// Storage: `System::Account` (r:1 w:1)
 	// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
@@ -138,8 +136,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `3593`
-		// Minimum execution time: 26_047_000 picoseconds.
-		Weight::from_parts(26_982_000, 3593)
+		// Minimum execution time: 26_274_000 picoseconds.
+		Weight::from_parts(26_609_000, 3593)
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -161,8 +159,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `75`
 		//  Estimated: `3593`
-		// Minimum execution time: 51_076_000 picoseconds.
-		Weight::from_parts(51_826_000, 3593)
+		// Minimum execution time: 52_888_000 picoseconds.
+		Weight::from_parts(53_835_000, 3593)
 			.saturating_add(T::DbWeight::get().reads(7))
 			.saturating_add(T::DbWeight::get().writes(3))
 	}
@@ -182,8 +180,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `75`
 		//  Estimated: `3540`
-		// Minimum execution time: 30_606_000 picoseconds.
-		Weight::from_parts(31_168_000, 3540)
+		// Minimum execution time: 33_395_000 picoseconds.
+		Weight::from_parts(33_827_000, 3540)
 			.saturating_add(T::DbWeight::get().reads(6))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
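
The hunks above are routine regeneration of the `pallet_xcm_benchmarks::fungible` weights on a new benchmark host; the same pattern repeats for each runtime below. Two details are worth noting: the `18_446_744_073_709_551_000` picoseconds figure appears to be `u64::MAX` rounded by the bench template, i.e. an "effectively infinite" sentinel for `reserve_asset_deposited`, which presumably has no trusted reserve configured to benchmark against; and each generated function composes a benchmarked base `Weight::from_parts(ref_time, proof_size)` with per-read/per-write DB costs. A minimal, self-contained sketch of that composition follows — `Weight` and the DB costs here are local stand-ins with hypothetical values, not the real `sp_weights`/`frame_support` items:

```rust
// A sketch of how each generated function above composes its weight:
// a benchmarked base `from_parts(ref_time_ps, proof_size_bytes)` plus
// per-read/per-write database costs.
#[derive(Clone, Copy, Debug)]
struct Weight {
    ref_time: u64,   // picoseconds of execution time
    proof_size: u64, // bytes of PoV proof size
}

impl Weight {
    const fn from_parts(ref_time: u64, proof_size: u64) -> Self {
        Self { ref_time, proof_size }
    }
    const fn saturating_add(self, rhs: Self) -> Self {
        Self {
            ref_time: self.ref_time.saturating_add(rhs.ref_time),
            proof_size: self.proof_size.saturating_add(rhs.proof_size),
        }
    }
}

// Hypothetical per-operation DB costs standing in for `T::DbWeight::get()`.
const DB_READ: Weight = Weight::from_parts(25_000_000, 0); // 25 µs
const DB_WRITE: Weight = Weight::from_parts(100_000_000, 0); // 100 µs

fn reads(n: u64) -> Weight {
    Weight::from_parts(DB_READ.ref_time.saturating_mul(n), 0)
}
fn writes(n: u64) -> Weight {
    Weight::from_parts(DB_WRITE.ref_time.saturating_mul(n), 0)
}

fn main() {
    // Mirrors e.g. `withdraw_asset` above: base weight + 1 read + 1 write.
    let w = Weight::from_parts(26_768_000, 3593)
        .saturating_add(reads(1))
        .saturating_add(writes(1));
    println!("{w:?}");
}
```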
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/mod.rs
index bb850ac72c07d..3e47cf077a292 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/mod.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/mod.rs
@@ -61,16 +61,8 @@ impl<Call> XcmWeightInfo<Call> for AssetHubWestendXcmWeight<Call> {
 	fn withdraw_asset(assets: &MultiAssets) -> Weight {
 		assets.weigh_multi_assets(XcmFungibleWeight::<Runtime>::withdraw_asset())
 	}
-	// Currently there is no trusted reserve (`IsReserve = ()`),
-	// but we need this hack for `pallet_xcm::reserve_transfer_assets`
-	// (TODO) fix https://github.com/paritytech/polkadot/pull/7424
-	// (TODO) fix https://github.com/paritytech/polkadot/pull/7546
-	fn reserve_asset_deposited(_assets: &MultiAssets) -> Weight {
-		// TODO: if we change `IsReserve = ...` then use this line...
-		// TODO: or if remote weight estimation is fixed, then remove
-		// TODO: hardcoded - fix https://github.com/paritytech/cumulus/issues/1974
-		let hardcoded_weight = Weight::from_parts(1_000_000_000_u64, 0);
-		hardcoded_weight.min(XcmFungibleWeight::<Runtime>::reserve_asset_deposited())
+	fn reserve_asset_deposited(assets: &MultiAssets) -> Weight {
+		assets.weigh_multi_assets(XcmFungibleWeight::<Runtime>::reserve_asset_deposited())
 	}
 	fn receive_teleported_asset(assets: &MultiAssets) -> Weight {
 		assets.weigh_multi_assets(XcmFungibleWeight::<Runtime>::receive_teleported_asset())
@@ -127,10 +119,7 @@ impl<Call> XcmWeightInfo<Call> for AssetHubWestendXcmWeight<Call> {
 	}
 
 	fn deposit_asset(assets: &MultiAssetFilter, _dest: &MultiLocation) -> Weight {
-		// Hardcoded till the XCM pallet is fixed
-		let hardcoded_weight = Weight::from_parts(1_000_000_000_u64, 0);
-		let weight = assets.weigh_multi_assets(XcmFungibleWeight::<Runtime>::deposit_asset());
-		hardcoded_weight.min(weight)
+		assets.weigh_multi_assets(XcmFungibleWeight::<Runtime>::deposit_asset())
 	}
 	fn deposit_reserve_asset(
 		assets: &MultiAssetFilter,
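
The `mod.rs` change above (and its analogues for the bridge-hub runtimes further down, including `initiate_teleport`) drops the hand-written weight ceilings now that real benchmarks exist for `reserve_asset_deposited` and `deposit_asset`. A reduced sketch of the before/after semantics, using a local `Weight` stand-in and a hypothetical per-asset benchmark value:

```rust
// Before: the estimate was capped at a hardcoded ceiling. After: the
// benchmarked per-asset weight, scaled by the asset count, is used directly.
#[derive(Clone, Copy, Debug, PartialEq, PartialOrd)]
struct Weight(u64);

impl Weight {
    fn min(self, other: Self) -> Self { if self <= other { self } else { other } }
    fn saturating_mul(self, n: u64) -> Self { Weight(self.0.saturating_mul(n)) }
}

// Hypothetical per-asset benchmark result for `deposit_asset`.
const DEPOSIT_ASSET: Weight = Weight(26_609_000);

// Stand-in for `assets.weigh_multi_assets(...)`: per-asset weight times count.
fn weigh_multi_assets(asset_count: u64, per_asset: Weight) -> Weight {
    per_asset.saturating_mul(asset_count)
}

// Old behaviour: benchmark result capped at a hardcoded 1_000_000_000.
fn deposit_asset_old(asset_count: u64) -> Weight {
    Weight(1_000_000_000).min(weigh_multi_assets(asset_count, DEPOSIT_ASSET))
}

// New behaviour: the benchmarked estimate is trusted as-is.
fn deposit_asset_new(asset_count: u64) -> Weight {
    weigh_multi_assets(asset_count, DEPOSIT_ASSET)
}

fn main() {
    // With many assets, the old cap could *under*-estimate the real cost.
    assert_eq!(deposit_asset_old(100), Weight(1_000_000_000));
    assert_eq!(deposit_asset_new(100), Weight(2_660_900_000));
}
```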
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs
index d6763d2fc66f4..f482064e84e9c 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs
@@ -17,28 +17,26 @@
 //! Autogenerated weights for `pallet_xcm_benchmarks::fungible`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2023-09-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
-//! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: Some("asset-hub-westend-dev"), DB CACHE: 1024
+//! HOSTNAME: `runner-nbnwcyh-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: Compiled, CHAIN: Some("asset-hub-westend-dev"), DB CACHE: 1024
 
 // Executed Command:
-// ./target/production/polkadot-parachain
+// target/production/polkadot-parachain
 // benchmark
 // pallet
-// --template=./templates/xcm-bench-template.hbs
-// --chain=asset-hub-westend-dev
-// --wasm-execution=compiled
-// --pallet=pallet_xcm_benchmarks::fungible
-// --no-storage-info
-// --no-median-slopes
-// --no-min-squares
-// --extrinsic=*
 // --steps=50
 // --repeat=20
-// --json
-// --header=./file_header.txt
-// --output=./parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs
+// --extrinsic=*
+// --wasm-execution=compiled
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
+// --pallet=pallet_xcm_benchmarks::fungible
+// --chain=asset-hub-westend-dev
+// --header=./cumulus/file_header.txt
+// --template=./cumulus/templates/xcm-bench-template.hbs
+// --output=./cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/
 
 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
@@ -56,8 +54,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `101`
 		//  Estimated: `3593`
-		// Minimum execution time: 25_411_000 picoseconds.
-		Weight::from_parts(25_663_000, 3593)
+		// Minimum execution time: 25_407_000 picoseconds.
+		Weight::from_parts(25_949_000, 3593)
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -67,8 +65,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `101`
 		//  Estimated: `6196`
-		// Minimum execution time: 49_478_000 picoseconds.
-		Weight::from_parts(50_417_000, 6196)
+		// Minimum execution time: 51_335_000 picoseconds.
+		Weight::from_parts(52_090_000, 6196)
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
@@ -90,8 +88,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `246`
 		//  Estimated: `6196`
-		// Minimum execution time: 72_958_000 picoseconds.
-		Weight::from_parts(74_503_000, 6196)
+		// Minimum execution time: 74_312_000 picoseconds.
+		Weight::from_parts(76_725_000, 6196)
 			.saturating_add(T::DbWeight::get().reads(8))
 			.saturating_add(T::DbWeight::get().writes(4))
 	}
@@ -101,8 +99,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 500_000_000_000 picoseconds.
-		Weight::from_parts(500_000_000_000, 0)
+		// Minimum execution time: 18_446_744_073_709_551_000 picoseconds.
+		Weight::from_parts(18_446_744_073_709_551_000, 0)
 	}
 	// Storage: `ParachainInfo::ParachainId` (r:1 w:0)
 	// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
@@ -120,8 +118,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `145`
 		//  Estimated: `3610`
-		// Minimum execution time: 456_993_000 picoseconds.
-		Weight::from_parts(469_393_000, 3610)
+		// Minimum execution time: 446_848_000 picoseconds.
+		Weight::from_parts(466_251_000, 3610)
 			.saturating_add(T::DbWeight::get().reads(6))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
@@ -129,8 +127,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 3_580_000 picoseconds.
-		Weight::from_parts(3_717_000, 0)
+		// Minimum execution time: 3_602_000 picoseconds.
+		Weight::from_parts(3_844_000, 0)
 	}
 	// Storage: `System::Account` (r:1 w:1)
 	// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
@@ -138,8 +136,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `3593`
-		// Minimum execution time: 25_087_000 picoseconds.
-		Weight::from_parts(25_788_000, 3593)
+		// Minimum execution time: 25_480_000 picoseconds.
+		Weight::from_parts(26_142_000, 3593)
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -161,8 +159,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `145`
 		//  Estimated: `3610`
-		// Minimum execution time: 50_824_000 picoseconds.
-		Weight::from_parts(52_309_000, 3610)
+		// Minimum execution time: 51_540_000 picoseconds.
+		Weight::from_parts(53_744_000, 3610)
 			.saturating_add(T::DbWeight::get().reads(7))
 			.saturating_add(T::DbWeight::get().writes(3))
 	}
@@ -182,8 +180,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `145`
 		//  Estimated: `3610`
-		// Minimum execution time: 31_854_000 picoseconds.
-		Weight::from_parts(32_553_000, 3610)
+		// Minimum execution time: 32_279_000 picoseconds.
+		Weight::from_parts(33_176_000, 3610)
 			.saturating_add(T::DbWeight::get().reads(6))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs
index 6981c290c98ce..a0921c50dc59b 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs
@@ -38,11 +38,12 @@ use xcm::latest::prelude::*;
 use xcm_builder::{
 	AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses,
 	AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, CurrencyAdapter,
-	DenyReserveTransferToRelayChain, DenyThenTry, EnsureXcmOrigin, FungiblesAdapter, IsConcrete,
-	LocalMint, NativeAsset, NoChecking, ParentAsSuperuser, ParentIsPreset, RelayChainAsNative,
-	SiblingParachainAsNative, SiblingParachainConvertsVia, SignedAccountId32AsNative,
-	SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId,
-	UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic,
+	DenyReserveTransferToRelayChain, DenyThenTry, DescribeFamily, DescribePalletTerminal,
+	EnsureXcmOrigin, FungiblesAdapter, HashedDescription, IsConcrete, LocalMint, NativeAsset,
+	NoChecking, ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, SiblingParachainAsNative,
+	SiblingParachainConvertsVia, SignedAccountId32AsNative, SignedToAccountId32,
+	SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents,
+	WeightInfoBounds, WithComputedOrigin, WithUniqueTopic,
 };
 use xcm_executor::{traits::WithOriginFilter, XcmExecutor};
 
@@ -75,6 +76,9 @@ pub type LocationToAccountId = (
 	SiblingParachainConvertsVia<Sibling, AccountId>,
 	// Straight up local `AccountId32` origins just alias directly to `AccountId`.
 	AccountId32Aliases<RelayNetwork, AccountId>,
+	// Foreign chain accounts alias into local accounts according to a hash of their standard
+	// description.
+	HashedDescription<AccountId, DescribeFamily<DescribePalletTerminal>>,
 );
 
 /// Means for transacting the native currency on this chain.
@@ -222,6 +226,9 @@ match_types! {
 		MultiLocation { parents: 1, interior: Here } |
 		MultiLocation { parents: 1, interior: X1(Plurality { .. }) }
 	};
+	pub type TreasuryPallet: impl Contains<MultiLocation> = {
+		MultiLocation { parents: 1, interior: X1(PalletInstance(37)) }
+	};
 }
 
 /// A call filter for the XCM Transact instruction. This is a temporary measure until we properly
@@ -449,8 +456,9 @@ pub type Barrier = TrailingSetTopicAsId<
 					// If the message is one that immediately attempts to pay for execution, then
 					// allow it.
 					AllowTopLevelPaidExecutionFrom<Everything>,
-					// Parent and its pluralities (i.e. governance bodies) get free execution.
-					AllowExplicitUnpaidExecutionFrom<ParentOrParentsPlurality>,
+					// Parent, its pluralities (i.e. governance bodies) and the treasury pallet get
+					// free execution.
+					AllowExplicitUnpaidExecutionFrom<(ParentOrParentsPlurality, TreasuryPallet)>,
 					// Subscriptions for version tracking are OK.
 					AllowSubscriptionsFrom<Everything>,
 				),
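
Two behavioural changes land in this `xcm_config.rs`: foreign locations can now be converted to local accounts via `HashedDescription`, and the relay treasury pallet (pallet instance 37) joins the set of origins allowed unpaid execution. Below is a minimal sketch of how the new `TreasuryPallet` filter and the tuple-of-filters barrier behave; all types are local stand-ins for the `xcm` crate's, and it assumes (as in `frame_support`) that tuple `Contains` implementations act as a logical OR:

```rust
// Stand-ins for the relevant `xcm` types.
enum Junction {
    PalletInstance(u8),
}

enum Junctions {
    Here,
    X1(Junction),
}

struct MultiLocation {
    parents: u8,
    interior: Junctions,
}

trait Contains<T> {
    fn contains(t: &T) -> bool;
}

// Stands in for part of `ParentOrParentsPlurality`.
struct ParentOnly;
impl Contains<MultiLocation> for ParentOnly {
    fn contains(loc: &MultiLocation) -> bool {
        matches!(loc, MultiLocation { parents: 1, interior: Junctions::Here })
    }
}

// The new filter: the relay treasury pallet at instance 37.
struct TreasuryPallet;
impl Contains<MultiLocation> for TreasuryPallet {
    fn contains(loc: &MultiLocation) -> bool {
        matches!(
            loc,
            MultiLocation { parents: 1, interior: Junctions::X1(Junction::PalletInstance(37)) }
        )
    }
}

// Tuples of filters act as a logical OR, which is what
// `AllowExplicitUnpaidExecutionFrom<(ParentOrParentsPlurality, TreasuryPallet)>`
// relies on.
impl<A: Contains<MultiLocation>, B: Contains<MultiLocation>> Contains<MultiLocation> for (A, B) {
    fn contains(loc: &MultiLocation) -> bool {
        A::contains(loc) || B::contains(loc)
    }
}

fn main() {
    let treasury = MultiLocation {
        parents: 1,
        interior: Junctions::X1(Junction::PalletInstance(37)),
    };
    let parent = MultiLocation { parents: 1, interior: Junctions::Here };
    assert!(<(ParentOnly, TreasuryPallet)>::contains(&treasury));
    assert!(<(ParentOnly, TreasuryPallet)>::contains(&parent));
}
```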
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/xcm/mod.rs
index 0e740922f339d..ded5dc6702e60 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/xcm/mod.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/xcm/mod.rs
@@ -61,16 +61,8 @@ impl<Call> XcmWeightInfo<Call> for BridgeHubKusamaXcmWeight<Call> {
 	fn withdraw_asset(assets: &MultiAssets) -> Weight {
 		assets.weigh_multi_assets(XcmFungibleWeight::<Runtime>::withdraw_asset())
 	}
-	// Currently there is no trusted reserve (`IsReserve = ()`),
-	// but we need this hack for `pallet_xcm::reserve_transfer_assets`
-	// (TODO) fix https://github.com/paritytech/polkadot/pull/7424
-	// (TODO) fix https://github.com/paritytech/polkadot/pull/7546
-	fn reserve_asset_deposited(_assets: &MultiAssets) -> Weight {
-		// TODO: if we change `IsReserve = ...` then use this line...
-		// TODO: or if remote weight estimation is fixed, then remove
-		// TODO: hardcoded - fix https://github.com/paritytech/cumulus/issues/1974
-		let hardcoded_weight = Weight::from_parts(1_000_000_000_u64, 0);
-		hardcoded_weight.min(XcmFungibleWeight::<Runtime>::reserve_asset_deposited())
+	fn reserve_asset_deposited(assets: &MultiAssets) -> Weight {
+		assets.weigh_multi_assets(XcmFungibleWeight::<Runtime>::reserve_asset_deposited())
 	}
 	fn receive_teleported_asset(assets: &MultiAssets) -> Weight {
 		assets.weigh_multi_assets(XcmFungibleWeight::<Runtime>::receive_teleported_asset())
@@ -127,10 +119,7 @@ impl<Call> XcmWeightInfo<Call> for BridgeHubKusamaXcmWeight<Call> {
 	}
 
 	fn deposit_asset(assets: &MultiAssetFilter, _dest: &MultiLocation) -> Weight {
-		// Hardcoded till the XCM pallet is fixed
-		let hardcoded_weight = Weight::from_parts(1_000_000_000_u64, 0);
-		let weight = assets.weigh_multi_assets(XcmFungibleWeight::<Runtime>::deposit_asset());
-		hardcoded_weight.min(weight)
+		assets.weigh_multi_assets(XcmFungibleWeight::<Runtime>::deposit_asset())
 	}
 	fn deposit_reserve_asset(
 		assets: &MultiAssetFilter,
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs
index 6c8c7ab66bbdb..17ee5cb6a8dca 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs
@@ -17,28 +17,26 @@
 //! Autogenerated weights for `pallet_xcm_benchmarks::fungible`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2023-09-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
-//! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-kusama-dev"), DB CACHE: 1024
+//! HOSTNAME: `runner-nbnwcyh-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-kusama-dev"), DB CACHE: 1024
 
 // Executed Command:
-// ./target/production/polkadot-parachain
+// target/production/polkadot-parachain
 // benchmark
 // pallet
-// --template=./templates/xcm-bench-template.hbs
-// --chain=bridge-hub-kusama-dev
-// --wasm-execution=compiled
-// --pallet=pallet_xcm_benchmarks::fungible
-// --no-storage-info
-// --no-median-slopes
-// --no-min-squares
-// --extrinsic=*
 // --steps=50
 // --repeat=20
-// --json
-// --header=./file_header.txt
-// --output=./parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs
+// --extrinsic=*
+// --wasm-execution=compiled
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
+// --pallet=pallet_xcm_benchmarks::fungible
+// --chain=bridge-hub-kusama-dev
+// --header=./cumulus/file_header.txt
+// --template=./cumulus/templates/xcm-bench-template.hbs
+// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/xcm/
 
 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
@@ -56,8 +54,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `101`
 		//  Estimated: `3593`
-		// Minimum execution time: 24_064_000 picoseconds.
-		Weight::from_parts(24_751_000, 3593)
+		// Minimum execution time: 25_447_000 picoseconds.
+		Weight::from_parts(25_810_000, 3593)
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -67,8 +65,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `153`
 		//  Estimated: `6196`
-		// Minimum execution time: 51_097_000 picoseconds.
-		Weight::from_parts(51_960_000, 6196)
+		// Minimum execution time: 53_908_000 picoseconds.
+		Weight::from_parts(54_568_000, 6196)
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
@@ -90,8 +88,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `223`
 		//  Estimated: `6196`
-		// Minimum execution time: 75_319_000 picoseconds.
-		Weight::from_parts(77_356_000, 6196)
+		// Minimum execution time: 79_923_000 picoseconds.
+		Weight::from_parts(80_790_000, 6196)
 			.saturating_add(T::DbWeight::get().reads(8))
 			.saturating_add(T::DbWeight::get().writes(4))
 	}
@@ -101,8 +99,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 500_000_000_000 picoseconds.
-		Weight::from_parts(500_000_000_000, 0)
+		// Minimum execution time: 18_446_744_073_709_551_000 picoseconds.
+		Weight::from_parts(18_446_744_073_709_551_000, 0)
 	}
 	// Storage: `ParachainInfo::ParachainId` (r:1 w:0)
 	// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
@@ -120,8 +118,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `70`
 		//  Estimated: `3535`
-		// Minimum execution time: 29_392_000 picoseconds.
-		Weight::from_parts(29_943_000, 3535)
+		// Minimum execution time: 31_923_000 picoseconds.
+		Weight::from_parts(32_499_000, 3535)
 			.saturating_add(T::DbWeight::get().reads(6))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
@@ -129,8 +127,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 3_637_000 picoseconds.
-		Weight::from_parts(3_720_000, 0)
+		// Minimum execution time: 3_903_000 picoseconds.
+		Weight::from_parts(4_065_000, 0)
 	}
 	// Storage: `System::Account` (r:1 w:1)
 	// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
@@ -138,8 +136,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `52`
 		//  Estimated: `3593`
-		// Minimum execution time: 25_045_000 picoseconds.
-		Weight::from_parts(25_546_000, 3593)
+		// Minimum execution time: 26_987_000 picoseconds.
+		Weight::from_parts(27_486_000, 3593)
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -161,8 +159,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `122`
 		//  Estimated: `3593`
-		// Minimum execution time: 51_450_000 picoseconds.
-		Weight::from_parts(52_354_000, 3593)
+		// Minimum execution time: 56_012_000 picoseconds.
+		Weight::from_parts(58_067_000, 3593)
 			.saturating_add(T::DbWeight::get().reads(7))
 			.saturating_add(T::DbWeight::get().writes(3))
 	}
@@ -182,8 +180,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `70`
 		//  Estimated: `3535`
-		// Minimum execution time: 29_711_000 picoseconds.
-		Weight::from_parts(30_759_000, 3535)
+		// Minimum execution time: 32_350_000 picoseconds.
+		Weight::from_parts(33_403_000, 3535)
 			.saturating_add(T::DbWeight::get().reads(6))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/xcm/mod.rs
index 4f8c2dec7a8c8..7e9f21842725e 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/xcm/mod.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/xcm/mod.rs
@@ -61,16 +61,8 @@ impl<Call> XcmWeightInfo<Call> for BridgeHubPolkadotXcmWeight<Call> {
 	fn withdraw_asset(assets: &MultiAssets) -> Weight {
 		assets.weigh_multi_assets(XcmFungibleWeight::<Runtime>::withdraw_asset())
 	}
-	// Currently there is no trusted reserve (`IsReserve = ()`),
-	// but we need this hack for `pallet_xcm::reserve_transfer_assets`
-	// (TODO) fix https://github.com/paritytech/polkadot/pull/7424
-	// (TODO) fix https://github.com/paritytech/polkadot/pull/7546
-	fn reserve_asset_deposited(_assets: &MultiAssets) -> Weight {
-		// TODO: if we change `IsReserve = ...` then use this line...
-		// TODO: or if remote weight estimation is fixed, then remove
-		// TODO: hardcoded - fix https://github.com/paritytech/cumulus/issues/1974
-		let hardcoded_weight = Weight::from_parts(1_000_000_000_u64, 0);
-		hardcoded_weight.min(XcmFungibleWeight::<Runtime>::reserve_asset_deposited())
+	fn reserve_asset_deposited(assets: &MultiAssets) -> Weight {
+		assets.weigh_multi_assets(XcmFungibleWeight::<Runtime>::reserve_asset_deposited())
 	}
 	fn receive_teleported_asset(assets: &MultiAssets) -> Weight {
 		assets.weigh_multi_assets(XcmFungibleWeight::<Runtime>::receive_teleported_asset())
@@ -127,10 +119,7 @@ impl<Call> XcmWeightInfo<Call> for BridgeHubPolkadotXcmWeight<Call> {
 	}
 
 	fn deposit_asset(assets: &MultiAssetFilter, _dest: &MultiLocation) -> Weight {
-		// Hardcoded till the XCM pallet is fixed
-		let hardcoded_weight = Weight::from_parts(1_000_000_000_u64, 0);
-		let weight = assets.weigh_multi_assets(XcmFungibleWeight::<Runtime>::deposit_asset());
-		hardcoded_weight.min(weight)
+		assets.weigh_multi_assets(XcmFungibleWeight::<Runtime>::deposit_asset())
 	}
 	fn deposit_reserve_asset(
 		assets: &MultiAssetFilter,
@@ -154,10 +143,7 @@ impl<Call> XcmWeightInfo<Call> for BridgeHubPolkadotXcmWeight<Call> {
 		_dest: &MultiLocation,
 		_xcm: &Xcm<()>,
 	) -> Weight {
-		// Hardcoded till the XCM pallet is fixed
-		let hardcoded_weight = Weight::from_parts(200_000_000_u64, 0);
-		let weight = assets.weigh_multi_assets(XcmFungibleWeight::<Runtime>::initiate_teleport());
-		hardcoded_weight.min(weight)
+		assets.weigh_multi_assets(XcmFungibleWeight::<Runtime>::initiate_teleport())
 	}
 	fn report_holding(_response_info: &QueryResponseInfo, _assets: &MultiAssetFilter) -> Weight {
 		XcmGeneric::<Runtime>::report_holding()
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs
index 7c525dca051d2..f45f393636528 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs
@@ -17,28 +17,26 @@
 //! Autogenerated weights for `pallet_xcm_benchmarks::fungible`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2023-09-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
-//! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-polkadot-dev"), DB CACHE: 1024
+//! HOSTNAME: `runner-nbnwcyh-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-polkadot-dev"), DB CACHE: 1024
 
 // Executed Command:
-// ./target/production/polkadot-parachain
+// target/production/polkadot-parachain
 // benchmark
 // pallet
-// --template=./templates/xcm-bench-template.hbs
-// --chain=bridge-hub-polkadot-dev
-// --wasm-execution=compiled
-// --pallet=pallet_xcm_benchmarks::fungible
-// --no-storage-info
-// --no-median-slopes
-// --no-min-squares
-// --extrinsic=*
 // --steps=50
 // --repeat=20
-// --json
-// --header=./file_header.txt
-// --output=./parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs
+// --extrinsic=*
+// --wasm-execution=compiled
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
+// --pallet=pallet_xcm_benchmarks::fungible
+// --chain=bridge-hub-polkadot-dev
+// --header=./cumulus/file_header.txt
+// --template=./cumulus/templates/xcm-bench-template.hbs
+// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/xcm/
 
 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
@@ -56,8 +54,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `101`
 		//  Estimated: `3593`
-		// Minimum execution time: 23_862_000 picoseconds.
-		Weight::from_parts(24_603_000, 3593)
+		// Minimum execution time: 24_237_000 picoseconds.
+		Weight::from_parts(24_697_000, 3593)
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -67,8 +65,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `153`
 		//  Estimated: `6196`
-		// Minimum execution time: 51_101_000 picoseconds.
-		Weight::from_parts(51_976_000, 6196)
+		// Minimum execution time: 52_269_000 picoseconds.
+		Weight::from_parts(53_848_000, 6196)
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
@@ -90,8 +88,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `223`
 		//  Estimated: `6196`
-		// Minimum execution time: 72_983_000 picoseconds.
-		Weight::from_parts(74_099_000, 6196)
+		// Minimum execution time: 77_611_000 picoseconds.
+		Weight::from_parts(82_634_000, 6196)
 			.saturating_add(T::DbWeight::get().reads(8))
 			.saturating_add(T::DbWeight::get().writes(4))
 	}
@@ -101,8 +99,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 500_000_000_000 picoseconds.
-		Weight::from_parts(500_000_000_000, 0)
+		// Minimum execution time: 18_446_744_073_709_551_000 picoseconds.
+		Weight::from_parts(18_446_744_073_709_551_000, 0)
 	}
 	// Storage: `ParachainInfo::ParachainId` (r:1 w:0)
 	// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
@@ -120,8 +118,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `70`
 		//  Estimated: `3535`
-		// Minimum execution time: 27_131_000 picoseconds.
-		Weight::from_parts(28_062_000, 3535)
+		// Minimum execution time: 29_506_000 picoseconds.
+		Weight::from_parts(30_269_000, 3535)
 			.saturating_add(T::DbWeight::get().reads(6))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
@@ -129,8 +127,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 3_564_000 picoseconds.
-		Weight::from_parts(3_738_000, 0)
+		// Minimum execution time: 3_541_000 picoseconds.
+		Weight::from_parts(3_629_000, 0)
 	}
 	// Storage: `System::Account` (r:1 w:1)
 	// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
@@ -138,8 +136,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `52`
 		//  Estimated: `3593`
-		// Minimum execution time: 24_453_000 picoseconds.
-		Weight::from_parts(25_216_000, 3593)
+		// Minimum execution time: 25_651_000 picoseconds.
+		Weight::from_parts(26_078_000, 3593)
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -161,8 +159,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `122`
 		//  Estimated: `3593`
-		// Minimum execution time: 48_913_000 picoseconds.
-		Weight::from_parts(50_202_000, 3593)
+		// Minimum execution time: 52_050_000 picoseconds.
+		Weight::from_parts(53_293_000, 3593)
 			.saturating_add(T::DbWeight::get().reads(7))
 			.saturating_add(T::DbWeight::get().writes(3))
 	}
@@ -182,8 +180,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `70`
 		//  Estimated: `3535`
-		// Minimum execution time: 27_592_000 picoseconds.
-		Weight::from_parts(28_099_000, 3535)
+		// Minimum execution time: 30_009_000 picoseconds.
+		Weight::from_parts(30_540_000, 3535)
 			.saturating_add(T::DbWeight::get().reads(6))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_hub_rococo_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_hub_rococo_config.rs
index bc8f97ad97c1c..f59c9e238f5f2 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_hub_rococo_config.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_hub_rococo_config.rs
@@ -29,8 +29,8 @@ use bridge_runtime_common::{
 	},
 	messages_xcm_extension::{SenderAndLane, XcmBlobHauler, XcmBlobHaulerAdapter},
 	refund_relayer_extension::{
-		ActualFeeRefund, RefundBridgedParachainMessages, RefundableMessagesLane,
-		RefundableParachain,
+		ActualFeeRefund, RefundBridgedParachainMessages, RefundSignedExtensionAdapter,
+		RefundableMessagesLane, RefundableParachain,
 	},
 };
 use frame_support::{parameter_types, traits::PalletInfoAccess};
@@ -136,13 +136,15 @@ impl ThisChainWithMessages for BridgeHubRococo {
 }
 
 /// Signed extension that refunds relayers that are delivering messages from the Wococo parachain.
-pub type BridgeRefundBridgeHubWococoMessages = RefundBridgedParachainMessages<
-	Runtime,
-	RefundableParachain<BridgeParachainWococoInstance, bp_bridge_hub_wococo::BridgeHubWococo>,
-	RefundableMessagesLane<WithBridgeHubWococoMessagesInstance, BridgeHubWococoMessagesLane>,
-	ActualFeeRefund<Runtime>,
-	PriorityBoostPerMessage,
-	StrBridgeRefundBridgeHubWococoMessages,
+pub type BridgeRefundBridgeHubWococoMessages = RefundSignedExtensionAdapter<
+	RefundBridgedParachainMessages<
+		Runtime,
+		RefundableParachain<BridgeParachainWococoInstance, bp_bridge_hub_wococo::BridgeHubWococo>,
+		RefundableMessagesLane<WithBridgeHubWococoMessagesInstance, BridgeHubWococoMessagesLane>,
+		ActualFeeRefund<Runtime>,
+		PriorityBoostPerMessage,
+		StrBridgeRefundBridgeHubWococoMessages,
+	>,
 >;
 bp_runtime::generate_static_str_provider!(BridgeRefundBridgeHubWococoMessages);
 
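Here (and in the mirror-image Wococo config below) the refund extension is wrapped in `RefundSignedExtensionAdapter` instead of being used directly, separating the refund computation from the signed-extension plumbing. A toy sketch of that adapter shape, with illustrative traits standing in for the real `bridge_runtime_common` ones:

```rust
// Stands in for the refund-calculation side of the real types.
trait RefundCalculator {
    fn refund(&self, fee_paid: u64) -> u64;
}

// Stands in for `RefundBridgedParachainMessages<...>`.
struct RefundBridgedParachainMessages;

impl RefundCalculator for RefundBridgedParachainMessages {
    fn refund(&self, fee_paid: u64) -> u64 {
        // Illustrative: refund the fee the relayer actually paid.
        fee_paid
    }
}

// Stands in for the signed-extension interface the runtime requires.
trait SignedExtensionLike {
    fn post_dispatch(&self, fee_paid: u64) -> u64;
}

// The adapter: generic over any refund calculator, exposing it through the
// signed-extension interface by delegating to the wrapped value.
struct RefundSignedExtensionAdapter<R>(R);

impl<R: RefundCalculator> SignedExtensionLike for RefundSignedExtensionAdapter<R> {
    fn post_dispatch(&self, fee_paid: u64) -> u64 {
        self.0.refund(fee_paid)
    }
}

fn main() {
    let ext = RefundSignedExtensionAdapter(RefundBridgedParachainMessages);
    assert_eq!(ext.post_dispatch(42), 42);
}
```
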
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_hub_wococo_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_hub_wococo_config.rs
index 5178b75c30390..a0b16bace51d0 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_hub_wococo_config.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_hub_wococo_config.rs
@@ -29,8 +29,8 @@ use bridge_runtime_common::{
 	},
 	messages_xcm_extension::{SenderAndLane, XcmBlobHauler, XcmBlobHaulerAdapter},
 	refund_relayer_extension::{
-		ActualFeeRefund, RefundBridgedParachainMessages, RefundableMessagesLane,
-		RefundableParachain,
+		ActualFeeRefund, RefundBridgedParachainMessages, RefundSignedExtensionAdapter,
+		RefundableMessagesLane, RefundableParachain,
 	},
 };
 use frame_support::{parameter_types, traits::PalletInfoAccess};
@@ -136,13 +136,15 @@ impl ThisChainWithMessages for BridgeHubWococo {
 }
 
 /// Signed extension that refunds relayers that are delivering messages from the Rococo parachain.
-pub type BridgeRefundBridgeHubRococoMessages = RefundBridgedParachainMessages<
-	Runtime,
-	RefundableParachain<BridgeParachainRococoInstance, bp_bridge_hub_rococo::BridgeHubRococo>,
-	RefundableMessagesLane<WithBridgeHubRococoMessagesInstance, BridgeHubRococoMessagesLane>,
-	ActualFeeRefund<Runtime>,
-	PriorityBoostPerMessage,
-	StrBridgeRefundBridgeHubRococoMessages,
+pub type BridgeRefundBridgeHubRococoMessages = RefundSignedExtensionAdapter<
+	RefundBridgedParachainMessages<
+		Runtime,
+		RefundableParachain<BridgeParachainRococoInstance, bp_bridge_hub_rococo::BridgeHubRococo>,
+		RefundableMessagesLane<WithBridgeHubRococoMessagesInstance, BridgeHubRococoMessagesLane>,
+		ActualFeeRefund<Runtime>,
+		PriorityBoostPerMessage,
+		StrBridgeRefundBridgeHubRococoMessages,
+	>,
 >;
 bp_runtime::generate_static_str_provider!(BridgeRefundBridgeHubRococoMessages);
 
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/mod.rs
index 40a2036fb49a9..78a0eed91740d 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/mod.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/mod.rs
@@ -62,16 +62,8 @@ impl<Call> XcmWeightInfo<Call> for BridgeHubRococoXcmWeight<Call> {
 	fn withdraw_asset(assets: &MultiAssets) -> Weight {
 		assets.weigh_multi_assets(XcmFungibleWeight::<Runtime>::withdraw_asset())
 	}
-	// Currently there is no trusted reserve (`IsReserve = ()`),
-	// but we need this hack for `pallet_xcm::reserve_transfer_assets`
-	// (TODO) fix https://github.com/paritytech/polkadot/pull/7424
-	// (TODO) fix https://github.com/paritytech/polkadot/pull/7546
-	fn reserve_asset_deposited(_assets: &MultiAssets) -> Weight {
-		// TODO: if we change `IsReserve = ...` then use this line...
-		// TODO: or if remote weight estimation is fixed, then remove
-		// TODO: hardcoded - fix https://github.com/paritytech/cumulus/issues/1974
-		let hardcoded_weight = Weight::from_parts(1_000_000_000_u64, 0);
-		hardcoded_weight.min(XcmFungibleWeight::<Runtime>::reserve_asset_deposited())
+	fn reserve_asset_deposited(assets: &MultiAssets) -> Weight {
+		assets.weigh_multi_assets(XcmFungibleWeight::<Runtime>::reserve_asset_deposited())
 	}
 	fn receive_teleported_asset(assets: &MultiAssets) -> Weight {
 		assets.weigh_multi_assets(XcmFungibleWeight::<Runtime>::receive_teleported_asset())
@@ -128,10 +120,7 @@ impl<Call> XcmWeightInfo<Call> for BridgeHubRococoXcmWeight<Call> {
 	}
 
 	fn deposit_asset(assets: &MultiAssetFilter, _dest: &MultiLocation) -> Weight {
-		// Hardcoded till the XCM pallet is fixed
-		let hardcoded_weight = Weight::from_parts(1_000_000_000_u64, 0);
-		let weight = assets.weigh_multi_assets(XcmFungibleWeight::<Runtime>::deposit_asset());
-		hardcoded_weight.min(weight)
+		assets.weigh_multi_assets(XcmFungibleWeight::<Runtime>::deposit_asset())
 	}
 	fn deposit_reserve_asset(
 		assets: &MultiAssetFilter,
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs
index 8f9fbc912454b..cd1a673cb5397 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs
@@ -17,28 +17,26 @@
 //! Autogenerated weights for `pallet_xcm_benchmarks::fungible`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2023-09-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
-//! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-rococo-dev"), DB CACHE: 1024
+//! HOSTNAME: `runner-nbnwcyh-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-rococo-dev"), DB CACHE: 1024
 
 // Executed Command:
-// ./target/production/polkadot-parachain
+// target/production/polkadot-parachain
 // benchmark
 // pallet
-// --template=./templates/xcm-bench-template.hbs
-// --chain=bridge-hub-rococo-dev
-// --wasm-execution=compiled
-// --pallet=pallet_xcm_benchmarks::fungible
-// --no-storage-info
-// --no-median-slopes
-// --no-min-squares
-// --extrinsic=*
 // --steps=50
 // --repeat=20
-// --json
-// --header=./file_header.txt
-// --output=./parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs
+// --extrinsic=*
+// --wasm-execution=compiled
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
+// --pallet=pallet_xcm_benchmarks::fungible
+// --chain=bridge-hub-rococo-dev
+// --header=./cumulus/file_header.txt
+// --template=./cumulus/templates/xcm-bench-template.hbs
+// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/
 
 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
@@ -56,8 +54,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `101`
 		//  Estimated: `3593`
-		// Minimum execution time: 24_521_000 picoseconds.
-		Weight::from_parts(25_005_000, 3593)
+		// Minimum execution time: 23_601_000 picoseconds.
+		Weight::from_parts(24_226_000, 3593)
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -67,8 +65,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `153`
 		//  Estimated: `6196`
-		// Minimum execution time: 52_274_000 picoseconds.
-		Weight::from_parts(53_374_000, 6196)
+		// Minimum execution time: 51_043_000 picoseconds.
+		Weight::from_parts(52_326_000, 6196)
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
@@ -90,8 +88,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `260`
 		//  Estimated: `6196`
-		// Minimum execution time: 77_625_000 picoseconds.
-		Weight::from_parts(78_530_000, 6196)
+		// Minimum execution time: 75_639_000 picoseconds.
+		Weight::from_parts(76_736_000, 6196)
 			.saturating_add(T::DbWeight::get().reads(8))
 			.saturating_add(T::DbWeight::get().writes(4))
 	}
@@ -101,8 +99,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 500_000_000_000 picoseconds.
-		Weight::from_parts(500_000_000_000, 0)
+		// Minimum execution time: 18_446_744_073_709_551_000 picoseconds.
+		Weight::from_parts(18_446_744_073_709_551_000, 0)
 	}
 	// Storage: `ParachainInfo::ParachainId` (r:1 w:0)
 	// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
@@ -120,8 +118,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `107`
 		//  Estimated: `3572`
-		// Minimum execution time: 32_804_000 picoseconds.
-		Weight::from_parts(33_462_000, 3572)
+		// Minimum execution time: 31_190_000 picoseconds.
+		Weight::from_parts(32_150_000, 3572)
 			.saturating_add(T::DbWeight::get().reads(6))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
@@ -129,8 +127,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 3_921_000 picoseconds.
-		Weight::from_parts(4_050_000, 0)
+		// Minimum execution time: 3_603_000 picoseconds.
+		Weight::from_parts(3_721_000, 0)
 	}
 	// Storage: `System::Account` (r:1 w:1)
 	// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
@@ -138,8 +136,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `52`
 		//  Estimated: `3593`
-		// Minimum execution time: 25_436_000 picoseconds.
-		Weight::from_parts(25_789_000, 3593)
+		// Minimum execution time: 24_265_000 picoseconds.
+		Weight::from_parts(25_004_000, 3593)
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -161,8 +159,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `159`
 		//  Estimated: `3624`
-		// Minimum execution time: 53_846_000 picoseconds.
-		Weight::from_parts(54_684_000, 3624)
+		// Minimum execution time: 51_882_000 picoseconds.
+		Weight::from_parts(53_228_000, 3624)
 			.saturating_add(T::DbWeight::get().reads(7))
 			.saturating_add(T::DbWeight::get().writes(3))
 	}
@@ -182,8 +180,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `107`
 		//  Estimated: `3572`
-		// Minimum execution time: 33_052_000 picoseconds.
-		Weight::from_parts(33_897_000, 3572)
+		// Minimum execution time: 32_195_000 picoseconds.
+		Weight::from_parts(33_206_000, 3572)
 			.saturating_add(T::DbWeight::get().reads(6))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
diff --git a/cumulus/parachains/runtimes/glutton/glutton-kusama/Cargo.toml b/cumulus/parachains/runtimes/glutton/glutton-kusama/Cargo.toml
index 63b658ca977a5..ad13bf05a3ecc 100644
--- a/cumulus/parachains/runtimes/glutton/glutton-kusama/Cargo.toml
+++ b/cumulus/parachains/runtimes/glutton/glutton-kusama/Cargo.toml
@@ -43,6 +43,7 @@ xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkad
 cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false }
 cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] }
 cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false }
+cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false }
 cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false }
 cumulus-primitives-timestamp = { path = "../../../../primitives/timestamp", default-features = false }
 parachain-info = { path = "../../../pallets/parachain-info", default-features = false }
@@ -72,6 +73,7 @@ std = [
 	"cumulus-pallet-aura-ext/std",
 	"cumulus-pallet-parachain-system/std",
 	"cumulus-pallet-xcm/std",
+	"cumulus-primitives-aura/std",
 	"cumulus-primitives-core/std",
 	"cumulus-primitives-timestamp/std",
 	"frame-benchmarking?/std",
diff --git a/cumulus/parachains/runtimes/glutton/glutton-kusama/src/lib.rs b/cumulus/parachains/runtimes/glutton/glutton-kusama/src/lib.rs
index d3369202aac47..f5d52239e5437 100644
--- a/cumulus/parachains/runtimes/glutton/glutton-kusama/src/lib.rs
+++ b/cumulus/parachains/runtimes/glutton/glutton-kusama/src/lib.rs
@@ -81,12 +81,7 @@ use frame_system::{
 	limits::{BlockLength, BlockWeights},
 	EnsureRoot,
 };
-use parachains_common::{
-	kusama::consensus::{
-		BLOCK_PROCESSING_VELOCITY, RELAY_CHAIN_SLOT_DURATION_MILLIS, UNINCLUDED_SEGMENT_CAPACITY,
-	},
-	AccountId, Signature, SLOT_DURATION,
-};
+use parachains_common::{AccountId, Signature};
 #[cfg(any(feature = "std", test))]
 pub use sp_runtime::BuildStorage;
 pub use sp_runtime::{Perbill, Permill};
@@ -123,10 +118,28 @@ const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(10);
 const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75);
 /// We allow for 2 seconds of compute with a 6 second average block time.
 const MAXIMUM_BLOCK_WEIGHT: Weight = Weight::from_parts(
-	WEIGHT_REF_TIME_PER_SECOND.saturating_div(2),
+	WEIGHT_REF_TIME_PER_SECOND.saturating_mul(2),
 	cumulus_primitives_core::relay_chain::MAX_POV_SIZE as u64,
 );
 
+/// Maximum number of blocks simultaneously accepted by the Runtime, not yet included
+/// in the relay chain.
+const UNINCLUDED_SEGMENT_CAPACITY: u32 = 3;
+/// How many parachain blocks are processed by the relay chain per parent. Limits the
+/// number of blocks authored per slot.
+const BLOCK_PROCESSING_VELOCITY: u32 = 2;
+/// Relay chain slot duration, in milliseconds.
+const RELAY_CHAIN_SLOT_DURATION_MILLIS: u32 = 6000;
+
+/// This determines the average expected block time that we are targeting.
+/// Blocks will be produced at a minimum duration defined by `SLOT_DURATION`.
+/// `SLOT_DURATION` is picked up by `pallet_timestamp` which is in turn picked
+/// up by `pallet_aura` to implement `fn slot_duration()`.
+///
+/// Change this to adjust the block time.
+pub const MILLISECS_PER_BLOCK: u64 = 6000;
+pub const SLOT_DURATION: u64 = MILLISECS_PER_BLOCK;
+
 parameter_types! {
 	pub const BlockHashCount: BlockNumber = 4096;
 	pub const Version: RuntimeVersion = VERSION;
@@ -184,6 +197,13 @@ parameter_types! {
 	pub const ReservedDmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(2);
 }
 
+type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook<
+	Runtime,
+	RELAY_CHAIN_SLOT_DURATION_MILLIS,
+	BLOCK_PROCESSING_VELOCITY,
+	UNINCLUDED_SEGMENT_CAPACITY,
+>;
+
 impl cumulus_pallet_parachain_system::Config for Runtime {
 	type RuntimeEvent = RuntimeEvent;
 	type OnSystemEvent = ();
@@ -194,12 +214,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime {
 	type XcmpMessageHandler = ();
 	type ReservedXcmpWeight = ();
 	type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases;
-	type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook<
-		Runtime,
-		RELAY_CHAIN_SLOT_DURATION_MILLIS,
-		BLOCK_PROCESSING_VELOCITY,
-		UNINCLUDED_SEGMENT_CAPACITY,
-	>;
+	type ConsensusHook = ConsensusHook;
 }
 
 impl parachain_info::Config for Runtime {}
@@ -209,7 +224,7 @@ impl cumulus_pallet_aura_ext::Config for Runtime {}
 impl pallet_timestamp::Config for Runtime {
 	type Moment = u64;
 	type OnTimestampSet = Aura;
-	type MinimumPeriod = ConstU64<{ SLOT_DURATION / 2 }>;
+	type MinimumPeriod = ConstU64<0>;
 	type WeightInfo = weights::pallet_timestamp::WeightInfo<Runtime>;
 }
 
@@ -217,9 +232,9 @@ impl pallet_aura::Config for Runtime {
 	type AuthorityId = AuraId;
 	type DisabledValidators = ();
 	type MaxAuthorities = ConstU32<100_000>;
-	type AllowMultipleBlocksPerSlot = ConstBool<false>;
+	type AllowMultipleBlocksPerSlot = ConstBool<true>;
 	#[cfg(feature = "experimental")]
-	type SlotDuration = pallet_aura::MinimumPeriodTimesTwo<Self>;
+	type SlotDuration = ConstU64<SLOT_DURATION>;
 }
 
 impl pallet_glutton::Config for Runtime {
@@ -340,7 +355,7 @@ impl_runtime_apis! {
 
 	impl sp_consensus_aura::AuraApi<Block, AuraId> for Runtime {
 		fn slot_duration() -> sp_consensus_aura::SlotDuration {
-			sp_consensus_aura::SlotDuration::from_millis(Aura::slot_duration())
+			sp_consensus_aura::SlotDuration::from_millis(SLOT_DURATION)
 		}
 
 		fn authorities() -> Vec<AuraId> {
@@ -348,6 +363,15 @@ impl_runtime_apis! {
 		}
 	}
 
+	impl cumulus_primitives_aura::AuraUnincludedSegmentApi<Block> for Runtime {
+		fn can_build_upon(
+			included_hash: <Block as BlockT>::Hash,
+			slot: cumulus_primitives_aura::Slot,
+		) -> bool {
+			ConsensusHook::can_build_upon(included_hash, slot)
+		}
+	}
+
 	impl sp_block_builder::BlockBuilder<Block> for Runtime {
 		fn apply_extrinsic(
 			extrinsic: <Block as BlockT>::Extrinsic,
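
The glutton runtime moves to async-backing parameters: the consensus constants are now defined locally, the consensus hook is named so it can also serve the new `AuraUnincludedSegmentApi::can_build_upon`, and Aura allows multiple blocks per slot with a fixed 6s slot duration. A dependency-free sanity pass over how these constants relate (values copied from the diff; the reasoning comments are interpretation, not runtime logic):

```rust
const RELAY_CHAIN_SLOT_DURATION_MILLIS: u32 = 6000;
const MILLISECS_PER_BLOCK: u64 = 6000;
const SLOT_DURATION: u64 = MILLISECS_PER_BLOCK;
const BLOCK_PROCESSING_VELOCITY: u32 = 2;
const UNINCLUDED_SEGMENT_CAPACITY: u32 = 3;

fn main() {
    // `AllowMultipleBlocksPerSlot = true` is what makes a velocity of 2
    // usable: two parachain blocks may share one 6s Aura slot.
    let blocks_per_relay_parent = BLOCK_PROCESSING_VELOCITY;

    // With velocity 2 and capacity 3, one block of headroom remains in the
    // unincluded segment beyond a single relay parent's worth of blocks.
    assert!(UNINCLUDED_SEGMENT_CAPACITY > blocks_per_relay_parent);

    println!(
        "relay slot {}ms, para slot {}ms, velocity {}, unincluded capacity {}",
        RELAY_CHAIN_SLOT_DURATION_MILLIS,
        SLOT_DURATION,
        blocks_per_relay_parent,
        UNINCLUDED_SEGMENT_CAPACITY,
    );
}
```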
diff --git a/cumulus/polkadot-parachain/Cargo.toml b/cumulus/polkadot-parachain/Cargo.toml
index ac8ad53b52431..778b056b89d15 100644
--- a/cumulus/polkadot-parachain/Cargo.toml
+++ b/cumulus/polkadot-parachain/Cargo.toml
@@ -12,7 +12,7 @@ path = "src/main.rs"
 
 [dependencies]
 async-trait = "0.1.73"
-clap = { version = "4.4.4", features = ["derive"] }
+clap = { version = "4.4.6", features = ["derive"] }
 codec = { package = "parity-scale-codec", version = "3.0.0" }
 futures = "0.3.28"
 hex-literal = "0.4.1"
@@ -87,6 +87,7 @@ cumulus-client-consensus-relay-chain = { path = "../client/consensus/relay-chain
 cumulus-client-consensus-common = { path = "../client/consensus/common" }
 cumulus-client-consensus-proposer = { path = "../client/consensus/proposer" }
 cumulus-client-service = { path = "../client/service" }
+cumulus-primitives-aura = { path = "../primitives/aura" }
 cumulus-primitives-core = { path = "../primitives/core" }
 cumulus-primitives-parachain-inherent = { path = "../primitives/parachain-inherent" }
 cumulus-relay-chain-interface = { path = "../client/relay-chain-interface" }
diff --git a/cumulus/polkadot-parachain/src/command.rs b/cumulus/polkadot-parachain/src/command.rs
index b96163f63e43a..c47555a32168a 100644
--- a/cumulus/polkadot-parachain/src/command.rs
+++ b/cumulus/polkadot-parachain/src/command.rs
@@ -836,21 +836,21 @@ pub fn run() -> Result<()> {
 				info!("Is collating: {}", if config.role.is_authority() { "yes" } else { "no" });
 
 				match config.chain_spec.runtime() {
-					Runtime::AssetHubPolkadot => crate::service::start_generic_aura_node::<
+					Runtime::AssetHubPolkadot => crate::service::start_asset_hub_node::<
 						asset_hub_polkadot_runtime::RuntimeApi,
 						AssetHubPolkadotAuraId,
 					>(config, polkadot_config, collator_options, id, hwbench)
 					.await
 					.map(|r| r.0)
 					.map_err(Into::into),
-					Runtime::AssetHubKusama => crate::service::start_generic_aura_node::<
+					Runtime::AssetHubKusama => crate::service::start_asset_hub_node::<
 						asset_hub_kusama_runtime::RuntimeApi,
 						AuraId,
 					>(config, polkadot_config, collator_options, id, hwbench)
 					.await
 					.map(|r| r.0)
 					.map_err(Into::into),
-					Runtime::AssetHubWestend => crate::service::start_generic_aura_node::<
+					Runtime::AssetHubWestend => crate::service::start_asset_hub_node::<
 						asset_hub_westend_runtime::RuntimeApi,
 						AuraId,
 					>(config, polkadot_config, collator_options, id, hwbench)
@@ -876,12 +876,17 @@ pub fn run() -> Result<()> {
 						.await
 						.map(|r| r.0)
 						.map_err(Into::into),
-					Runtime::Seedling => crate::service::start_shell_node::<
-						seedling_runtime::RuntimeApi,
-					>(config, polkadot_config, collator_options, id, hwbench)
-					.await
-					.map(|r| r.0)
-					.map_err(Into::into),
+					Runtime::Seedling =>
+						crate::service::start_shell_node::<seedling_runtime::RuntimeApi>(
+							config,
+							polkadot_config,
+							collator_options,
+							id,
+							hwbench
+						)
+						.await
+						.map(|r| r.0)
+						.map_err(Into::into),
 					Runtime::ContractsRococo => crate::service::start_contracts_rococo_node(
 						config,
 						polkadot_config,
@@ -949,13 +954,10 @@ pub fn run() -> Result<()> {
 						.map(|r| r.0)
 						.map_err(Into::into),
 					Runtime::Glutton =>
-						crate::service::start_shell_node::<glutton_runtime::RuntimeApi>(
-							config,
-							polkadot_config,
-							collator_options,
-							id,
-							hwbench,
-						)
+						crate::service::start_basic_lookahead_node::<
+							glutton_runtime::RuntimeApi,
+							AuraId,
+						>(config, polkadot_config, collator_options, id, hwbench)
 						.await
 						.map(|r| r.0)
 						.map_err(Into::into),
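
The `command.rs` changes reroute the Asset Hub runtimes to `start_asset_hub_node` and Glutton to the new lookahead collator, while Seedling's arm is only reformatted. A reduced, synchronous sketch of the dispatch shape (all names are local stand-ins for the real async service starters):

```rust
enum Runtime {
    AssetHubPolkadot,
    Seedling,
    Glutton,
}

struct TaskManager;
struct Client;

fn start_asset_hub_node() -> Result<(TaskManager, Client), String> {
    Ok((TaskManager, Client))
}
fn start_shell_node() -> Result<(TaskManager, Client), String> {
    Ok((TaskManager, Client))
}
fn start_basic_lookahead_node() -> Result<(TaskManager, Client), String> {
    Ok((TaskManager, Client))
}

fn run(runtime: Runtime) -> Result<TaskManager, String> {
    match runtime {
        // Asset Hubs move from the generic Aura starter to a dedicated one.
        Runtime::AssetHubPolkadot => start_asset_hub_node().map(|r| r.0),
        // Seedling keeps the shell starter.
        Runtime::Seedling => start_shell_node().map(|r| r.0),
        // Glutton switches from the shell starter to the lookahead collator.
        Runtime::Glutton => start_basic_lookahead_node().map(|r| r.0),
    }
}

fn main() {
    assert!(run(Runtime::Glutton).is_ok());
}
```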
diff --git a/cumulus/polkadot-parachain/src/service.rs b/cumulus/polkadot-parachain/src/service.rs
index f7b053b4b6a9d..fa61f534784e0 100644
--- a/cumulus/polkadot-parachain/src/service.rs
+++ b/cumulus/polkadot-parachain/src/service.rs
@@ -14,11 +14,12 @@
 // You should have received a copy of the GNU General Public License
 // along with Cumulus.  If not, see <http://www.gnu.org/licenses/>.
 
-use codec::Codec;
+use codec::{Codec, Decode};
 use cumulus_client_cli::CollatorOptions;
 use cumulus_client_collator::service::CollatorService;
-use cumulus_client_consensus_aura::collators::basic::{
-	self as basic_aura, Params as BasicAuraParams,
+use cumulus_client_consensus_aura::collators::{
+	basic::{self as basic_aura, Params as BasicAuraParams},
+	lookahead::{self as aura, Params as AuraParams},
 };
 use cumulus_client_consensus_common::{
 	ParachainBlockImport as TParachainBlockImport, ParachainCandidate, ParachainConsensus,
@@ -31,7 +32,7 @@ use cumulus_client_service::{
 	BuildNetworkParams, CollatorSybilResistance, DARecoveryProfile, StartRelayChainTasksParams,
 };
 use cumulus_primitives_core::{
-	relay_chain::{Hash as PHash, PersistedValidationData},
+	relay_chain::{Hash as PHash, PersistedValidationData, ValidationCode},
 	ParaId,
 };
 use cumulus_relay_chain_interface::{OverseerHandle, RelayChainInterface};
@@ -43,7 +44,7 @@ use crate::rpc;
 pub use parachains_common::{AccountId, Balance, Block, BlockNumber, Hash, Header, Nonce};
 
 use cumulus_client_consensus_relay_chain::Verifier as RelayChainVerifier;
-use futures::lock::Mutex;
+use futures::{lock::Mutex, prelude::*};
 use sc_consensus::{
 	import_queue::{BasicQueue, Verifier as VerifierT},
 	BlockImportParams, ImportQueue,
@@ -53,10 +54,14 @@ use sc_network::{config::FullNetworkConfiguration, NetworkBlock};
 use sc_network_sync::SyncingService;
 use sc_service::{Configuration, PartialComponents, TFullBackend, TFullClient, TaskManager};
 use sc_telemetry::{Telemetry, TelemetryHandle, TelemetryWorker, TelemetryWorkerHandle};
-use sp_api::{ApiExt, ConstructRuntimeApi};
+use sp_api::{ApiExt, ConstructRuntimeApi, ProvideRuntimeApi};
 use sp_consensus_aura::AuraApi;
+use sp_core::traits::SpawnEssentialNamed;
 use sp_keystore::KeystorePtr;
-use sp_runtime::{app_crypto::AppCrypto, traits::Header as HeaderT};
+use sp_runtime::{
+	app_crypto::AppCrypto,
+	traits::{Block as BlockT, Header as HeaderT},
+};
 use std::{marker::PhantomData, sync::Arc, time::Duration};
 use substrate_prometheus_endpoint::Registry;
 
@@ -696,6 +701,188 @@ where
 	Ok((task_manager, client))
 }
 
+/// Start a node with the given parachain `Configuration` and relay chain `Configuration`.
+///
+/// This is the actual implementation that is abstracted over the executor and the runtime API.
+///
+/// This node is basic in the sense that it doesn't support functionality like transaction
+/// payment. Intended to replace `start_shell_node` as used for the glutton, shell, and seedling runtimes.
+#[sc_tracing::logging::prefix_logs_with("Parachain")]
+async fn start_basic_lookahead_node_impl<RuntimeApi, RB, BIQ, SC>(
+	parachain_config: Configuration,
+	polkadot_config: Configuration,
+	collator_options: CollatorOptions,
+	sybil_resistance_level: CollatorSybilResistance,
+	para_id: ParaId,
+	rpc_ext_builder: RB,
+	build_import_queue: BIQ,
+	start_consensus: SC,
+	hwbench: Option<sc_sysinfo::HwBench>,
+) -> sc_service::error::Result<(TaskManager, Arc<ParachainClient<RuntimeApi>>)>
+where
+	RuntimeApi: ConstructRuntimeApi<Block, ParachainClient<RuntimeApi>> + Send + Sync + 'static,
+	RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue<Block>
+		+ sp_api::Metadata<Block>
+		+ sp_session::SessionKeys<Block>
+		+ sp_api::ApiExt<Block>
+		+ sp_offchain::OffchainWorkerApi<Block>
+		+ sp_block_builder::BlockBuilder<Block>
+		+ cumulus_primitives_core::CollectCollationInfo<Block>
+		+ frame_rpc_system::AccountNonceApi<Block, AccountId, Nonce>,
+	RB: Fn(Arc<ParachainClient<RuntimeApi>>) -> Result<jsonrpsee::RpcModule<()>, sc_service::Error>
+		+ 'static,
+	BIQ: FnOnce(
+		Arc<ParachainClient<RuntimeApi>>,
+		ParachainBlockImport<RuntimeApi>,
+		&Configuration,
+		Option<TelemetryHandle>,
+		&TaskManager,
+	) -> Result<sc_consensus::DefaultImportQueue<Block>, sc_service::Error>,
+	SC: FnOnce(
+		Arc<ParachainClient<RuntimeApi>>,
+		ParachainBlockImport<RuntimeApi>,
+		Option<&Registry>,
+		Option<TelemetryHandle>,
+		&TaskManager,
+		Arc<dyn RelayChainInterface>,
+		Arc<sc_transaction_pool::FullPool<Block, ParachainClient<RuntimeApi>>>,
+		Arc<SyncingService<Block>>,
+		KeystorePtr,
+		Duration,
+		ParaId,
+		CollatorPair,
+		OverseerHandle,
+		Arc<dyn Fn(Hash, Option<Vec<u8>>) + Send + Sync>,
+		Arc<ParachainBackend>,
+	) -> Result<(), sc_service::Error>,
+{
+	let parachain_config = prepare_node_config(parachain_config);
+
+	let params = new_partial::<RuntimeApi, BIQ>(&parachain_config, build_import_queue)?;
+	let (block_import, mut telemetry, telemetry_worker_handle) = params.other;
+
+	let client = params.client.clone();
+	let backend = params.backend.clone();
+
+	let mut task_manager = params.task_manager;
+	let (relay_chain_interface, collator_key) = build_relay_chain_interface(
+		polkadot_config,
+		&parachain_config,
+		telemetry_worker_handle,
+		&mut task_manager,
+		collator_options.clone(),
+		hwbench.clone(),
+	)
+	.await
+	.map_err(|e| sc_service::Error::Application(Box::new(e) as Box<_>))?;
+
+	let validator = parachain_config.role.is_authority();
+	let prometheus_registry = parachain_config.prometheus_registry().cloned();
+	let transaction_pool = params.transaction_pool.clone();
+	let import_queue_service = params.import_queue.service();
+	let net_config = FullNetworkConfiguration::new(&parachain_config.network);
+
+	let (network, system_rpc_tx, tx_handler_controller, start_network, sync_service) =
+		build_network(BuildNetworkParams {
+			parachain_config: &parachain_config,
+			net_config,
+			client: client.clone(),
+			transaction_pool: transaction_pool.clone(),
+			para_id,
+			spawn_handle: task_manager.spawn_handle(),
+			relay_chain_interface: relay_chain_interface.clone(),
+			import_queue: params.import_queue,
+			sybil_resistance_level,
+		})
+		.await?;
+
+	let rpc_client = client.clone();
+	let rpc_builder = Box::new(move |_, _| rpc_ext_builder(rpc_client.clone()));
+
+	sc_service::spawn_tasks(sc_service::SpawnTasksParams {
+		rpc_builder,
+		client: client.clone(),
+		transaction_pool: transaction_pool.clone(),
+		task_manager: &mut task_manager,
+		config: parachain_config,
+		keystore: params.keystore_container.keystore(),
+		backend: backend.clone(),
+		network: network.clone(),
+		sync_service: sync_service.clone(),
+		system_rpc_tx,
+		tx_handler_controller,
+		telemetry: telemetry.as_mut(),
+	})?;
+
+	if let Some(hwbench) = hwbench {
+		sc_sysinfo::print_hwbench(&hwbench);
+		if validator {
+			warn_if_slow_hardware(&hwbench);
+		}
+
+		if let Some(ref mut telemetry) = telemetry {
+			let telemetry_handle = telemetry.handle();
+			task_manager.spawn_handle().spawn(
+				"telemetry_hwbench",
+				None,
+				sc_sysinfo::initialize_hwbench_telemetry(telemetry_handle, hwbench),
+			);
+		}
+	}
+
+	let announce_block = {
+		let sync_service = sync_service.clone();
+		Arc::new(move |hash, data| sync_service.announce_block(hash, data))
+	};
+
+	let relay_chain_slot_duration = Duration::from_secs(6);
+
+	let overseer_handle = relay_chain_interface
+		.overseer_handle()
+		.map_err(|e| sc_service::Error::Application(Box::new(e)))?;
+
+	start_relay_chain_tasks(StartRelayChainTasksParams {
+		client: client.clone(),
+		announce_block: announce_block.clone(),
+		para_id,
+		relay_chain_interface: relay_chain_interface.clone(),
+		task_manager: &mut task_manager,
+		da_recovery_profile: if validator {
+			DARecoveryProfile::Collator
+		} else {
+			DARecoveryProfile::FullNode
+		},
+		import_queue: import_queue_service,
+		relay_chain_slot_duration,
+		recovery_handle: Box::new(overseer_handle.clone()),
+		sync_service: sync_service.clone(),
+	})?;
+
+	if validator {
+		start_consensus(
+			client.clone(),
+			block_import,
+			prometheus_registry.as_ref(),
+			telemetry.as_ref().map(|t| t.handle()),
+			&task_manager,
+			relay_chain_interface.clone(),
+			transaction_pool,
+			sync_service.clone(),
+			params.keystore_container.keystore(),
+			relay_chain_slot_duration,
+			para_id,
+			collator_key.expect("Command line arguments do not allow this. qed"),
+			overseer_handle,
+			announce_block,
+			backend.clone(),
+		)?;
+	}
+
+	start_network.start_network();
+
+	Ok((task_manager, client))
+}
+
 /// Build the import queue for the rococo parachain runtime.
 pub fn rococo_parachain_build_import_queue(
 	client: Arc<ParachainClient<rococo_parachain_runtime::RuntimeApi>>,
@@ -1206,6 +1393,247 @@ where
 	.await
 }
 
+/// Start a shell node which should later transition into an Aura-powered parachain node. Asset Hub
+/// uses this because at genesis, Asset Hub was on the `shell` runtime, which didn't have Aura, so it
+/// needs to sync and upgrade before it can run `AuraApi` functions.
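+///
+/// A minimal invocation sketch; the argument values are assumed to be built by
+/// the caller (e.g. the CLI layer) and the concrete `RuntimeApi`/`AuraId`
+/// types are illustrative:
+///
+/// ```ignore
+/// // `parachain_config` etc. are assumed to be prepared by the caller.
+/// let (task_manager, client) = start_asset_hub_node::<RuntimeApi, AuraId>(
+/// 	parachain_config,
+/// 	polkadot_config,
+/// 	collator_options,
+/// 	para_id,
+/// 	hwbench,
+/// )
+/// .await?;
+/// ```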
+pub async fn start_asset_hub_node<RuntimeApi, AuraId: AppCrypto + Send + Codec + Sync>(
+	parachain_config: Configuration,
+	polkadot_config: Configuration,
+	collator_options: CollatorOptions,
+	para_id: ParaId,
+	hwbench: Option<sc_sysinfo::HwBench>,
+) -> sc_service::error::Result<(TaskManager, Arc<ParachainClient<RuntimeApi>>)>
+where
+	RuntimeApi: ConstructRuntimeApi<Block, ParachainClient<RuntimeApi>> + Send + Sync + 'static,
+	RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue<Block>
+		+ sp_api::Metadata<Block>
+		+ sp_session::SessionKeys<Block>
+		+ sp_api::ApiExt<Block>
+		+ sp_offchain::OffchainWorkerApi<Block>
+		+ sp_block_builder::BlockBuilder<Block>
+		+ cumulus_primitives_core::CollectCollationInfo<Block>
+		+ sp_consensus_aura::AuraApi<Block, <<AuraId as AppCrypto>::Pair as Pair>::Public>
+		+ pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi<Block, Balance>
+		+ frame_rpc_system::AccountNonceApi<Block, AccountId, Nonce>,
+	<<AuraId as AppCrypto>::Pair as Pair>::Signature:
+		TryFrom<Vec<u8>> + std::hash::Hash + sp_runtime::traits::Member + Codec,
+{
+	start_node_impl::<RuntimeApi, _, _, _>(
+		parachain_config,
+		polkadot_config,
+		collator_options,
+		CollatorSybilResistance::Resistant, // Aura
+		para_id,
+		|_| Ok(RpcModule::new(())),
+		aura_build_import_queue::<_, AuraId>,
+		|client,
+		 block_import,
+		 prometheus_registry,
+		 telemetry,
+		 task_manager,
+		 relay_chain_interface,
+		 transaction_pool,
+		 sync_oracle,
+		 keystore,
+		 relay_chain_slot_duration,
+		 para_id,
+		 collator_key,
+		 overseer_handle,
+		 announce_block| {
+			let relay_chain_interface2 = relay_chain_interface.clone();
+
+			let collator_service = CollatorService::new(
+				client.clone(),
+				Arc::new(task_manager.spawn_handle()),
+				announce_block,
+				client.clone(),
+			);
+
+			let spawner = task_manager.spawn_handle();
+
+			let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording(
+				spawner,
+				client.clone(),
+				transaction_pool,
+				prometheus_registry,
+				telemetry.clone(),
+			);
+
+			let collation_future = Box::pin(async move {
+				// Start collating with the `shell` runtime while waiting for an upgrade to an Aura
+				// compatible runtime.
+				let mut request_stream = cumulus_client_collator::relay_chain_driven::init(
+					collator_key.clone(),
+					para_id,
+					overseer_handle.clone(),
+				)
+				.await;
+				while let Some(request) = request_stream.next().await {
+					let pvd = request.persisted_validation_data().clone();
+					let last_head_hash =
+						match <Block as BlockT>::Header::decode(&mut &pvd.parent_head.0[..]) {
+							Ok(header) => header.hash(),
+							Err(e) => {
+								log::error!("Could not decode the head data: {e}");
+								request.complete(None);
+								continue
+							},
+						};
+
+					// Check if we have upgraded to an Aura compatible runtime and transition if
+					// necessary.
+					if client
+						.runtime_api()
+						.has_api::<dyn AuraApi<Block, AuraId>>(last_head_hash)
+						.unwrap_or(false)
+					{
+						// Respond to this request before transitioning to Aura.
+						request.complete(None);
+						break
+					}
+				}
+
+				// Move to Aura consensus.
+				let slot_duration = match cumulus_client_consensus_aura::slot_duration(&*client) {
+					Ok(d) => d,
+					Err(e) => {
+						log::error!("Could not get Aura slot duration: {e}");
+						return
+					},
+				};
+
+				let proposer = Proposer::new(proposer_factory);
+
+				let params = BasicAuraParams {
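+					// No extra inherent data is needed for this basic
+					// collator, so the provider closure simply returns `Ok(())`.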
+					create_inherent_data_providers: move |_, ()| async move { Ok(()) },
+					block_import,
+					para_client: client,
+					relay_client: relay_chain_interface2,
+					sync_oracle,
+					keystore,
+					collator_key,
+					para_id,
+					overseer_handle,
+					slot_duration,
+					relay_chain_slot_duration,
+					proposer,
+					collator_service,
+					// Very limited proposal time.
+					authoring_duration: Duration::from_millis(500),
+				};
+
+				basic_aura::run::<Block, <AuraId as AppCrypto>::Pair, _, _, _, _, _, _, _>(params)
+					.await
+			});
+
+			let spawner = task_manager.spawn_essential_handle();
+			spawner.spawn_essential("cumulus-asset-hub-collator", None, collation_future);
+
+			Ok(())
+		},
+		hwbench,
+	)
+	.await
+}
+
+/// Start an Aura-powered parachain node which uses the lookahead collator to support async backing.
+/// This node is basic in the sense that its runtime API doesn't include common functionality such as
+/// transaction payment. Used for the Aura glutton runtime.
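+///
+/// Invocation mirrors [`start_asset_hub_node`]; a sketch under the same
+/// assumptions (caller-prepared arguments, illustrative type parameters):
+///
+/// ```ignore
+/// let (task_manager, client) =
+/// 	start_basic_lookahead_node::<RuntimeApi, AuraId>(
+/// 		parachain_config,
+/// 		polkadot_config,
+/// 		collator_options,
+/// 		para_id,
+/// 		hwbench,
+/// 	)
+/// 	.await?;
+/// ```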
+pub async fn start_basic_lookahead_node<RuntimeApi, AuraId: AppCrypto>(
+	parachain_config: Configuration,
+	polkadot_config: Configuration,
+	collator_options: CollatorOptions,
+	para_id: ParaId,
+	hwbench: Option<sc_sysinfo::HwBench>,
+) -> sc_service::error::Result<(TaskManager, Arc<ParachainClient<RuntimeApi>>)>
+where
+	RuntimeApi: ConstructRuntimeApi<Block, ParachainClient<RuntimeApi>> + Send + Sync + 'static,
+	RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue<Block>
+		+ sp_api::Metadata<Block>
+		+ sp_session::SessionKeys<Block>
+		+ sp_api::ApiExt<Block>
+		+ sp_offchain::OffchainWorkerApi<Block>
+		+ sp_block_builder::BlockBuilder<Block>
+		+ cumulus_primitives_core::CollectCollationInfo<Block>
+		+ sp_consensus_aura::AuraApi<Block, <<AuraId as AppCrypto>::Pair as Pair>::Public>
+		+ frame_rpc_system::AccountNonceApi<Block, AccountId, Nonce>
+		+ cumulus_primitives_aura::AuraUnincludedSegmentApi<Block>,
+	<<AuraId as AppCrypto>::Pair as Pair>::Signature:
+		TryFrom<Vec<u8>> + std::hash::Hash + sp_runtime::traits::Member + Codec,
+{
+	start_basic_lookahead_node_impl::<RuntimeApi, _, _, _>(
+		parachain_config,
+		polkadot_config,
+		collator_options,
+		CollatorSybilResistance::Resistant, // Aura
+		para_id,
+		|_| Ok(RpcModule::new(())),
+		aura_build_import_queue::<_, AuraId>,
+		|client,
+		 block_import,
+		 prometheus_registry,
+		 telemetry,
+		 task_manager,
+		 relay_chain_interface,
+		 transaction_pool,
+		 sync_oracle,
+		 keystore,
+		 relay_chain_slot_duration,
+		 para_id,
+		 collator_key,
+		 overseer_handle,
+		 announce_block,
+		 backend| {
+			let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client)?;
+
+			let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording(
+				task_manager.spawn_handle(),
+				client.clone(),
+				transaction_pool,
+				prometheus_registry,
+				telemetry.clone(),
+			);
+			let proposer = Proposer::new(proposer_factory);
+
+			let collator_service = CollatorService::new(
+				client.clone(),
+				Arc::new(task_manager.spawn_handle()),
+				announce_block,
+				client.clone(),
+			);
+
+			let params = AuraParams {
+				create_inherent_data_providers: move |_, ()| async move { Ok(()) },
+				block_import,
+				para_client: client.clone(),
+				para_backend: backend.clone(),
+				relay_client: relay_chain_interface,
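+				// Maps a parachain block hash to the hash of the validation
+				// code in effect at that block, so the lookahead collator can
+				// track the current validation code across runtime upgrades.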
+				code_hash_provider: move |block_hash| {
+					client.code_at(block_hash).ok().map(ValidationCode).map(|c| c.hash())
+				},
+				sync_oracle,
+				keystore,
+				collator_key,
+				para_id,
+				overseer_handle,
+				slot_duration,
+				relay_chain_slot_duration,
+				proposer,
+				collator_service,
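+				// Async backing affords a longer authoring window than the
+				// basic collator's 500ms above.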
+				authoring_duration: Duration::from_millis(1500),
+			};
+
+			let fut =
+				aura::run::<Block, <AuraId as AppCrypto>::Pair, _, _, _, _, _, _, _, _, _>(params);
+			task_manager.spawn_essential_handle().spawn("aura", None, fut);
+
+			Ok(())
+		},
+		hwbench,
+	)
+	.await
+}
+
 #[sc_tracing::logging::prefix_logs_with("Parachain")]
 async fn start_contracts_rococo_node_impl<RuntimeApi, RB, BIQ, SC>(
 	parachain_config: Configuration,
diff --git a/cumulus/scripts/ci/changelog/README.md b/cumulus/scripts/ci/changelog/README.md
deleted file mode 100644
index 5c8ee9c9b914e..0000000000000
--- a/cumulus/scripts/ci/changelog/README.md
+++ /dev/null
@@ -1,77 +0,0 @@
-# Changelog
-
-Currently, the changelog is built locally. It will be moved to CI once labels stabilize.
-
-For now, a bit of preparation is required before you can run the script:
-- fetch the srtool digests
-- store them under the `digests` folder as `<chain>-srtool-digest.json`
-- ensure the `.env` file is up to date with correct information
-
-The content of the release notes is generated from the template files under the `scripts/ci/changelog/templates` folder.
-For readability and maintenance, the template is split into several small snippets.
-
-Run:
-```
-./bin/changelog <ref_since> [<ref_until>=HEAD]
-```
-
-For instance:
-```
-./bin/changelog parachains-v7.0.0-rc8
-```
-
-A file called `release-notes.md` will be generated and can be used for the release.
-
-## ENV
-
-You may use the following ENV for testing:
-
-```
-RUSTC_STABLE="rustc 1.56.1 (59eed8a2a 2021-11-01)"
-RUSTC_NIGHTLY="rustc 1.57.0-nightly (51e514c0f 2021-09-12)"
-PRE_RELEASE=true
-HIDE_SRTOOL_ROCOCO=true
-HIDE_SRTOOL_SHELL=true
-REF1=statemine-v5.0.0
-REF2=HEAD
-DEBUG=1
-NO_CACHE=1
-```
-
-By default, the template will include all the information, including the runtime data. For clients releases, we don't
-need those and they can be skipped by setting the following env:
-```
-RELEASE_TYPE=client
-```
-
-## Considered labels
-
-The following list will likely evolve over time and it will be hard to keep it in sync. In any case, if you want to find
-all the labels that are used, search for `meta` in the templates. Currently, the considered labels are:
-
-- Priority: C<N> labels
-- Audit: D<N> labels
-- E4 => new host function
-- B0 => silent, not showing up
-- B1-releasenotes (misc unless other labels)
-- B5-client (client changes)
-- B7-runtimenoteworthy (runtime changes)
-- T6-XCM
-
-Note that labels with the same letter are mutually exclusive. A PR should not have both `B0` and `B5`, or both `C1` and
-`C9`. In case of conflicts, the template will decide which label will be considered.
-
-## Dev and debugging
-
-### Hot Reload
-
-The following command allows **Hot Reload**:
-```
-fswatch templates -e ".*\.md$" | xargs -n1 -I{} ./bin/changelog statemine-v5.0.0
-```
-### Caching
-
-By default, if the changelog data from Github is already present, the calls to the Github API will be skipped and the
-local version of the data will be used. This is much faster. If you know that some labels have changed in Github, you
-probably want to refresh the data. You can then either delete manually the `cumulus.json` file or `export NO_CACHE=1` to
-force refreshing the data.
diff --git a/cumulus/test/service/Cargo.toml b/cumulus/test/service/Cargo.toml
index 5285376f3d59c..c996a01a12ed1 100644
--- a/cumulus/test/service/Cargo.toml
+++ b/cumulus/test/service/Cargo.toml
@@ -11,7 +11,7 @@ path = "src/main.rs"
 
 [dependencies]
 async-trait = "0.1.73"
-clap = { version = "4.4.4", features = ["derive"] }
+clap = { version = "4.4.6", features = ["derive"] }
 codec = { package = "parity-scale-codec", version = "3.0.0" }
 criterion = { version = "0.5.1", features = [ "async_tokio" ] }
 jsonrpsee = { version = "0.16.2", features = ["server"] }
diff --git a/cumulus/xcm/xcm-emulator/src/lib.rs b/cumulus/xcm/xcm-emulator/src/lib.rs
index 9fda0632bae40..caf73ae1e41ca 100644
--- a/cumulus/xcm/xcm-emulator/src/lib.rs
+++ b/cumulus/xcm/xcm-emulator/src/lib.rs
@@ -26,7 +26,7 @@ pub use std::{
 // Substrate
 pub use frame_support::{
 	assert_ok,
-	sp_runtime::{traits::Header as HeaderT, AccountId32, DispatchResult},
+	sp_runtime::{traits::Header as HeaderT, DispatchResult},
 	traits::{
 		EnqueueMessage, Get, Hooks, OriginTrait, ProcessMessage, ProcessMessageError, ServiceQueues,
 	},
@@ -61,6 +61,8 @@ pub use xcm::v3::prelude::{
 };
 pub use xcm_executor::traits::ConvertLocation;
 
+/// Shorthand for the `AccountId` type configured by a chain's `frame_system::Config`.
+pub type AccountIdOf<T> = <T as frame_system::Config>::AccountId;
+
 thread_local! {
 	/// Downward messages, each message is: `(to_para_id, [(relay_block_number, msg)])`
 	#[allow(clippy::type_complexity)]
@@ -90,8 +92,8 @@ pub trait CheckAssertion<Origin, Destination, Hops, Args>
 where
 	Origin: Chain + Clone,
 	Destination: Chain + Clone,
-	Origin::RuntimeOrigin: OriginTrait<AccountId = AccountId32> + Clone,
-	Destination::RuntimeOrigin: OriginTrait<AccountId = AccountId32> + Clone,
+	Origin::RuntimeOrigin: OriginTrait<AccountId = AccountIdOf<Origin::Runtime>> + Clone,
+	Destination::RuntimeOrigin: OriginTrait<AccountId = AccountIdOf<Destination::Runtime>> + Clone,
 	Hops: Clone,
 	Args: Clone,
 {
@@ -103,8 +105,8 @@ impl<Origin, Destination, Hops, Args> CheckAssertion<Origin, Destination, Hops,
 where
 	Origin: Chain + Clone,
 	Destination: Chain + Clone,
-	Origin::RuntimeOrigin: OriginTrait<AccountId = AccountId32> + Clone,
-	Destination::RuntimeOrigin: OriginTrait<AccountId = AccountId32> + Clone,
+	Origin::RuntimeOrigin: OriginTrait<AccountId = AccountIdOf<Origin::Runtime>> + Clone,
+	Destination::RuntimeOrigin: OriginTrait<AccountId = AccountIdOf<Destination::Runtime>> + Clone,
 	Hops: Clone,
 	Args: Clone,
 {
@@ -219,24 +221,24 @@ pub trait Chain: TestExt + NetworkComponent {
 		helpers::get_account_id_from_seed::<sr25519::Public>(seed)
 	}
 
-	fn account_data_of(account: AccountId) -> AccountData<Balance>;
+	fn account_data_of(account: AccountIdOf<Self::Runtime>) -> AccountData<Balance>;
 
 	fn events() -> Vec<<Self as Chain>::RuntimeEvent>;
 }
 
 pub trait RelayChain: Chain {
 	type MessageProcessor: ProcessMessage;
-	type SovereignAccountOf: ConvertLocation<AccountId>;
+	type SovereignAccountOf: ConvertLocation<AccountIdOf<Self::Runtime>>;
 
 	fn child_location_of(id: ParaId) -> MultiLocation {
 		(Ancestor(0), ParachainJunction(id.into())).into()
 	}
 
-	fn sovereign_account_id_of(location: MultiLocation) -> AccountId {
+	fn sovereign_account_id_of(location: MultiLocation) -> AccountIdOf<Self::Runtime> {
 		Self::SovereignAccountOf::convert_location(&location).unwrap()
 	}
 
-	fn sovereign_account_id_of_child_para(id: ParaId) -> AccountId {
+	fn sovereign_account_id_of_child_para(id: ParaId) -> AccountIdOf<Self::Runtime> {
 		Self::sovereign_account_id_of(Self::child_location_of(id))
 	}
 }
@@ -244,7 +246,7 @@ pub trait RelayChain: Chain {
 pub trait Parachain: Chain {
 	type XcmpMessageHandler: XcmpMessageHandler;
 	type DmpMessageHandler: DmpMessageHandler;
-	type LocationToAccountId: ConvertLocation<AccountId>;
+	type LocationToAccountId: ConvertLocation<AccountIdOf<Self::Runtime>>;
 	type ParachainInfo: Get<ParaId>;
 	type ParachainSystem;
 
@@ -268,7 +270,7 @@ pub trait Parachain: Chain {
 		(Parent, X1(ParachainJunction(para_id.into()))).into()
 	}
 
-	fn sovereign_account_id_of(location: MultiLocation) -> AccountId {
+	fn sovereign_account_id_of(location: MultiLocation) -> AccountIdOf<Self::Runtime> {
 		Self::LocationToAccountId::convert_location(&location).unwrap()
 	}
 }
@@ -365,7 +367,7 @@ macro_rules! decl_test_relay_chains {
 				type RuntimeEvent = $runtime::RuntimeEvent;
 				type System = $crate::SystemPallet::<Self::Runtime>;
 
-				fn account_data_of(account: $crate::AccountId) -> $crate::AccountData<$crate::Balance> {
+				fn account_data_of(account: $crate::AccountIdOf<Self::Runtime>) -> $crate::AccountData<$crate::Balance> {
 					<Self as $crate::TestExt>::ext_wrapper(|| $crate::SystemPallet::<Self::Runtime>::account(account).data.into())
 				}
 
@@ -590,7 +592,7 @@ macro_rules! decl_test_parachains {
 				type RuntimeEvent = $runtime::RuntimeEvent;
 				type System = $crate::SystemPallet::<Self::Runtime>;
 
-				fn account_data_of(account: $crate::AccountId) -> $crate::AccountData<$crate::Balance> {
+				fn account_data_of(account: $crate::AccountIdOf<Self::Runtime>) -> $crate::AccountData<$crate::Balance> {
 					<Self as $crate::TestExt>::ext_wrapper(|| $crate::SystemPallet::<Self::Runtime>::account(account).data.into())
 				}
 
@@ -1159,9 +1161,10 @@ macro_rules! __impl_check_assertion {
 		where
 			Origin: $crate::Chain + Clone,
 			Destination: $crate::Chain + Clone,
-			Origin::RuntimeOrigin: $crate::OriginTrait<AccountId = $crate::AccountId32> + Clone,
+			Origin::RuntimeOrigin:
+				$crate::OriginTrait<AccountId = $crate::AccountIdOf<Origin::Runtime>> + Clone,
 			Destination::RuntimeOrigin:
-				$crate::OriginTrait<AccountId = $crate::AccountId32> + Clone,
+				$crate::OriginTrait<AccountId = $crate::AccountIdOf<Destination::Runtime>> + Clone,
 			Hops: Clone,
 			Args: Clone,
 		{
@@ -1308,8 +1311,8 @@ where
 
 /// Struct that keeps an account's id and balance
 #[derive(Clone)]
-pub struct TestAccount {
-	pub account_id: AccountId,
+pub struct TestAccount<R: Chain> {
+	pub account_id: AccountIdOf<R::Runtime>,
 	pub balance: Balance,
 }
 
@@ -1326,9 +1329,9 @@ pub struct TestArgs {
 }
 
 /// Auxiliary struct to help create a new `Test` instance
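+///
+/// A hypothetical construction sketch; `Westend`, `AssetHubWestend`,
+/// `WestendSender`, `AssetHubWestendReceiver` and `relay_test_args` are
+/// illustrative names assumed to be declared by an emulated network, not
+/// defined in this crate:
+///
+/// ```ignore
+/// // All chain and helper names below are assumptions for illustration.
+/// let test_args = TestContext {
+/// 	sender: WestendSender::get(),
+/// 	receiver: AssetHubWestendReceiver::get(),
+/// 	args: relay_test_args(amount),
+/// };
+/// let mut test = Test::<Westend, AssetHubWestend>::new(test_args);
+/// ```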
-pub struct TestContext<T> {
-	pub sender: AccountId,
-	pub receiver: AccountId,
+pub struct TestContext<T, Origin: Chain, Destination: Chain> {
+	pub sender: AccountIdOf<Origin::Runtime>,
+	pub receiver: AccountIdOf<Destination::Runtime>,
 	pub args: T,
 }
 
@@ -1345,12 +1348,12 @@ pub struct Test<Origin, Destination, Hops = (), Args = TestArgs>
 where
 	Origin: Chain + Clone,
 	Destination: Chain + Clone,
-	Origin::RuntimeOrigin: OriginTrait<AccountId = AccountId32> + Clone,
-	Destination::RuntimeOrigin: OriginTrait<AccountId = AccountId32> + Clone,
+	Origin::RuntimeOrigin: OriginTrait<AccountId = AccountIdOf<Origin::Runtime>> + Clone,
+	Destination::RuntimeOrigin: OriginTrait<AccountId = AccountIdOf<Destination::Runtime>> + Clone,
 	Hops: Clone,
 {
-	pub sender: TestAccount,
-	pub receiver: TestAccount,
+	pub sender: TestAccount<Origin>,
+	pub receiver: TestAccount<Destination>,
 	pub signed_origin: Origin::RuntimeOrigin,
 	pub root_origin: Origin::RuntimeOrigin,
 	pub hops_assertion: HashMap<String, fn(Self)>,
@@ -1365,12 +1368,12 @@ where
 	Args: Clone,
 	Origin: Chain + Clone + CheckAssertion<Origin, Destination, Hops, Args>,
 	Destination: Chain + Clone + CheckAssertion<Origin, Destination, Hops, Args>,
-	Origin::RuntimeOrigin: OriginTrait<AccountId = AccountId32> + Clone,
-	Destination::RuntimeOrigin: OriginTrait<AccountId = AccountId32> + Clone,
+	Origin::RuntimeOrigin: OriginTrait<AccountId = AccountIdOf<Origin::Runtime>> + Clone,
+	Destination::RuntimeOrigin: OriginTrait<AccountId = AccountIdOf<Destination::Runtime>> + Clone,
 	Hops: Clone + CheckAssertion<Origin, Destination, Hops, Args>,
 {
 	/// Creates a new `Test` instance
-	pub fn new(test_args: TestContext<Args>) -> Self {
+	pub fn new(test_args: TestContext<Args, Origin, Destination>) -> Self {
 		Test {
 			sender: TestAccount {
 				account_id: test_args.sender.clone(),
diff --git a/polkadot/.gitattributes b/polkadot/.gitattributes
deleted file mode 100644
index 2ea1ab2d6b9cf..0000000000000
--- a/polkadot/.gitattributes
+++ /dev/null
@@ -1,2 +0,0 @@
-/.gitlab-ci.yml filter=ci-prettier
-/scripts/ci/gitlab/pipeline/*.yml filter=ci-prettier
diff --git a/polkadot/.github/dependabot.yml b/polkadot/.github/dependabot.yml
deleted file mode 100644
index a1fa925970bbb..0000000000000
--- a/polkadot/.github/dependabot.yml
+++ /dev/null
@@ -1,26 +0,0 @@
-version: 2
-updates:
-  - package-ecosystem: "cargo"
-    directory: "/"
-    labels: ["A2-insubstantial", "B0-silent", "C1-low", "E2-dependencies"]
-    # Handle updates for crates from github.com/paritytech/substrate manually.
-    ignore:
-      - dependency-name: "substrate-*"
-      - dependency-name: "sc-*"
-      - dependency-name: "sp-*"
-      - dependency-name: "frame-*"
-      - dependency-name: "fork-tree"
-      - dependency-name: "frame-remote-externalities"
-      - dependency-name: "pallet-*"
-      - dependency-name: "beefy-*"
-      - dependency-name: "try-runtime-*"
-      - dependency-name: "test-runner"
-      - dependency-name: "generate-bags"
-      - dependency-name: "sub-tokens"
-    schedule:
-      interval: "daily"
-  - package-ecosystem: github-actions
-    directory: '/'
-    labels: ["A2-insubstantial", "B0-silent", "C1-low", "E2-dependencies"]
-    schedule:
-      interval: daily
diff --git a/polkadot/.github/pr-custom-review.yml b/polkadot/.github/pr-custom-review.yml
deleted file mode 100644
index 136c9e75ff2de..0000000000000
--- a/polkadot/.github/pr-custom-review.yml
+++ /dev/null
@@ -1,42 +0,0 @@
-# 🔒 PROTECTED: Changes to locks-review-team should be approved by the current locks-review-team
-locks-review-team: locks-review
-team-leads-team: polkadot-review
-action-review-team: ci
-
-rules:
-  - name: Runtime files
-    check_type: changed_files
-    condition:
-      include: ^runtime\/(kusama|polkadot)\/src\/.+\.rs$
-      exclude: ^runtime\/(kusama|polkadot)\/src\/weights\/.+\.rs$
-    all_distinct:
-      - min_approvals: 1
-        teams:
-          - locks-review
-      - min_approvals: 1
-        teams:
-          - polkadot-review
-
-  - name: Core developers
-    check_type: changed_files
-    condition:
-      include: .*
-      # excluding files from 'Runtime files' and 'CI files' rules
-      exclude: ^runtime/(kusama|polkadot)/src/[^/]+\.rs$|^\.gitlab-ci\.yml|^(?!.*\.dic$|.*spellcheck\.toml$)scripts/ci/.*|^\.github/.*
-    min_approvals: 3
-    teams:
-      - core-devs
-
-  - name: CI files
-    check_type: changed_files
-    condition:
-      # dictionary files are excluded
-      include: ^\.gitlab-ci\.yml|^(?!.*\.dic$|.*spellcheck\.toml$)scripts/ci/.*|^\.github/.*
-    min_approvals: 2
-    teams:
-      - ci
-      - release-engineering
-
-prevent-review-request:
-  teams:
-    - core-devs
diff --git a/polkadot/.github/workflows/burnin-label-notification.yml b/polkadot/.github/workflows/burnin-label-notification.yml
deleted file mode 100644
index 536f8fa2a3f65..0000000000000
--- a/polkadot/.github/workflows/burnin-label-notification.yml
+++ /dev/null
@@ -1,24 +0,0 @@
-name: Notify devops when burn-in label applied
-on:
-  pull_request:
-    types: [labeled]
-
-jobs:
-  notify-devops:
-    runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        channel:
-          - name: 'Team: DevOps'
-            room: '!lUslSijLMgNcEKcAiE:parity.io'
-
-    steps:
-      - name: Send Matrix message to ${{ matrix.channel.name }}
-        if: startsWith(github.event.label.name, 'A1-')
-        uses: s3krit/matrix-message-action@70ad3fb812ee0e45ff8999d6af11cafad11a6ecf # v0.0.3
-        with:
-          room_id: ${{ matrix.channel.room }}
-          access_token: ${{ secrets.RELEASENOTES_MATRIX_V2_ACCESS_TOKEN }}
-          server: m.parity.io
-          message: |
-            @room Burn-in request received for the following PR: ${{ github.event.pull_request.html_url }}
diff --git a/polkadot/.github/workflows/check-D-labels.yml b/polkadot/.github/workflows/check-D-labels.yml
deleted file mode 100644
index 9abefaa6fa100..0000000000000
--- a/polkadot/.github/workflows/check-D-labels.yml
+++ /dev/null
@@ -1,50 +0,0 @@
-name: Check D labels
-
-on:
-  pull_request:
-    types: [labeled, opened, synchronize, unlabeled]
-    paths:
-      - runtime/polkadot/**
-      - runtime/kusama/**
-      - runtime/common/**
-      - primitives/src/**
-
-jobs:
-  check-labels:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Pull image
-        env:
-          IMAGE: paritytech/ruled_labels:0.4.0
-        run: docker pull $IMAGE
-
-      - name: Check labels
-        env:
-          IMAGE: paritytech/ruled_labels:0.4.0
-          MOUNT: /work
-          GITHUB_PR: ${{ github.event.pull_request.number }}
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-          API_BASE: https://api.github.com/repos
-          REPO: ${{ github.repository }}
-          RULES_PATH: labels/ruled_labels
-          CHECK_SPECS: specs_polkadot.yaml
-        run: |
-          echo "REPO: ${REPO}"
-          echo "GITHUB_PR: ${GITHUB_PR}"
-          # Clone repo with labels specs
-          git clone https://github.com/paritytech/labels
-          # Fetch the labels for the PR under test
-          labels=$( curl -H "Authorization: token ${GITHUB_TOKEN}" -s "$API_BASE/${REPO}/pulls/${GITHUB_PR}" | jq '.labels | .[] | .name' | tr "\n" ",")
-
-          if [ -z "${labels}" ]; then
-            docker run --rm -i -v $PWD/${RULES_PATH}/:$MOUNT $IMAGE check $MOUNT/$CHECK_SPECS --tags audit --no-label
-          fi
-
-          labels_args=${labels: :-1}
-          printf "Checking labels: %s\n" "${labels_args}"
-
-          # Prevent the shell from splitting labels with spaces
-          IFS=","
-
-          # --dev is more useful to debug mode to debug
-          docker run --rm -i -v $PWD/${RULES_PATH}/:$MOUNT $IMAGE check $MOUNT/$CHECK_SPECS --labels ${labels_args} --dev --tags audit
diff --git a/polkadot/.github/workflows/check-bootnodes.yml b/polkadot/.github/workflows/check-bootnodes.yml
deleted file mode 100644
index 897a90d3ae928..0000000000000
--- a/polkadot/.github/workflows/check-bootnodes.yml
+++ /dev/null
@@ -1,31 +0,0 @@
-# checks all networks we care about (kusama, polkadot, westend) and ensures
-# the bootnodes in their respective chainspecs are contactable
-
-name: Check all bootnodes
-on:
-  push:
-    branches:
-      # Catches v1.2.3 and v1.2.3-rc1
-      - release-v[0-9]+.[0-9]+.[0-9]+*
-
-jobs:
-  check_bootnodes:
-    strategy:
-      fail-fast: false
-      matrix:
-        runtime: [westend, kusama, polkadot]
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout sources
-        uses: actions/checkout@v3
-      - name: Install polkadot
-        shell: bash
-        run: |
-          curl -L "$(curl -s https://api.github.com/repos/paritytech/polkadot/releases/latest \
-          | jq -r '.assets | .[] | select(.name == "polkadot").browser_download_url')" \
-          | sudo tee /usr/local/bin/polkadot > /dev/null
-          sudo chmod +x /usr/local/bin/polkadot
-          polkadot --version
-      - name: Check ${{ matrix.runtime }} bootnodes
-        shell: bash
-        run: scripts/ci/github/check_bootnodes.sh node/service/chain-specs/${{ matrix.runtime }}.json
diff --git a/polkadot/.github/workflows/check-labels.yml b/polkadot/.github/workflows/check-labels.yml
deleted file mode 100644
index df0a0e9cf02dd..0000000000000
--- a/polkadot/.github/workflows/check-labels.yml
+++ /dev/null
@@ -1,45 +0,0 @@
-name: Check labels
-
-on:
-  pull_request:
-    types: [labeled, opened, synchronize, unlabeled]
-
-jobs:
-  check-labels:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Pull image
-        env:
-          IMAGE: paritytech/ruled_labels:0.4.0
-        run: docker pull $IMAGE
-
-      - name: Check labels
-        env:
-          IMAGE: paritytech/ruled_labels:0.4.0
-          MOUNT: /work
-          GITHUB_PR: ${{ github.event.pull_request.number }}
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-          API_BASE: https://api.github.com/repos
-          REPO: ${{ github.repository }}
-          RULES_PATH: labels/ruled_labels
-          CHECK_SPECS: specs_polkadot.yaml
-        run: |
-          echo "REPO: ${REPO}"
-          echo "GITHUB_PR: ${GITHUB_PR}"
-          # Clone repo with labels specs
-          git clone https://github.com/paritytech/labels
-          # Fetch the labels for the PR under test
-          labels=$( curl -H "Authorization: token ${GITHUB_TOKEN}" -s "$API_BASE/${REPO}/pulls/${GITHUB_PR}" | jq '.labels | .[] | .name' | tr "\n" ",")
-
-          if [ -z "${labels}" ]; then
-            docker run --rm -i -v $PWD/${RULES_PATH}/:$MOUNT $IMAGE check $MOUNT/$CHECK_SPECS --tags PR --no-label
-          fi
-
-          labels_args=${labels: :-1}
-          printf "Checking labels: %s\n" "${labels_args}"
-
-          # Prevent the shell from splitting labels with spaces
-          IFS=","
-
-          # --dev is more useful to debug mode to debug
-          docker run --rm -i -v $PWD/${RULES_PATH}/:$MOUNT $IMAGE check $MOUNT/$CHECK_SPECS --labels ${labels_args} --dev --tags PR
diff --git a/polkadot/.github/workflows/check-licenses.yml b/polkadot/.github/workflows/check-licenses.yml
deleted file mode 100644
index 1e654f7b30705..0000000000000
--- a/polkadot/.github/workflows/check-licenses.yml
+++ /dev/null
@@ -1,26 +0,0 @@
-name: Check licenses
-
-on:
-  pull_request:
-
-jobs:
-  check-licenses:
-    runs-on: ubuntu-22.04
-    steps:
-      - name: Checkout sources
-        uses: actions/checkout@v3
-      - uses: actions/setup-node@v3.8.1
-        with:
-          node-version: '18.x'
-          registry-url: 'https://npm.pkg.github.com'
-          scope: '@paritytech'
-      - name: Check the licenses
-        run: |
-          shopt -s globstar
-
-          npx @paritytech/license-scanner@0.0.5 scan \
-            --ensure-licenses=Apache-2.0 \
-            --ensure-licenses=GPL-3.0-only \
-            ./**/*.rs
-        env:
-          NODE_AUTH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/polkadot/.github/workflows/check-new-bootnodes.yml b/polkadot/.github/workflows/check-new-bootnodes.yml
deleted file mode 100644
index 25b2a0a56fe5f..0000000000000
--- a/polkadot/.github/workflows/check-new-bootnodes.yml
+++ /dev/null
@@ -1,28 +0,0 @@
-# If a chainspec file is updated with new bootnodes, we check to make sure those bootnodes are contactable
-
-name: Check new bootnodes
-on:
-  pull_request:
-    paths:
-      - 'node/service/chain-specs/*.json'
-
-jobs:
-  check_bootnodes:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout sources
-        uses: actions/checkout@v3
-        with:
-          fetch-depth: 0
-      - name: Install polkadot
-        shell: bash
-        run: |
-          curl -L "$(curl -s https://api.github.com/repos/paritytech/polkadot/releases/latest \
-          | jq -r '.assets | .[] | select(.name == "polkadot").browser_download_url')" \
-          | sudo tee /usr/local/bin/polkadot > /dev/null
-          sudo chmod +x /usr/local/bin/polkadot
-          polkadot --version
-      - name: Check new bootnodes
-        shell: bash
-        run: |
-          scripts/ci/github/check_new_bootnodes.sh
diff --git a/polkadot/.github/workflows/check-weights.yml b/polkadot/.github/workflows/check-weights.yml
deleted file mode 100644
index e6a6c43e0a6a9..0000000000000
--- a/polkadot/.github/workflows/check-weights.yml
+++ /dev/null
@@ -1,49 +0,0 @@
-name: Check updated weights
-
-on:
-  pull_request:
-    paths:
-      - 'runtime/*/src/weights/**'
-
-jobs:
-  check_weights_files:
-    strategy:
-      fail-fast: false
-      matrix:
-        runtime: [westend, kusama, polkadot, rococo]
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout sources
-        uses: actions/checkout@v3
-      - name: Check weights files
-        shell: bash
-        run: |
-          scripts/ci/github/verify_updated_weights.sh ${{ matrix.runtime }}
-
-  # This job uses https://github.com/ggwpez/substrate-weight-compare to compare the weights of the current
-  # release with the last release, then adds them as a comment to the PR.
-  check_weight_changes:
-    strategy:
-      fail-fast: false
-      matrix:
-        runtime: [westend, kusama, polkadot, rococo]
-    runs-on: ubuntu-latest
-    steps:
-      - name: Get latest release
-        run: |
-          LAST_RELEASE=$(curl -s https://api.github.com/repos/paritytech/polkadot/releases/latest | jq -r .tag_name)
-          echo "LAST_RELEASE=$LAST_RELEASE" >> $GITHUB_ENV
-      - name: Checkout current sources
-        uses: actions/checkout@v3
-        with:
-          fetch-depth: 0
-      - name: Check weight changes
-        shell: bash
-        run: |
-          cargo install --git https://github.com/ggwpez/substrate-weight-compare swc
-          ./scripts/ci/github/check_weights_swc.sh ${{ matrix.runtime }} "$LAST_RELEASE" | tee swc_output_${{ matrix.runtime }}.md
-      - name: Add comment
-        uses: thollander/actions-comment-pull-request@v2
-        with:
-          filePath: ./swc_output_${{ matrix.runtime }}.md
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
\ No newline at end of file
diff --git a/polkadot/.github/workflows/honggfuzz.yml b/polkadot/.github/workflows/honggfuzz.yml
deleted file mode 100644
index 27fa0d9967f3b..0000000000000
--- a/polkadot/.github/workflows/honggfuzz.yml
+++ /dev/null
@@ -1,137 +0,0 @@
-name: Run nightly fuzzer jobs
-
-on:
-  schedule:
-    - cron: '0 0 * * *'
-
-jobs:
-  xcm-fuzzer:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout sources
-        uses: actions/checkout@v3
-        with:
-          fetch-depth: 1
-
-      - name: Install minimal stable Rust
-        uses: actions-rs/toolchain@v1
-        with:
-          profile: minimal
-          toolchain: stable
-          override: true
-
-      - name: Install minimal nightly Rust
-        uses: actions-rs/toolchain@v1
-        with:
-          profile: minimal
-          toolchain: nightly
-          target: wasm32-unknown-unknown
-
-      - name: Install honggfuzz deps
-        run: sudo apt-get install --no-install-recommends binutils-dev libunwind8-dev
-
-      - name: Install honggfuzz
-        uses: actions-rs/cargo@v1
-        with:
-          command: install
-          args: honggfuzz --version "0.5.54"
-
-      - name: Build fuzzer binaries
-        working-directory: xcm/xcm-simulator/fuzzer/
-        run: cargo hfuzz build
-
-      - name: Run fuzzer
-        working-directory: xcm/xcm-simulator/fuzzer/
-        run: bash $GITHUB_WORKSPACE/scripts/ci/github/run_fuzzer.sh xcm-fuzzer
-
-  erasure-coding-round-trip:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout sources
-        uses: actions/checkout@v3
-        with:
-          fetch-depth: 1
-
-      - name: Cache Seed
-        id: cache-seed-round-trip
-        uses: actions/cache@v3
-        with:
-          path: erasure-coding/fuzzer/hfuzz_workspace
-          key: ${{ runner.os }}-erasure-coding
-
-      - name: Install minimal stable Rust
-        uses: actions-rs/toolchain@v1
-        with:
-          profile: minimal
-          toolchain: stable
-          override: true
-
-      - name: Install minimal nightly Rust
-        uses: actions-rs/toolchain@v1
-        with:
-          profile: minimal
-          toolchain: nightly
-          target: wasm32-unknown-unknown
-
-      - name: Install honggfuzz deps
-        run: sudo apt-get install --no-install-recommends binutils-dev libunwind8-dev
-
-      - name: Install honggfuzz
-        uses: actions-rs/cargo@v1
-        with:
-          command: install
-          args: honggfuzz --version "0.5.54"
-
-      - name: Build fuzzer binaries
-        working-directory: erasure-coding/fuzzer
-        run: cargo hfuzz build
-
-      - name: Run fuzzer
-        working-directory: erasure-coding/fuzzer
-        run: bash $GITHUB_WORKSPACE/scripts/ci/github/run_fuzzer.sh round_trip
-
-  erasure-coding-reconstruct:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout sources
-        uses: actions/checkout@v3
-        with:
-          fetch-depth: 1
-
-      - name: Cache Seed
-        id: cache-seed-reconstruct
-        uses: actions/cache@v3
-        with:
-          path: erasure-coding/fuzzer/hfuzz_workspace
-          key: ${{ runner.os }}-erasure-coding
-
-      - name: Install minimal stable Rust
-        uses: actions-rs/toolchain@v1
-        with:
-          profile: minimal
-          toolchain: stable
-          override: true
-
-      - name: Install minimal nightly Rust
-        uses: actions-rs/toolchain@v1
-        with:
-          profile: minimal
-          toolchain: nightly
-          target: wasm32-unknown-unknown
-
-      - name: Install honggfuzz deps
-        run: sudo apt-get install --no-install-recommends binutils-dev libunwind8-dev
-
-      - name: Install honggfuzz
-        uses: actions-rs/cargo@v1
-        with:
-          command: install
-          args: honggfuzz --version "0.5.54"
-
-      - name: Build fuzzer binaries
-        working-directory: erasure-coding/fuzzer
-        run: cargo hfuzz build
-
-      - name: Run fuzzer
-        working-directory: erasure-coding/fuzzer
-        run: bash $GITHUB_WORKSPACE/scripts/ci/github/run_fuzzer.sh reconstruct
diff --git a/polkadot/.github/workflows/pr-custom-review.yml b/polkadot/.github/workflows/pr-custom-review.yml
deleted file mode 100644
index 8e40c9ee72989..0000000000000
--- a/polkadot/.github/workflows/pr-custom-review.yml
+++ /dev/null
@@ -1,42 +0,0 @@
-name: Assign reviewers
-
-on:
-  pull_request:
-    branches:
-      - master
-      - main
-    types:
-      - opened
-      - reopened
-      - synchronize
-      - review_requested
-      - review_request_removed
-      - ready_for_review
-      - converted_to_draft
-  pull_request_review:
-
-jobs:
-  pr-custom-review:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Skip if pull request is in Draft
-        # `if: github.event.pull_request.draft == true` should be kept here, at
-        # the step level, rather than at the job level. The latter is not
-        # recommended because when the PR is moved from "Draft" to "Ready to
-        # review" the workflow will immediately be passing (since it was skipped),
-        # even though it hasn't actually ran, since it takes a few seconds for
-        # the workflow to start. This is also disclosed in:
-        # https://github.community/t/dont-run-actions-on-draft-pull-requests/16817/17
-        # That scenario would open an opportunity for the check to be bypassed:
-        # 1. Get your PR approved
-        # 2. Move it to Draft
-        # 3. Push whatever commits you want
-        # 4. Move it to "Ready for review"; now the workflow is passing (it was
-        #    skipped) and "Check reviews" is also passing (it won't be updated
-        #    until the workflow is finished)
-        if: github.event.pull_request.draft == true
-        run: exit 1
-      - name: pr-custom-review
-        uses: paritytech/pr-custom-review@action-v3
-        with:
-          checks-reviews-api: http://pcr.parity-prod.parity.io/api/v1/check_reviews
diff --git a/polkadot/.github/workflows/release-01_branch-check.yml b/polkadot/.github/workflows/release-01_branch-check.yml
deleted file mode 100644
index f2b559b7c176b..0000000000000
--- a/polkadot/.github/workflows/release-01_branch-check.yml
+++ /dev/null
@@ -1,21 +0,0 @@
-name: Release - Branch check
-on:
-  push:
-    branches:
-      # Catches v1.2.3 and v1.2.3-rc1
-      - release-v[0-9]+.[0-9]+.[0-9]+*
-
-  workflow_dispatch:
-
-jobs:
-  check_branch:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout sources
-        uses: actions/checkout@v3
-        with:
-          fetch-depth: 0
-
-      - name: Run check
-        shell: bash
-        run: ./scripts/ci/github/check-rel-br
diff --git a/polkadot/.github/workflows/release-10_candidate.yml b/polkadot/.github/workflows/release-10_candidate.yml
deleted file mode 100644
index 54a937a7819a1..0000000000000
--- a/polkadot/.github/workflows/release-10_candidate.yml
+++ /dev/null
@@ -1,71 +0,0 @@
-name: Release - RC automation
-on:
-  push:
-    branches:
-      # Catches v1.2.3 and v1.2.3-rc1
-      - release-v[0-9]+.[0-9]+.[0-9]+*
-jobs:
-  tag_rc:
-    runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        channel:
-          - name: "RelEng: Polkadot Release Coordination"
-            room: '!cqAmzdIcbOFwrdrubV:parity.io'
-
-    steps:
-      - name: Checkout sources
-        uses: actions/checkout@v3
-        with:
-          fetch-depth: 0
-      - id: compute_tag
-        name: Compute next rc tag
-        shell: bash
-        run: |
-          # Get last rc tag if exists, else set it to {version}-rc1
-          version=${GITHUB_REF#refs/heads/release-}
-          echo "$version"
-          echo "version=$version" >> $GITHUB_OUTPUT
-          git tag -l
-          last_rc=$(git tag -l "$version-rc*" | sort -V | tail -n 1)
-          if [ -n "$last_rc" ]; then
-            suffix=$(echo "$last_rc" | grep -Eo '[0-9]+$')
-            echo $suffix
-            ((suffix++))
-            echo $suffix
-            echo "new_tag=$version-rc$suffix" >> $GITHUB_OUTPUT
-            echo "first_rc=false" >> $GITHUB_OUTPUT
-          else
-            echo "new_tag=$version-rc1" >> $GITHUB_OUTPUT
-            echo "first_rc=true" >> $GITHUB_OUTPUT
-          fi
-
-      - name: Apply new tag
-        uses: tvdias/github-tagger@ed7350546e3e503b5e942dffd65bc8751a95e49d # v0.0.2
-        with:
-          # We can't use the normal GITHUB_TOKEN for the following reason:
-          # https://docs.github.com/en/actions/reference/events-that-trigger-workflows#triggering-new-workflows-using-a-personal-access-token
-          # RELEASE_BRANCH_TOKEN requires public_repo OAuth scope
-          repo-token: "${{ secrets.RELEASE_BRANCH_TOKEN }}"
-          tag: ${{ steps.compute_tag.outputs.new_tag }}
-
-      - id: create-issue
-        uses: JasonEtco/create-an-issue@e27dddc79c92bc6e4562f268fffa5ed752639abd # v2.9.1
-        # Only create the issue if it's the first release candidate
-        if: steps.compute_tag.outputs.first_rc == 'true'
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-          VERSION: ${{ steps.compute_tag.outputs.version }}
-        with:
-          filename: .github/ISSUE_TEMPLATE/release.md
-
-      - name: Send Matrix message to ${{ matrix.channel.name }}
-        uses: s3krit/matrix-message-action@70ad3fb812ee0e45ff8999d6af11cafad11a6ecf # v0.0.3
-        if: steps.create-issue.outputs.url != ''
-        with:
-          room_id: ${{ matrix.channel.room }}
-          access_token: ${{ secrets.RELEASENOTES_MATRIX_V2_ACCESS_TOKEN }}
-          server: m.parity.io
-          message: |
-            Release process for polkadot ${{ steps.compute_tag.outputs.version }} has been started.<br/>
-            Tracking issue: ${{ steps.create-issue.outputs.url }}
diff --git a/polkadot/.github/workflows/release-20_extrinsic-ordering-check-from-bin.yml b/polkadot/.github/workflows/release-20_extrinsic-ordering-check-from-bin.yml
deleted file mode 100644
index 0613ed04d35a9..0000000000000
--- a/polkadot/.github/workflows/release-20_extrinsic-ordering-check-from-bin.yml
+++ /dev/null
@@ -1,81 +0,0 @@
-# This workflow performs the Extrinsic Ordering Check on demand using a binary
-
-name: Release - Extrinsic Ordering Check
-on:
-  workflow_dispatch:
-    inputs:
-      reference_url:
-        description: The WebSocket url of the reference node
-        default: wss://kusama-rpc.polkadot.io
-        required: true
-      binary_url:
-        description: A url to a Linux binary for the node containing the runtime to test
-        default: https://releases.parity.io/polkadot/x86_64-debian:stretch/v0.9.10/polkadot
-        required: true
-      chain:
-        description: The name of the chain under test. Usually, you would pass a local chain
-        default: kusama-local
-        required: true
-
-jobs:
-  check:
-    name: Run check
-    runs-on: ubuntu-latest
-    env:
-      CHAIN: ${{github.event.inputs.chain}}
-      BIN_URL: ${{github.event.inputs.binary_url}}
-      REF_URL: ${{github.event.inputs.reference_url}}
-
-    steps:
-      - name: Checkout sources
-        uses: actions/checkout@v3
-
-      - name: Fetch binary
-        run: |
-          echo Fetching $BIN_URL
-          wget $BIN_URL
-          chmod a+x polkadot
-          ./polkadot --version
-
-      - name: Start local node
-        run: |
-          echo Running on $CHAIN
-          ./polkadot --chain=$CHAIN &
-
-      - name: Prepare output
-        run: |
-          VERSION=$(./polkadot --version)
-          echo "Metadata comparison:" >> output.txt
-          echo "Date: $(date)" >> output.txt
-          echo "Reference: $REF_URL" >> output.txt
-          echo "Target version: $VERSION" >> output.txt
-          echo "Chain: $CHAIN" >> output.txt
-          echo "----------------------------------------------------------------------" >> output.txt
-
-      - name: Pull polkadot-js-tools image
-        run: docker pull jacogr/polkadot-js-tools
-
-      - name: Compare the metadata
-        run: |
-          CMD="docker run --pull always --network host jacogr/polkadot-js-tools metadata $REF_URL ws://localhost:9944"
-          echo -e "Running:\n$CMD"
-          $CMD >> output.txt
-          sed -z -i 's/\n\n/\n/g' output.txt
-          cat output.txt | egrep -n -i ''
-          SUMMARY=$(./scripts/ci/github/extrinsic-ordering-filter.sh output.txt)
-          echo -e $SUMMARY
-          echo -e $SUMMARY >> output.txt
-
-      - name: Show result
-        run: |
-          cat output.txt
-
-      - name: Stop our local node
-        run: pkill polkadot
-
-      - name: Save output as artifact
-        uses: actions/upload-artifact@v3
-        with:
-          name: ${{ env.CHAIN }}
-          path: |
-            output.txt
diff --git a/polkadot/.github/workflows/release-21_extrinsic-ordering-check-from-two.yml b/polkadot/.github/workflows/release-21_extrinsic-ordering-check-from-two.yml
deleted file mode 100644
index 6513897f4a134..0000000000000
--- a/polkadot/.github/workflows/release-21_extrinsic-ordering-check-from-two.yml
+++ /dev/null
@@ -1,97 +0,0 @@
-# This workflow performs the Extrinsic Ordering Check on demand using two reference binaries
-
-name: Release - Extrinsic API Check with reference bins
-on:
-  workflow_dispatch:
-    inputs:
-      reference_binary_url:
-        description: A url to a Linux binary for the node containing the reference runtime to test against
-        default: https://releases.parity.io/polkadot/x86_64-debian:stretch/v0.9.26/polkadot
-        required: true
-      binary_url:
-        description: A url to a Linux binary for the node containing the runtime to test
-        default: https://releases.parity.io/polkadot/x86_64-debian:stretch/v0.9.27-rc1/polkadot
-        required: true
-
-jobs:
-  check:
-    name: Run check
-    runs-on: ubuntu-latest
-    env:
-      BIN_URL: ${{github.event.inputs.binary_url}}
-      REF_URL: ${{github.event.inputs.reference_binary_url}}
-    strategy:
-      fail-fast: false
-      matrix:
-        chain: [polkadot, kusama, westend, rococo]
-
-    steps:
-      - name: Checkout sources
-        uses: actions/checkout@v3
-
-      - name: Fetch reference binary
-        run: |
-          echo Fetching $REF_URL
-          curl $REF_URL -o polkadot-ref
-          chmod a+x polkadot-ref
-          ./polkadot-ref --version
-
-      - name: Fetch test binary
-        run: |
-          echo Fetching $BIN_URL
-          curl $BIN_URL -o polkadot
-          chmod a+x polkadot
-          ./polkadot --version
-
-      - name: Start local reference node
-        run: |
-          echo Running reference on ${{ matrix.chain }}-local
-          ./polkadot-ref --chain=${{ matrix.chain }}-local --rpc-port=9934 --ws-port=9945 --base-path=polkadot-ref-base/ &
-
-      - name: Start local test node
-        run: |
-          echo Running test on ${{ matrix.chain }}-local
-          ./polkadot --chain=${{ matrix.chain }}-local &
-
-      - name: Prepare output
-        run: |
-          REF_VERSION=$(./polkadot-ref --version)
-          BIN_VERSION=$(./polkadot --version)
-          echo "Metadata comparison:" >> output.txt
-          echo "Date: $(date)" >> output.txt
-          echo "Ref. binary: $REF_URL" >> output.txt
-          echo "Test binary: $BIN_URL" >> output.txt
-          echo "Ref. version: $REF_VERSION" >> output.txt
-          echo "Test version: $BIN_VERSION" >> output.txt
-          echo "Chain: ${{ matrix.chain }}-local" >> output.txt
-          echo "----------------------------------------------------------------------" >> output.txt
-
-      - name: Pull polkadot-js-tools image
-        run: docker pull jacogr/polkadot-js-tools
-
-      - name: Compare the metadata
-        run: |
-          CMD="docker run --pull always --network host jacogr/polkadot-js-tools metadata ws://localhost:9945 ws://localhost:9944"
-          echo -e "Running:\n$CMD"
-          $CMD >> output.txt
-          sed -z -i 's/\n\n/\n/g' output.txt
-          cat output.txt | egrep -n -i ''
-          SUMMARY=$(./scripts/ci/github/extrinsic-ordering-filter.sh output.txt)
-          echo -e $SUMMARY
-          echo -e $SUMMARY >> output.txt
-
-      - name: Show result
-        run: |
-          cat output.txt
-
-      - name: Save output as artifact
-        uses: actions/upload-artifact@v3
-        with:
-          name: ${{ matrix.chain }}
-          path: |
-            output.txt
-
-      - name: Stop our local nodes
-        run: |
-          pkill polkadot-ref
-          pkill polkadot
diff --git a/polkadot/.github/workflows/release-30_publish-draft-release.yml b/polkadot/.github/workflows/release-30_publish-draft-release.yml
deleted file mode 100644
index 206b1871d80a4..0000000000000
--- a/polkadot/.github/workflows/release-30_publish-draft-release.yml
+++ /dev/null
@@ -1,199 +0,0 @@
-name: Release - Publish draft
-
-on:
-  push:
-    tags:
-      # Catches v1.2.3 and v1.2.3-rc1
-      - v[0-9]+.[0-9]+.[0-9]+*
-
-jobs:
-  get-rust-versions:
-    runs-on: ubuntu-latest
-    container:
-      image: paritytech/ci-linux:production
-    outputs:
-      rustc-stable: ${{ steps.get-rust-versions.outputs.stable }}
-      rustc-nightly: ${{ steps.get-rust-versions.outputs.nightly }}
-    steps:
-      - id: get-rust-versions
-        run: |
-          echo "stable=$(rustc +stable --version)" >> $GITHUB_OUTPUT
-          echo "nightly=$(rustc +nightly --version)" >> $GITHUB_OUTPUT
-
-  build-runtimes:
-    runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        runtime: ["polkadot", "kusama", "westend", "rococo"]
-    steps:
-      - name: Checkout sources
-        uses: actions/checkout@v3
-
-      - name: Cache target dir
-        uses: actions/cache@v3
-        with:
-          path: "${{ github.workspace }}/runtime/${{ matrix.runtime }}/target"
-          key: srtool-target-${{ matrix.runtime }}-${{ github.sha }}
-          restore-keys: |
-            srtool-target-${{ matrix.runtime }}-
-            srtool-target-
-
-      - name: Build ${{ matrix.runtime }} runtime
-        id: srtool_build
-        uses: chevdor/srtool-actions@v0.8.0
-        with:
-          image: paritytech/srtool
-          chain: ${{ matrix.runtime }}
-
-      - name: Store srtool digest to disk
-        run: |
-          echo '${{ steps.srtool_build.outputs.json }}' | jq > ${{ matrix.runtime }}_srtool_output.json
-
-      - name: Upload ${{ matrix.runtime }} srtool json
-        uses: actions/upload-artifact@v3
-        with:
-          name: ${{ matrix.runtime }}-srtool-json
-          path: ${{ matrix.runtime }}_srtool_output.json
-
-      - name: Upload ${{ matrix.runtime }} runtime
-        uses: actions/upload-artifact@v3
-        with:
-          name: ${{ matrix.runtime }}-runtime
-          path: |
-            ${{ steps.srtool_build.outputs.wasm_compressed }}
-
-  publish-draft-release:
-    runs-on: ubuntu-latest
-    needs: ["get-rust-versions", "build-runtimes"]
-    outputs:
-      release_url: ${{ steps.create-release.outputs.html_url }}
-      asset_upload_url: ${{ steps.create-release.outputs.upload_url }}
-    steps:
-      - name: Checkout sources
-        uses: actions/checkout@v3
-        with:
-          fetch-depth: 0
-          path: polkadot
-
-      - name: Set up Ruby
-        uses: ruby/setup-ruby@v1
-        with:
-          ruby-version: 3.0.0
-
-      - name: Download srtool json output
-        uses: actions/download-artifact@v3
-
-      - name: Prepare tooling
-        run: |
-          cd polkadot/scripts/ci/changelog
-          gem install bundler changelogerator:0.9.1
-          bundle install
-          changelogerator --help
-
-          URL=https://github.com/chevdor/tera-cli/releases/download/v0.2.1/tera-cli_linux_amd64.deb
-          wget $URL -O tera.deb
-          sudo dpkg -i tera.deb
-          tera --version
-
-      - name: Generate release notes
-        env:
-          RUSTC_STABLE: ${{ needs.get-rust-versions.outputs.rustc-stable }}
-          RUSTC_NIGHTLY: ${{ needs.get-rust-versions.outputs.rustc-nightly }}
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-          NO_CACHE: 1
-          DEBUG: 1
-          ROCOCO_DIGEST: ${{ github.workspace}}/rococo-srtool-json/rococo_srtool_output.json
-          WESTEND_DIGEST: ${{ github.workspace}}/westend-srtool-json/westend_srtool_output.json
-          KUSAMA_DIGEST: ${{ github.workspace}}/kusama-srtool-json/kusama_srtool_output.json
-          POLKADOT_DIGEST: ${{ github.workspace}}/polkadot-srtool-json/polkadot_srtool_output.json
-          PRE_RELEASE: ${{ github.event.inputs.pre_release }}
-        run: |
-          find ${{env.GITHUB_WORKSPACE}} -type f -name "*_srtool_output.json"
-          ls -al $ROCOCO_DIGEST
-          ls -al $WESTEND_DIGEST
-          ls -al $KUSAMA_DIGEST
-          ls -al $POLKADOT_DIGEST
-
-          cd polkadot/scripts/ci/changelog
-
-          ./bin/changelog ${GITHUB_REF}
-          ls -al release-notes.md
-          ls -al context.json
-
-      - name: Archive artifact context.json
-        uses: actions/upload-artifact@v3
-        with:
-          name: release-notes-context
-          path: |
-            polkadot/scripts/ci/changelog/context.json
-            **/*_srtool_output.json
-
-      - name: Create draft release
-        id: create-release
-        uses: actions/create-release@v1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          tag_name: ${{ github.ref }}
-          release_name: Polkadot ${{ github.ref }}
-          body_path: ./polkadot/scripts/ci/changelog/release-notes.md
-          draft: true
-
-  publish-runtimes:
-    runs-on: ubuntu-latest
-    needs: ["publish-draft-release"]
-    env:
-      RUNTIME_DIR: runtime
-    strategy:
-      matrix:
-        runtime: ["polkadot", "kusama", "westend", "rococo"]
-    steps:
-      - name: Checkout sources
-        uses: actions/checkout@v3
-      - name: Download artifacts
-        uses: actions/download-artifact@v3
-      - name: Set up Ruby
-        uses: ruby/setup-ruby@v1
-        with:
-          ruby-version: 3.0.0
-      - name: Get runtime version
-        id: get-runtime-ver
-        run: |
-          echo "require './scripts/ci/github/lib.rb'" > script.rb
-          echo "puts get_runtime(runtime: \"${{ matrix.runtime }}\", runtime_dir: \"$RUNTIME_DIR\")" >> script.rb
-
-          echo "Current folder: $PWD"
-          ls "$RUNTIME_DIR/${{ matrix.runtime }}"
-          runtime_ver=$(ruby script.rb)
-          echo "Found version: >$runtime_ver<"
-          echo "runtime_ver=$runtime_ver" >> $GITHUB_OUTPUT
-
-      - name: Upload compressed ${{ matrix.runtime }} wasm
-        uses: actions/upload-release-asset@v1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ needs.publish-draft-release.outputs.asset_upload_url }}
-          asset_path: "${{ matrix.runtime }}-runtime/${{ matrix.runtime }}_runtime.compact.compressed.wasm"
-          asset_name: ${{ matrix.runtime }}_runtime-v${{ steps.get-runtime-ver.outputs.runtime_ver }}.compact.compressed.wasm
-          asset_content_type: application/wasm
-
-  post_to_matrix:
-    runs-on: ubuntu-latest
-    needs: publish-draft-release
-    strategy:
-      matrix:
-        channel:
-          - name: "RelEng: Polkadot Release Coordination"
-            room: '!cqAmzdIcbOFwrdrubV:parity.io'
-
-    steps:
-      - name: Send Matrix message to ${{ matrix.channel.name }}
-        uses: s3krit/matrix-message-action@70ad3fb812ee0e45ff8999d6af11cafad11a6ecf # v0.0.3
-        with:
-          room_id: ${{ matrix.channel.room }}
-          access_token: ${{ secrets.RELEASENOTES_MATRIX_V2_ACCESS_TOKEN }}
-          server: m.parity.io
-          message: |
-            **New version of polkadot tagged**: ${{ github.ref }}<br/>
-            Draft release created: ${{ needs.publish-draft-release.outputs.release_url }}
diff --git a/polkadot/.github/workflows/release-99_bot.yml b/polkadot/.github/workflows/release-99_bot.yml
deleted file mode 100644
index 5d45c0d44eda3..0000000000000
--- a/polkadot/.github/workflows/release-99_bot.yml
+++ /dev/null
@@ -1,49 +0,0 @@
-name: Release - Send new release notification to matrix channels
-on:
-  release:
-    types:
-      - published
-
-jobs:
-  ping_matrix:
-    strategy:
-      matrix:
-        channel:
-          - name: '#KusamaValidatorLounge:polkadot.builders'
-            room: '!LhjZccBOqFNYKLdmbb:polkadot.builders'
-            pre-release: false
-          - name: '#kusama-announcements:matrix.parity.io'
-            room: '!FMwxpQnYhRCNDRsYGI:matrix.parity.io'
-            pre-release: false
-          - name: '#polkadotvalidatorlounge:web3.foundation'
-            room: '!NZrbtteFeqYKCUGQtr:matrix.parity.io'
-            pre-release: false
-          - name: '#polkadot-announcements:matrix.parity.io'
-            room: '!UqHPWiCBGZWxrmYBkF:matrix.parity.io'
-            pre-release: false
-          - name: "RelEng: Polkadot Release Coordination"
-            room: '!cqAmzdIcbOFwrdrubV:parity.io'
-            pre-release: true
-          - name: 'Ledger <> Polkadot Coordination'
-            room: '!EoIhaKfGPmFOBrNSHT:web3.foundation'
-            pre-release: true
-          - name: 'General: Rust, Polkadot, Substrate'
-            room: '!aJymqQYtCjjqImFLSb:parity.io'
-            pre-release: false
-          - name: 'Team: DevOps'
-            room: '!lUslSijLMgNcEKcAiE:parity.io'
-            pre-release: true
-
-    runs-on: ubuntu-latest
-    steps:
-      - name: Send Matrix message to ${{ matrix.channel.name }}
-        if: github.event.release.prerelease == false || matrix.channel.pre-release
-        uses: s3krit/matrix-message-action@70ad3fb812ee0e45ff8999d6af11cafad11a6ecf # v0.0.3
-        with:
-          room_id: ${{ matrix.channel.room }}
-          access_token: ${{ secrets.RELEASENOTES_MATRIX_V2_ACCESS_TOKEN }}
-          server: m.parity.io
-          message: |
-            ***Polkadot ${{github.event.release.tag_name}} has been released!***<br/>
-            ${{github.event.release.html_url}}<br/><br/>
-            ${{github.event.release.body}}<br/>
diff --git a/polkadot/.gitlab-ci.yml b/polkadot/.gitlab-ci.yml
deleted file mode 100644
index b2d91e61da94a..0000000000000
--- a/polkadot/.gitlab-ci.yml
+++ /dev/null
@@ -1,287 +0,0 @@
-# .gitlab-ci.yml
-#
-# polkadot
-#
-# Pipelines can be triggered manually in the web.
-#
-# Please do not add new jobs without "rules:" and "*-env". There are &test-refs for everything;
-# "docker-env" is used for Rust jobs, and "kubernetes-env" for everything else. Please specify the
-# "image:" container name to be used with it, as there is no default one.
-
-# All jobs are ordered by duration using the DAG mechanism.
-# Currently, test-linux-stable is the longest job; the others are scheduled
-# so that they complete within its runtime and occupy fewer runners at any
-# one moment.
-
-stages:
-  - .pre
-  - weights
-  - check
-  - test
-  - build
-  - publish
-  - zombienet
-  - short-benchmarks
-
-workflow:
-  rules:
-    - if: $CI_COMMIT_TAG
-    - if: $CI_COMMIT_BRANCH
-
-variables:
-  GIT_STRATEGY: fetch
-  GIT_DEPTH: 100
-  CI_SERVER_NAME: "GitLab CI"
-  CI_IMAGE: !reference [.ci-unified, variables, CI_IMAGE]
-  BUILDAH_IMAGE: "quay.io/buildah/stable:v1.29"
-  BUILDAH_COMMAND: "buildah --storage-driver overlay2"
-  DOCKER_OS: "debian:stretch"
-  ARCH: "x86_64"
-  ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.65"
-
-default:
-  cache: {}
-  retry:
-    max: 2
-    when:
-      - runner_system_failure
-      - unknown_failure
-      - api_failure
-  interruptible: true
-
-.common-before-script:
-  before_script:
-    - !reference [.job-switcher, before_script]
-    - !reference [.timestamp, before_script]
-
-.collect-artifacts:
-  artifacts:
-    name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}"
-    when: on_success
-    expire_in: 7 days
-    paths:
-      - ./artifacts/
-
-.collect-artifacts-short:
-  artifacts:
-    name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}"
-    when: on_success
-    expire_in: 1 day
-    paths:
-      - ./artifacts/
-
-# collecting vars for pipeline stopper
-# they will be used if the job fails
-.pipeline-stopper-vars:
-  before_script:
-    - echo "FAILED_JOB_URL=${CI_JOB_URL}" > pipeline-stopper.env
-    - echo "FAILED_JOB_NAME=${CI_JOB_NAME}" >> pipeline-stopper.env
-    - echo "FAILED_JOB_NAME=${CI_JOB_NAME}" >> pipeline-stopper.env
-    - echo "PR_NUM=${CI_COMMIT_REF_NAME}" >> pipeline-stopper.env
-
-.pipeline-stopper-artifacts:
-  artifacts:
-    reports:
-      dotenv: pipeline-stopper.env
-
-.job-switcher:
-  before_script:
-    - if echo "$CI_DISABLED_JOBS" | grep -xF "$CI_JOB_NAME"; then echo "The job has been cancelled in CI settings"; exit 0; fi
-
-.kubernetes-env:
-  image: "${CI_IMAGE}"
-  before_script:
-    - !reference [.common-before-script, before_script]
-  tags:
-    - kubernetes-parity-build
-
-.docker-env:
-  image: "${CI_IMAGE}"
-  before_script:
-    - !reference [.common-before-script, before_script]
-  tags:
-    - linux-docker-vm-c2
-
-.compiler-info:
-  before_script:
-    - !reference [.common-before-script, before_script]
-    - rustup show
-    - cargo --version
-
-.test-refs:
-  rules:
-    - if: $CI_PIPELINE_SOURCE == "web"
-    - if: $CI_PIPELINE_SOURCE == "schedule"
-    - if: $CI_COMMIT_REF_NAME == "master"
-    - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs
-
-.common-refs:
-  # these jobs run always*
-  rules:
-    - if: $CI_PIPELINE_SOURCE == "web"
-    - if: $CI_PIPELINE_SOURCE == "schedule"
-    - if: $CI_COMMIT_REF_NAME == "master"
-    - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs
-    - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # e.g. v1.0, v2.1rc1
-    - if: $CI_COMMIT_REF_NAME =~ /^release-v[0-9]+\.[0-9]+.*$/ # e.g. release-v0.9.27
-
-.test-pr-refs:
-  rules:
-    - if: $CI_PIPELINE_SOURCE == "pipeline"
-      when: never
-    - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs
-
-.zombienet-refs:
-  rules:
-    - if: $CI_PIPELINE_SOURCE == "pipeline"
-      when: never
-    - if: $CI_PIPELINE_SOURCE == "schedule"
-      when: never
-    - if: $CI_COMMIT_REF_NAME == "master"
-    - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs
-
-.deploy-testnet-refs:
-  rules:
-    - if: $CI_PIPELINE_SOURCE == "pipeline"
-      when: never
-    - if: $CI_PIPELINE_SOURCE == "schedule"
-    - if: $CI_COMMIT_REF_NAME == "master"
-
-.publish-refs:
-  rules:
-    - if: $CI_PIPELINE_SOURCE == "pipeline"
-      when: never
-    - if: $CI_PIPELINE_SOURCE == "schedule"
-    - if: $CI_PIPELINE_SOURCE == "web" &&
-        $CI_COMMIT_REF_NAME == "master"
-    - if: $CI_COMMIT_REF_NAME == "master"
-    - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # e.g. v1.0, v2.1rc1
-
-.build-push-image:
-  variables:
-    CI_IMAGE: "${BUILDAH_IMAGE}"
-
-    REGISTRY: "docker.io"
-    DOCKER_OWNER: "paritypr"
-    DOCKER_USER: "${PARITYPR_USER}"
-    DOCKER_PASS: "${PARITYPR_PASS}"
-    IMAGE: "${REGISTRY}/${DOCKER_OWNER}/${IMAGE_NAME}"
-
-    ENGINE: "${BUILDAH_COMMAND}"
-    BUILDAH_FORMAT: "docker"
-    SKIP_IMAGE_VALIDATION: 1
-
-    PROJECT_ROOT: "."
-    BIN_FOLDER: "./artifacts"
-    VCS_REF: "${CI_COMMIT_SHA}"
-
-  before_script:
-    - !reference [.common-before-script, before_script]
-    - test -s ./artifacts/VERSION || exit 1
-    - test -s ./artifacts/EXTRATAG || exit 1
-    - export VERSION="$(cat ./artifacts/VERSION)"
-    - EXTRATAG="$(cat ./artifacts/EXTRATAG)"
-    - echo "Polkadot version = ${VERSION} (EXTRATAG = ${EXTRATAG})"
-  script:
-    - test "$DOCKER_USER" -a "$DOCKER_PASS" ||
-      ( echo "no docker credentials provided"; exit 1 )
-    - TAGS="${VERSION},${EXTRATAG}" scripts/ci/dockerfiles/build-injected.sh
-    - echo "$DOCKER_PASS" |
-      buildah login --username "$DOCKER_USER" --password-stdin "${REGISTRY}"
-    - $BUILDAH_COMMAND info
-    - $BUILDAH_COMMAND push --format=v2s2 "$IMAGE:$VERSION"
-    - $BUILDAH_COMMAND push --format=v2s2 "$IMAGE:$EXTRATAG"
-  after_script:
-    - buildah logout --all
-
-#### stage:                       .pre
-
-# By default our pipelines are interruptible, but some special pipelines shouldn't be interrupted:
-# * multi-project pipelines such as the ones triggered by the scripts repo
-#
-# In those cases, we add an uninterruptible .pre job; once that one has started,
-# the entire pipeline becomes uninterruptible.
-uninterruptible-pipeline:
-  extends: .kubernetes-env
-  variables:
-    CI_IMAGE: "paritytech/tools:latest"
-  stage: .pre
-  interruptible: false
-  rules:
-    - if: $CI_PIPELINE_SOURCE == "pipeline"
-  script: "true"
-
-include:
-  # weights jobs
-  - scripts/ci/gitlab/pipeline/weights.yml
-  # check jobs
-  - scripts/ci/gitlab/pipeline/check.yml
-  # test jobs
-  - scripts/ci/gitlab/pipeline/test.yml
-  # build jobs
-  - scripts/ci/gitlab/pipeline/build.yml
-  # short-benchmarks jobs
-  - scripts/ci/gitlab/pipeline/short-benchmarks.yml
-  # publish jobs
-  - scripts/ci/gitlab/pipeline/publish.yml
-  # zombienet jobs
-  - scripts/ci/gitlab/pipeline/zombienet.yml
-  # timestamp handler
-  - project: parity/infrastructure/ci_cd/shared
-    ref: main
-    file: /common/timestamp.yml
-  - project: parity/infrastructure/ci_cd/shared
-    ref: main
-    file: /common/ci-unified.yml
-
-
-#### stage:                        .post
-
-deploy-parity-testnet:
-  stage: .post
-  extends:
-    - .deploy-testnet-refs
-  variables:
-    POLKADOT_CI_COMMIT_NAME: "${CI_COMMIT_REF_NAME}"
-    POLKADOT_CI_COMMIT_REF: "${CI_COMMIT_SHORT_SHA}"
-  allow_failure: false
-  trigger: "parity/infrastructure/parity-testnet"
-
-# This job cancels the whole pipeline if any of the provided jobs fail.
-# In a DAG, every job chain is executed independently of the others. The `fail_fast` principle
-# suggests failing the pipeline as soon as possible to shorten the feedback loop.
-.cancel-pipeline-template:
-  stage: .post
-  rules:
-    - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs
-      when: on_failure
-  variables:
-    PROJECT_ID: "${CI_PROJECT_ID}"
-    PROJECT_NAME: "${CI_PROJECT_NAME}"
-    PIPELINE_ID: "${CI_PIPELINE_ID}"
-    FAILED_JOB_URL: "${FAILED_JOB_URL}"
-    FAILED_JOB_NAME: "${FAILED_JOB_NAME}"
-    PR_NUM: "${PR_NUM}"
-  trigger:
-    project: "parity/infrastructure/ci_cd/pipeline-stopper"
-    branch: "as-improve"
-
-remove-cancel-pipeline-message:
-  stage: .post
-  rules:
-    - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs
-  variables:
-    PROJECT_ID: "${CI_PROJECT_ID}"
-    PROJECT_NAME: "${CI_PROJECT_NAME}"
-    PIPELINE_ID: "${CI_PIPELINE_ID}"
-    FAILED_JOB_URL: "https://gitlab.com"
-    FAILED_JOB_NAME: "nope"
-    PR_NUM: "${CI_COMMIT_REF_NAME}"
-  trigger:
-    project: "parity/infrastructure/ci_cd/pipeline-stopper"
-
-cancel-pipeline-test-linux-stable:
-  extends: .cancel-pipeline-template
-  needs:
-    - job: test-linux-stable
diff --git a/polkadot/cli/Cargo.toml b/polkadot/cli/Cargo.toml
index 4646fb1c58822..8057342aaea0c 100644
--- a/polkadot/cli/Cargo.toml
+++ b/polkadot/cli/Cargo.toml
@@ -15,7 +15,7 @@ wasm-opt = false
 crate-type = ["cdylib", "rlib"]
 
 [dependencies]
-clap = { version = "4.4.4", features = ["derive"], optional = true }
+clap = { version = "4.4.6", features = ["derive"], optional = true }
 log = "0.4.17"
 thiserror = "1.0.48"
 futures = "0.3.21"
diff --git a/polkadot/node/core/backing/src/tests/mod.rs b/polkadot/node/core/backing/src/tests/mod.rs
index 6debfc7e5b562..6d0cbc5353e46 100644
--- a/polkadot/node/core/backing/src/tests/mod.rs
+++ b/polkadot/node/core/backing/src/tests/mod.rs
@@ -1608,8 +1608,8 @@ fn retry_works() {
 				},
 				AllMessages::RuntimeApi(RuntimeApiMessage::Request(
 					_,
-					RuntimeApiRequest::SessionExecutorParams(sess_idx, tx),
-				)) if sess_idx == 1 => {
+					RuntimeApiRequest::SessionExecutorParams(1, tx),
+				)) => {
 					tx.send(Ok(Some(ExecutorParams::default()))).unwrap();
 				},
 				msg => {
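
A note on the test change above: binding the session index and checking it in a guard is replaced by matching the literal directly. A minimal, self-contained sketch of the equivalence, with stand-in types rather than the real subsystem messages:

```rust
// Stand-in enum; the real code matches RuntimeApiRequest::SessionExecutorParams.
enum Request {
    SessionExecutorParams(u32),
}

fn is_session_one(req: Request) -> bool {
    match req {
        // Literal pattern: matches only when the session index is exactly 1,
        // replacing the older `SessionExecutorParams(sess_idx, ..) if sess_idx == 1` form.
        Request::SessionExecutorParams(1) => true,
        _ => false,
    }
}

fn main() {
    assert!(is_session_one(Request::SessionExecutorParams(1)));
    assert!(!is_session_one(Request::SessionExecutorParams(2)));
}
```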
diff --git a/polkadot/node/core/pvf/common/Cargo.toml b/polkadot/node/core/pvf/common/Cargo.toml
index 0f7308396d809..4bdacca72f420 100644
--- a/polkadot/node/core/pvf/common/Cargo.toml
+++ b/polkadot/node/core/pvf/common/Cargo.toml
@@ -29,7 +29,7 @@ sp-io = { path = "../../../../../substrate/primitives/io" }
 sp-tracing = { path = "../../../../../substrate/primitives/tracing" }
 
 [target.'cfg(target_os = "linux")'.dependencies]
-landlock = "0.2.0"
+landlock = "0.3.0"
 
 [dev-dependencies]
 assert_matches = "1.4.0"
diff --git a/polkadot/node/core/pvf/common/src/worker/mod.rs b/polkadot/node/core/pvf/common/src/worker/mod.rs
index 59973f6cbbc64..d0bd5b6bd7c56 100644
--- a/polkadot/node/core/pvf/common/src/worker/mod.rs
+++ b/polkadot/node/core/pvf/common/src/worker/mod.rs
@@ -18,7 +18,7 @@
 
 pub mod security;
 
-use crate::{worker_dir, SecurityStatus, LOG_TARGET};
+use crate::{SecurityStatus, LOG_TARGET};
 use cpu_time::ProcessTime;
 use futures::never::Never;
 use std::{
@@ -115,6 +115,7 @@ macro_rules! decl_worker_main {
 				},
 			}
 
+			let mut socket_path = None;
 			let mut worker_dir_path = None;
 			let mut node_version = None;
 			let mut can_enable_landlock = false;
@@ -123,6 +124,10 @@ macro_rules! decl_worker_main {
 			let mut i = 2;
 			while i < args.len() {
 				match args[i].as_ref() {
+					"--socket-path" => {
+						socket_path = Some(args[i + 1].as_str());
+						i += 1
+					},
 					"--worker-dir-path" => {
 						worker_dir_path = Some(args[i + 1].as_str());
 						i += 1
@@ -138,16 +143,24 @@ macro_rules! decl_worker_main {
 				}
 				i += 1;
 			}
+			let socket_path = socket_path.expect("the --socket-path argument is required");
 			let worker_dir_path =
 				worker_dir_path.expect("the --worker-dir-path argument is required");
 
+			let socket_path = std::path::Path::new(socket_path).to_owned();
 			let worker_dir_path = std::path::Path::new(worker_dir_path).to_owned();
 			let security_status = $crate::SecurityStatus {
 				can_enable_landlock,
 				can_unshare_user_namespace_and_change_root,
 			};
 
-			$entrypoint(worker_dir_path, node_version, Some($worker_version), security_status);
+			$entrypoint(
+				socket_path,
+				worker_dir_path,
+				node_version,
+				Some($worker_version),
+				security_status,
+			);
 		}
 	};
 }
@@ -177,6 +190,7 @@ impl fmt::Display for WorkerKind {
 // the version that this crate was compiled with.
 pub fn worker_event_loop<F, Fut>(
 	worker_kind: WorkerKind,
+	socket_path: PathBuf,
 	#[cfg_attr(not(target_os = "linux"), allow(unused_mut))] mut worker_dir_path: PathBuf,
 	node_version: Option<&str>,
 	worker_version: Option<&str>,
@@ -190,6 +204,7 @@ pub fn worker_event_loop<F, Fut>(
 	gum::debug!(
 		target: LOG_TARGET,
 		%worker_pid,
+		?socket_path,
 		?worker_dir_path,
 		?security_status,
 		"starting pvf worker ({})",
@@ -237,12 +252,9 @@ pub fn worker_event_loop<F, Fut>(
 	}
 
 	// Connect to the socket.
-	let socket_path = worker_dir::socket(&worker_dir_path);
 	let stream = || -> std::io::Result<UnixStream> {
 		let stream = UnixStream::connect(&socket_path)?;
-		// Remove the socket here. We don't also need to do this on the host-side; on failed
-		// rendezvous, the host will delete the whole worker dir.
-		std::fs::remove_file(&socket_path)?;
+		let _ = std::fs::remove_file(&socket_path);
 		Ok(stream)
 	}();
 	let stream = match stream {
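
The `--socket-path` flag above is handled by the same hand-rolled argument loop the macro already uses for `--worker-dir-path`: a flag that takes a value consumes the following argument and bumps the index twice. A stripped-down, runnable sketch of that pattern (hypothetical helper, not the macro itself):

```rust
use std::path::{Path, PathBuf};

// Hypothetical helper mirroring the parsing loop in decl_worker_main!.
fn parse_socket_path(args: &[String]) -> Option<PathBuf> {
    let mut socket_path = None;
    let mut i = 0;
    while i < args.len() {
        match args[i].as_str() {
            // Like the macro, this assumes the value follows the flag; a
            // trailing bare flag would panic on the out-of-bounds index.
            "--socket-path" => {
                socket_path = Some(args[i + 1].as_str());
                i += 1;
            },
            _ => (),
        }
        i += 1;
    }
    socket_path.map(|s| Path::new(s).to_owned())
}

fn main() {
    let args: Vec<String> = vec!["--socket-path".into(), "/tmp/pvf.sock".into()];
    assert_eq!(parse_socket_path(&args), Some(PathBuf::from("/tmp/pvf.sock")));
}
```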
diff --git a/polkadot/node/core/pvf/common/src/worker/security.rs b/polkadot/node/core/pvf/common/src/worker/security.rs
index b7abf028f9410..1b76141774485 100644
--- a/polkadot/node/core/pvf/common/src/worker/security.rs
+++ b/polkadot/node/core/pvf/common/src/worker/security.rs
@@ -223,13 +223,22 @@ pub mod landlock {
 	/// Landlock ABI version. We use ABI V1 because:
 	///
 	/// 1. It is supported by our reference kernel version.
-	/// 2. Later versions do not (yet) provide additional security.
+	/// 2. Later versions do not (yet) provide additional security that would benefit us.
 	///
-	/// # Versions (as of June 2023)
+	/// # Versions (as of October 2023)
 	///
 	/// - Polkadot reference kernel version: 5.16+
-	/// - ABI V1: 5.13 - introduces	landlock, including full restrictions on file reads
-	/// - ABI V2: 5.19 - adds ability to configure file renaming (not used by us)
+	///
+	/// - ABI V1: kernel 5.13 - Introduces landlock, including full restrictions on file reads.
+	///
+	/// - ABI V2: kernel 5.19 - Adds the ability to prevent file renaming. This does not help us:
+	///   during execution an attacker can only affect the name of a symlinked artifact, not the
+	///   original one.
+	///
+	/// - ABI V3: kernel 6.2 - Adds the ability to prevent file truncation. During execution, this
+	///   can prevent attackers from affecting a symlinked artifact. We don't strictly need this, as
+	///   we plan to check for file integrity anyway; see
+	///   <https://github.com/paritytech/polkadot-sdk/issues/677>.
 	///
 	/// # Determinism
 	///
@@ -335,7 +344,7 @@ pub mod landlock {
 		A: Into<BitFlags<AccessFs>>,
 	{
 		let mut ruleset =
-			Ruleset::new().handle_access(AccessFs::from_all(LANDLOCK_ABI))?.create()?;
+			Ruleset::default().handle_access(AccessFs::from_all(LANDLOCK_ABI))?.create()?;
 		for (fs_path, access_bits) in fs_exceptions {
 			let paths = &[fs_path.as_ref().to_owned()];
 			let mut rules = path_beneath_rules(paths, access_bits).peekable();
@@ -466,5 +475,38 @@ pub mod landlock {
 
 			assert!(handle.join().is_ok());
 		}
+
+		// Test that a thread restricted under our landlock ABI version can still truncate files
+		// (truncation is only restricted from ABI V3 onwards).
+		#[test]
+		fn restricted_thread_can_truncate_file() {
+			// TODO: This would be nice: <https://github.com/rust-lang/rust/issues/68007>.
+			if !check_is_fully_enabled() {
+				return
+			}
+
+			// Restricted thread can truncate file.
+			let handle =
+				thread::spawn(|| {
+					// Create and write a file. This should succeed before any landlock
+					// restrictions are applied.
+					const TEXT: &str = "foo";
+					let tmpfile = tempfile::NamedTempFile::new().unwrap();
+					let path = tmpfile.path();
+
+					fs::write(path, TEXT).unwrap();
+
+					// Apply Landlock with all exceptions under the current ABI.
+					let status = try_restrict(vec![(path, AccessFs::from_all(LANDLOCK_ABI))]);
+					if !matches!(status, Ok(RulesetStatus::FullyEnforced)) {
+						panic!("Ruleset should be enforced since we checked if landlock is enabled: {:?}", status);
+					}
+
+					// Try to truncate the file.
+					let result = tmpfile.as_file().set_len(0);
+					assert!(result.is_ok());
+				});
+
+			assert!(handle.join().is_ok());
+		}
 	}
 }
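
For reviewers unfamiliar with the landlock 0.3 crate, here is a sketch of the ruleset flow that `try_restrict` follows above (`Ruleset::default()`, declared accesses, path rules, then self-restriction). The trait imports and the read-only access set are assumptions drawn from the crate docs, not code from this PR:

```rust
use landlock::{
    path_beneath_rules, Access, AccessFs, Ruleset, RulesetAttr, RulesetCreatedAttr, RulesetStatus,
    ABI,
};

fn restrict_to_read(path: &str) -> Result<RulesetStatus, Box<dyn std::error::Error>> {
    let abi = ABI::V1;
    let status = Ruleset::default()
        // Declare which access types the ruleset handles; anything handled
        // here but not allowed by a rule below is denied after restrict_self.
        .handle_access(AccessFs::from_all(abi))?
        .create()?
        // Allow read-type access beneath the given path only.
        .add_rules(path_beneath_rules(&[path], AccessFs::from_read(abi)))?
        .restrict_self()?;
    Ok(status.ruleset)
}

fn main() {
    // On a kernel with Landlock ABI V1+ this should report FullyEnforced; on
    // older kernels the crate reports the ruleset as NotEnforced instead.
    match restrict_to_read("/tmp") {
        Ok(ruleset) => println!("landlock: {:?}", ruleset),
        Err(e) => println!("landlock unavailable: {}", e),
    }
}
```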
diff --git a/polkadot/node/core/pvf/common/src/worker_dir.rs b/polkadot/node/core/pvf/common/src/worker_dir.rs
index c2610a4d11285..1cdf43a61e48b 100644
--- a/polkadot/node/core/pvf/common/src/worker_dir.rs
+++ b/polkadot/node/core/pvf/common/src/worker_dir.rs
@@ -20,7 +20,6 @@ use std::path::{Path, PathBuf};
 
 const WORKER_EXECUTE_ARTIFACT_NAME: &str = "artifact";
 const WORKER_PREPARE_TMP_ARTIFACT_NAME: &str = "tmp-artifact";
-const WORKER_SOCKET_NAME: &str = "socket";
 
 pub fn execute_artifact(worker_dir_path: &Path) -> PathBuf {
 	worker_dir_path.join(WORKER_EXECUTE_ARTIFACT_NAME)
@@ -29,7 +28,3 @@ pub fn execute_artifact(worker_dir_path: &Path) -> PathBuf {
 pub fn prepare_tmp_artifact(worker_dir_path: &Path) -> PathBuf {
 	worker_dir_path.join(WORKER_PREPARE_TMP_ARTIFACT_NAME)
 }
-
-pub fn socket(worker_dir_path: &Path) -> PathBuf {
-	worker_dir_path.join(WORKER_SOCKET_NAME)
-}
diff --git a/polkadot/node/core/pvf/execute-worker/src/lib.rs b/polkadot/node/core/pvf/execute-worker/src/lib.rs
index 9d7bfdf286699..02eaedb96f28e 100644
--- a/polkadot/node/core/pvf/execute-worker/src/lib.rs
+++ b/polkadot/node/core/pvf/execute-worker/src/lib.rs
@@ -111,6 +111,8 @@ fn send_response(stream: &mut UnixStream, response: Response) -> io::Result<()>
 ///
 /// # Parameters
 ///
+/// - `socket_path`: specifies the path to the socket used to communicate with the host.
+///
 /// - `worker_dir_path`: specifies the path to the worker-specific temporary directory.
 ///
 /// - `node_version`: if `Some`, is checked against the `worker_version`. A mismatch results in
@@ -121,6 +123,7 @@ fn send_response(stream: &mut UnixStream, response: Response) -> io::Result<()>
 ///
 /// - `security_status`: contains the detected status of security features.
 pub fn worker_entrypoint(
+	socket_path: PathBuf,
 	worker_dir_path: PathBuf,
 	node_version: Option<&str>,
 	worker_version: Option<&str>,
@@ -128,6 +131,7 @@ pub fn worker_entrypoint(
 ) {
 	worker_event_loop(
 		WorkerKind::Execute,
+		socket_path,
 		worker_dir_path,
 		node_version,
 		worker_version,
diff --git a/polkadot/node/core/pvf/prepare-worker/src/lib.rs b/polkadot/node/core/pvf/prepare-worker/src/lib.rs
index a24f5024722bb..fcc7f6754a7e2 100644
--- a/polkadot/node/core/pvf/prepare-worker/src/lib.rs
+++ b/polkadot/node/core/pvf/prepare-worker/src/lib.rs
@@ -87,6 +87,8 @@ fn send_response(stream: &mut UnixStream, result: PrepareResult) -> io::Result<(
 ///
 /// # Parameters
 ///
+/// - `socket_path`: specifies the path to the socket used to communicate with the host.
+///
 /// - `worker_dir_path`: specifies the path to the worker-specific temporary directory.
 ///
 /// - `node_version`: if `Some`, is checked against the `worker_version`. A mismatch results in
@@ -116,6 +118,7 @@ fn send_response(stream: &mut UnixStream, result: PrepareResult) -> io::Result<(
 /// 7. Send the result of preparation back to the host. If any error occurred in the above steps, we
 ///    send that in the `PrepareResult`.
 pub fn worker_entrypoint(
+	socket_path: PathBuf,
 	worker_dir_path: PathBuf,
 	node_version: Option<&str>,
 	worker_version: Option<&str>,
@@ -123,6 +126,7 @@ pub fn worker_entrypoint(
 ) {
 	worker_event_loop(
 		WorkerKind::Prepare,
+		socket_path,
 		worker_dir_path,
 		node_version,
 		worker_version,
diff --git a/polkadot/node/core/pvf/src/worker_intf.rs b/polkadot/node/core/pvf/src/worker_intf.rs
index 9825506ba88f6..bd85d84055ce5 100644
--- a/polkadot/node/core/pvf/src/worker_intf.rs
+++ b/polkadot/node/core/pvf/src/worker_intf.rs
@@ -20,7 +20,7 @@ use crate::LOG_TARGET;
 use futures::FutureExt as _;
 use futures_timer::Delay;
 use pin_project::pin_project;
-use polkadot_node_core_pvf_common::{worker_dir, SecurityStatus};
+use polkadot_node_core_pvf_common::SecurityStatus;
 use rand::Rng;
 use std::{
 	fmt, mem,
@@ -67,71 +67,99 @@ pub async fn spawn_with_program_path(
 ) -> Result<(IdleWorker, WorkerHandle), SpawnErr> {
 	let program_path = program_path.into();
 	let worker_dir = WorkerDir::new(debug_id, cache_path).await?;
-	let socket_path = worker_dir::socket(&worker_dir.path);
-
 	let extra_args: Vec<String> = extra_args.iter().map(|arg| arg.to_string()).collect();
 
-	let listener = UnixListener::bind(&socket_path).map_err(|err| {
-		gum::warn!(
-			target: LOG_TARGET,
-			%debug_id,
-			?program_path,
-			?extra_args,
-			?worker_dir,
-			?socket_path,
-			"cannot bind unix socket: {:?}",
-			err,
-		);
-		SpawnErr::Bind
-	})?;
-
-	let handle = WorkerHandle::spawn(&program_path, &extra_args, &worker_dir.path, security_status)
-		.map_err(|err| {
-			gum::warn!(
-				target: LOG_TARGET,
-				%debug_id,
-				?program_path,
-				?extra_args,
-				?worker_dir.path,
-				?socket_path,
-				"cannot spawn a worker: {:?}",
-				err,
-			);
-			SpawnErr::ProcessSpawn
-		})?;
-
-	let worker_dir_path = worker_dir.path.clone();
-	futures::select! {
-		accept_result = listener.accept().fuse() => {
-			let (stream, _) = accept_result.map_err(|err| {
+	with_transient_socket_path(debug_id, |socket_path| {
+		let socket_path = socket_path.to_owned();
+
+		async move {
+			let listener = UnixListener::bind(&socket_path).map_err(|err| {
 				gum::warn!(
 					target: LOG_TARGET,
 					%debug_id,
 					?program_path,
 					?extra_args,
-					?worker_dir_path,
+					?worker_dir,
 					?socket_path,
-					"cannot accept a worker: {:?}",
+					"cannot bind unix socket: {:?}",
 					err,
 				);
-				SpawnErr::Accept
+				SpawnErr::Bind
 			})?;
-			Ok((IdleWorker { stream, pid: handle.id(), worker_dir }, handle))
-		}
-		_ = Delay::new(spawn_timeout).fuse() => {
-			gum::warn!(
-				target: LOG_TARGET,
-				%debug_id,
-				?program_path,
-				?extra_args,
-				?worker_dir_path,
-				?socket_path,
-				?spawn_timeout,
-				"spawning and connecting to socket timed out",
-			);
-			Err(SpawnErr::AcceptTimeout)
+
+			let handle = WorkerHandle::spawn(
+				&program_path,
+				&extra_args,
+				&socket_path,
+				&worker_dir.path,
+				security_status,
+			)
+			.map_err(|err| {
+				gum::warn!(
+					target: LOG_TARGET,
+					%debug_id,
+					?program_path,
+					?extra_args,
+					?worker_dir.path,
+					?socket_path,
+					"cannot spawn a worker: {:?}",
+					err,
+				);
+				SpawnErr::ProcessSpawn
+			})?;
+
+			let worker_dir_path = worker_dir.path.clone();
+			futures::select! {
+				accept_result = listener.accept().fuse() => {
+					let (stream, _) = accept_result.map_err(|err| {
+						gum::warn!(
+							target: LOG_TARGET,
+							%debug_id,
+							?program_path,
+							?extra_args,
+							?worker_dir_path,
+							?socket_path,
+							"cannot accept a worker: {:?}",
+							err,
+						);
+						SpawnErr::Accept
+					})?;
+					Ok((IdleWorker { stream, pid: handle.id(), worker_dir }, handle))
+				}
+				_ = Delay::new(spawn_timeout).fuse() => {
+					gum::warn!(
+						target: LOG_TARGET,
+						%debug_id,
+						?program_path,
+						?extra_args,
+						?worker_dir_path,
+						?socket_path,
+						?spawn_timeout,
+						"spawning and connecting to socket timed out",
+					);
+					Err(SpawnErr::AcceptTimeout)
+				}
+			}
 		}
-	}
+	})
+	.await
+}
+
+async fn with_transient_socket_path<T, F, Fut>(debug_id: &'static str, f: F) -> Result<T, SpawnErr>
+where
+	F: FnOnce(&Path) -> Fut,
+	Fut: futures::Future<Output = Result<T, SpawnErr>> + 'static,
+{
+	let socket_path = tmppath(&format!("pvf-host-{}", debug_id))
+		.await
+		.map_err(|_| SpawnErr::TmpPath)?;
+	let result = f(&socket_path).await;
+
+	// Best-effort removal of the socket file. Under normal circumstances the socket is removed
+	// by the worker; we remove it here as well, in case the rendezvous failed.
+	let _ = tokio::fs::remove_file(socket_path).await;
+
+	result
 }
 
 /// Returns a path under the given `dir`. The path name will start with the given prefix.
@@ -169,7 +197,6 @@ pub async fn tmppath_in(prefix: &str, dir: &Path) -> io::Result<PathBuf> {
 }
 
 /// The same as [`tmppath_in`], but uses [`std::env::temp_dir`] as the directory.
-#[cfg(test)]
 pub async fn tmppath(prefix: &str) -> io::Result<PathBuf> {
 	let temp_dir = PathBuf::from(std::env::temp_dir());
 	tmppath_in(prefix, &temp_dir).await
@@ -234,6 +261,7 @@ impl WorkerHandle {
 	fn spawn(
 		program: impl AsRef<Path>,
 		extra_args: &[String],
+		socket_path: impl AsRef<Path>,
 		worker_dir_path: impl AsRef<Path>,
 		security_status: SecurityStatus,
 	) -> io::Result<Self> {
@@ -257,6 +285,8 @@ impl WorkerHandle {
 		}
 		let mut child = command
 			.args(extra_args)
+			.arg("--socket-path")
+			.arg(socket_path.as_ref().as_os_str())
 			.arg("--worker-dir-path")
 			.arg(worker_dir_path.as_ref().as_os_str())
 			.args(&security_args)
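
The socket now lives at a transient path in the system temp directory rather than inside the worker dir, with `with_transient_socket_path` guaranteeing best-effort cleanup around the closure. A simplified, synchronous analogue of that wrapper (stand-in names; the real version is async and returns `Result<_, SpawnErr>`):

```rust
use std::path::{Path, PathBuf};

// Hand a fresh path to a closure and remove the file afterwards no matter
// how the closure exits (best effort, mirroring the async version above).
fn with_transient_path<T>(prefix: &str, f: impl FnOnce(&Path) -> T) -> T {
    let path: PathBuf =
        std::env::temp_dir().join(format!("{}-{}", prefix, std::process::id()));
    let result = f(&path);
    // Best-effort cleanup; in the real code the worker usually removes the
    // socket itself, so a failure here is ignored.
    let _ = std::fs::remove_file(&path);
    result
}

fn main() {
    let contents = with_transient_path("pvf-host-example", |path| {
        std::fs::write(path, b"hello").unwrap();
        std::fs::read(path).unwrap()
    });
    assert_eq!(contents, b"hello");
}
```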
diff --git a/polkadot/node/gum/proc-macro/Cargo.toml b/polkadot/node/gum/proc-macro/Cargo.toml
index 83d064cadbed3..1ffaf6160ba2b 100644
--- a/polkadot/node/gum/proc-macro/Cargo.toml
+++ b/polkadot/node/gum/proc-macro/Cargo.toml
@@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 proc-macro = true
 
 [dependencies]
-syn = { version = "2.0.37", features = ["full", "extra-traits"] }
+syn = { version = "2.0.38", features = ["full", "extra-traits"] }
 quote = "1.0.28"
 proc-macro2 = "1.0.56"
 proc-macro-crate = "1.1.3"
diff --git a/polkadot/node/malus/Cargo.toml b/polkadot/node/malus/Cargo.toml
index 42dd4af73c130..9ce725f168221 100644
--- a/polkadot/node/malus/Cargo.toml
+++ b/polkadot/node/malus/Cargo.toml
@@ -40,7 +40,7 @@ assert_matches = "1.5"
 async-trait = "0.1.57"
 sp-keystore = { path = "../../../substrate/primitives/keystore" }
 sp-core = { path = "../../../substrate/primitives/core" }
-clap = { version = "4.4.4", features = ["derive"] }
+clap = { version = "4.4.6", features = ["derive"] }
 futures = "0.3.21"
 futures-timer = "3.0.2"
 gum = { package = "tracing-gum", path = "../gum" }
diff --git a/polkadot/node/overseer/examples/minimal-example.rs b/polkadot/node/overseer/examples/minimal-example.rs
index e78941776d5ec..cffdfd9f8aa14 100644
--- a/polkadot/node/overseer/examples/minimal-example.rs
+++ b/polkadot/node/overseer/examples/minimal-example.rs
@@ -163,7 +163,6 @@ fn main() {
 			.unwrap();
 
 		let overseer_fut = overseer.run().fuse();
-		let timer_stream = timer_stream;
 
 		pin_mut!(timer_stream);
 		pin_mut!(overseer_fut);
diff --git a/polkadot/node/service/chain-specs/rococo.json b/polkadot/node/service/chain-specs/rococo.json
index 43dc959b57677..2648063641c91 100644
--- a/polkadot/node/service/chain-specs/rococo.json
+++ b/polkadot/node/service/chain-specs/rococo.json
@@ -3,14 +3,22 @@
   "id": "rococo_v2_2",
   "chainType": "Live",
   "bootNodes": [
-    "/dns/rococo-bootnode-0.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWGikJMBmRiG5ofCqn8aijCijgfmZR5H9f53yUF3srm6Nm",
-    "/dns/rococo-bootnode-1.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWLDfH9mHRCidrd5NfQjp7rRMUcJSEUwSvEKyu7xU2cG3d",
-    "/dns/rococo-bootnode-2.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWSikgbrcWjVgSed7r1uXk4TeAieDnHKtrPDVZBu5XkQha",
-    "/dns/rococo-bootnode-3.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWPeKuW1BBPv4pNr8xqEv7jqy7rQnS3oq9U7xTCvj9qt2k",
-    "/dns/rococo-bootnode-4.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWNy7K8TNaP2Whcp3tsjBVUg2HcKMUvAArsimjvd1g31w4",
-    "/dns/rococo-bootnode-5.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWAVV9DZfvJp2brvs5zcQDTBFxNmEFJKy2dsvezWL4Bmy8",
-    "/dns/rococo-bootnode-6.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWM3hvXvaShyp7drQCavFHuwobkYdnCp2uHU5iRRAQwsw2",
-    "/dns/rococo-bootnode-7.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWSbGtxfWCwn1tdmfZYESbmxzbTG2LKwKUrioDaZBcdMY4",
+    "/dns/rococo-bootnode-0.polkadot.io/tcp/30333/p2p/12D3KooWGikJMBmRiG5ofCqn8aijCijgfmZR5H9f53yUF3srm6Nm",
+    "/dns/rococo-bootnode-1.polkadot.io/tcp/30333/p2p/12D3KooWLDfH9mHRCidrd5NfQjp7rRMUcJSEUwSvEKyu7xU2cG3d",
+    "/dns/rococo-bootnode-2.polkadot.io/tcp/30333/p2p/12D3KooWSikgbrcWjVgSed7r1uXk4TeAieDnHKtrPDVZBu5XkQha",
+    "/dns/rococo-bootnode-3.polkadot.io/tcp/30333/p2p/12D3KooWPeKuW1BBPv4pNr8xqEv7jqy7rQnS3oq9U7xTCvj9qt2k",
+    "/dns/rococo-bootnode-4.polkadot.io/tcp/30333/p2p/12D3KooWNy7K8TNaP2Whcp3tsjBVUg2HcKMUvAArsimjvd1g31w4",
+    "/dns/rococo-bootnode-5.polkadot.io/tcp/30333/p2p/12D3KooWAVV9DZfvJp2brvs5zcQDTBFxNmEFJKy2dsvezWL4Bmy8",
+    "/dns/rococo-bootnode-6.polkadot.io/tcp/30333/p2p/12D3KooWM3hvXvaShyp7drQCavFHuwobkYdnCp2uHU5iRRAQwsw2",
+    "/dns/rococo-bootnode-7.polkadot.io/tcp/30333/p2p/12D3KooWSbGtxfWCwn1tdmfZYESbmxzbTG2LKwKUrioDaZBcdMY4",
+    "/dns/rococo-bootnode-0.polkadot.io/tcp/30334/ws/p2p/12D3KooWGikJMBmRiG5ofCqn8aijCijgfmZR5H9f53yUF3srm6Nm",
+    "/dns/rococo-bootnode-1.polkadot.io/tcp/30334/ws/p2p/12D3KooWLDfH9mHRCidrd5NfQjp7rRMUcJSEUwSvEKyu7xU2cG3d",
+    "/dns/rococo-bootnode-2.polkadot.io/tcp/30334/ws/p2p/12D3KooWSikgbrcWjVgSed7r1uXk4TeAieDnHKtrPDVZBu5XkQha",
+    "/dns/rococo-bootnode-3.polkadot.io/tcp/30334/ws/p2p/12D3KooWPeKuW1BBPv4pNr8xqEv7jqy7rQnS3oq9U7xTCvj9qt2k",
+    "/dns/rococo-bootnode-4.polkadot.io/tcp/30334/ws/p2p/12D3KooWNy7K8TNaP2Whcp3tsjBVUg2HcKMUvAArsimjvd1g31w4",
+    "/dns/rococo-bootnode-5.polkadot.io/tcp/30334/ws/p2p/12D3KooWAVV9DZfvJp2brvs5zcQDTBFxNmEFJKy2dsvezWL4Bmy8",
+    "/dns/rococo-bootnode-6.polkadot.io/tcp/30334/ws/p2p/12D3KooWM3hvXvaShyp7drQCavFHuwobkYdnCp2uHU5iRRAQwsw2",
+    "/dns/rococo-bootnode-7.polkadot.io/tcp/30334/ws/p2p/12D3KooWSbGtxfWCwn1tdmfZYESbmxzbTG2LKwKUrioDaZBcdMY4",
     "/dns/rococo-bootnode-0.polkadot.io/tcp/443/wss/p2p/12D3KooWGikJMBmRiG5ofCqn8aijCijgfmZR5H9f53yUF3srm6Nm",
     "/dns/rococo-bootnode-1.polkadot.io/tcp/443/wss/p2p/12D3KooWLDfH9mHRCidrd5NfQjp7rRMUcJSEUwSvEKyu7xU2cG3d",
     "/dns/rococo-bootnode-2.polkadot.io/tcp/443/wss/p2p/12D3KooWSikgbrcWjVgSed7r1uXk4TeAieDnHKtrPDVZBu5XkQha",
diff --git a/polkadot/node/service/chain-specs/westend.json b/polkadot/node/service/chain-specs/westend.json
index e57786f78a641..fd1f4550127fc 100644
--- a/polkadot/node/service/chain-specs/westend.json
+++ b/polkadot/node/service/chain-specs/westend.json
@@ -2,16 +2,18 @@
   "name": "Westend",
   "id": "westend2",
   "bootNodes": [
-    "/dns/0.westend.paritytech.net/tcp/30333/p2p/12D3KooWKer94o1REDPtAhjtYR4SdLehnSrN8PEhBnZm5NBoCrMC",
-    "/dns/0.westend.paritytech.net/tcp/30334/ws/p2p/12D3KooWKer94o1REDPtAhjtYR4SdLehnSrN8PEhBnZm5NBoCrMC",
-    "/dns/1.westend.paritytech.net/tcp/30333/p2p/12D3KooWPVPzs42GvRBShdUMtFsk4SvnByrSdWqb6aeAAHvLMSLS",
-    "/dns/1.westend.paritytech.net/tcp/30334/ws/p2p/12D3KooWPVPzs42GvRBShdUMtFsk4SvnByrSdWqb6aeAAHvLMSLS",
-    "/dns/2.westend.paritytech.net/tcp/30333/p2p/12D3KooWByVpK92hMi9CzTjyFg9cPHDU5ariTM3EPMq9vdh5S5Po",
-    "/dns/2.westend.paritytech.net/tcp/30334/ws/p2p/12D3KooWByVpK92hMi9CzTjyFg9cPHDU5ariTM3EPMq9vdh5S5Po",
-    "/dns/3.westend.paritytech.net/tcp/30333/p2p/12D3KooWGi1tCpKXLMYED9y28QXLnwgD4neYb1Arqq4QpeV1Sv3K",
-    "/dns/3.westend.paritytech.net/tcp/30334/ws/p2p/12D3KooWGi1tCpKXLMYED9y28QXLnwgD4neYb1Arqq4QpeV1Sv3K",
-    "/dns/westend-connect-0.polkadot.io/tcp/443/wss/p2p/12D3KooWNg8iUqhux7X7voNU9Nty5pzehrFJwkQwg1CJnqN3CTzE",
-    "/dns/westend-connect-1.polkadot.io/tcp/443/wss/p2p/12D3KooWAq2A7UNFS6725XFatD5QW7iYBezTLdAUx1SmRkxN79Ne",
+    "/dns/westend-bootnode-0.polkadot.io/tcp/30333/p2p/12D3KooWKer94o1REDPtAhjtYR4SdLehnSrN8PEhBnZm5NBoCrMC",
+    "/dns/westend-bootnode-0.polkadot.io/tcp/30334/ws/p2p/12D3KooWKer94o1REDPtAhjtYR4SdLehnSrN8PEhBnZm5NBoCrMC",
+    "/dns/westend-bootnode-0.polkadot.io/tcp/443/wss/p2p/12D3KooWKer94o1REDPtAhjtYR4SdLehnSrN8PEhBnZm5NBoCrMC",
+    "/dns/westend-bootnode-1.polkadot.io/tcp/30333/p2p/12D3KooWPVPzs42GvRBShdUMtFsk4SvnByrSdWqb6aeAAHvLMSLS",
+    "/dns/westend-bootnode-1.polkadot.io/tcp/30334/ws/p2p/12D3KooWPVPzs42GvRBShdUMtFsk4SvnByrSdWqb6aeAAHvLMSLS",
+    "/dns/westend-bootnode-1.polkadot.io/tcp/443/wss/p2p/12D3KooWPVPzs42GvRBShdUMtFsk4SvnByrSdWqb6aeAAHvLMSLS",
+    "/dns/westend-bootnode-2.polkadot.io/tcp/30333/p2p/12D3KooWByVpK92hMi9CzTjyFg9cPHDU5ariTM3EPMq9vdh5S5Po",
+    "/dns/westend-bootnode-2.polkadot.io/tcp/30334/ws/p2p/12D3KooWByVpK92hMi9CzTjyFg9cPHDU5ariTM3EPMq9vdh5S5Po",
+    "/dns/westend-bootnode-2.polkadot.io/tcp/443/wss/p2p/12D3KooWByVpK92hMi9CzTjyFg9cPHDU5ariTM3EPMq9vdh5S5Po",
+    "/dns/westend-bootnode-3.polkadot.io/tcp/30333/p2p/12D3KooWGi1tCpKXLMYED9y28QXLnwgD4neYb1Arqq4QpeV1Sv3K",
+    "/dns/westend-bootnode-3.polkadot.io/tcp/30334/ws/p2p/12D3KooWGi1tCpKXLMYED9y28QXLnwgD4neYb1Arqq4QpeV1Sv3K",
+    "/dns/westend-bootnode-3.polkadot.io/tcp/443/wss/p2p/12D3KooWGi1tCpKXLMYED9y28QXLnwgD4neYb1Arqq4QpeV1Sv3K",
     "/dns/boot.stake.plus/tcp/32333/p2p/12D3KooWK8fjVoSvMq5copQYMsdYreSGPGgcMbGMgbMDPfpf3sm7",
     "/dns/boot.stake.plus/tcp/32334/wss/p2p/12D3KooWK8fjVoSvMq5copQYMsdYreSGPGgcMbGMgbMDPfpf3sm7",
     "/dns/boot-node.helikon.io/tcp/7080/p2p/12D3KooWRFDPyT8vA8mLzh6dJoyujn4QNjeqi6Ch79eSMz9beKXC",
diff --git a/polkadot/parachain/test-parachains/adder/collator/Cargo.toml b/polkadot/parachain/test-parachains/adder/collator/Cargo.toml
index 73b1fab529ef4..70f2ae769a8f4 100644
--- a/polkadot/parachain/test-parachains/adder/collator/Cargo.toml
+++ b/polkadot/parachain/test-parachains/adder/collator/Cargo.toml
@@ -13,7 +13,7 @@ path = "src/main.rs"
 
 [dependencies]
 parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] }
-clap = { version = "4.4.4", features = ["derive"] }
+clap = { version = "4.4.6", features = ["derive"] }
 futures = "0.3.21"
 futures-timer = "3.0.2"
 log = "0.4.17"
diff --git a/polkadot/parachain/test-parachains/undying/collator/Cargo.toml b/polkadot/parachain/test-parachains/undying/collator/Cargo.toml
index 3fbed4046bded..4569d4e153b19 100644
--- a/polkadot/parachain/test-parachains/undying/collator/Cargo.toml
+++ b/polkadot/parachain/test-parachains/undying/collator/Cargo.toml
@@ -13,7 +13,7 @@ path = "src/main.rs"
 
 [dependencies]
 parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] }
-clap = { version = "4.4.4", features = ["derive"] }
+clap = { version = "4.4.6", features = ["derive"] }
 futures = "0.3.21"
 futures-timer = "3.0.2"
 log = "0.4.17"
diff --git a/polkadot/runtime/common/Cargo.toml b/polkadot/runtime/common/Cargo.toml
index 17617bf4ada3f..2d1aad6a575e7 100644
--- a/polkadot/runtime/common/Cargo.toml
+++ b/polkadot/runtime/common/Cargo.toml
@@ -38,6 +38,7 @@ pallet-timestamp = { path = "../../../substrate/frame/timestamp", default-featur
 pallet-vesting = { path = "../../../substrate/frame/vesting", default-features = false }
 pallet-transaction-payment = { path = "../../../substrate/frame/transaction-payment", default-features = false }
 pallet-treasury = { path = "../../../substrate/frame/treasury", default-features = false }
+pallet-asset-rate = { path = "../../../substrate/frame/asset-rate", default-features = false }
 pallet-election-provider-multi-phase = { path = "../../../substrate/frame/election-provider-multi-phase", default-features = false }
 frame-election-provider-support = { path = "../../../substrate/frame/election-provider-support", default-features = false }
 
@@ -50,6 +51,7 @@ runtime-parachains = { package = "polkadot-runtime-parachains", path = "../parac
 
 slot-range-helper = { path = "slot_range_helper", default-features = false }
 xcm = { package = "staging-xcm", path = "../../xcm", default-features = false }
+xcm-builder = { package = "staging-xcm-builder", path = "../../xcm/xcm-builder", default-features = false }
 
 [dev-dependencies]
 hex-literal = "0.4.1"
@@ -74,6 +76,7 @@ std = [
 	"inherents/std",
 	"libsecp256k1/std",
 	"log/std",
+	"pallet-asset-rate/std",
 	"pallet-authorship/std",
 	"pallet-balances/std",
 	"pallet-election-provider-multi-phase/std",
@@ -100,6 +103,7 @@ std = [
 	"sp-session/std",
 	"sp-staking/std",
 	"sp-std/std",
+	"xcm-builder/std",
 	"xcm/std",
 ]
 runtime-benchmarks = [
@@ -109,6 +113,7 @@ runtime-benchmarks = [
 	"frame-system/runtime-benchmarks",
 	"libsecp256k1/hmac",
 	"libsecp256k1/static-context",
+	"pallet-asset-rate/runtime-benchmarks",
 	"pallet-babe/runtime-benchmarks",
 	"pallet-balances/runtime-benchmarks",
 	"pallet-election-provider-multi-phase/runtime-benchmarks",
@@ -121,12 +126,14 @@ runtime-benchmarks = [
 	"runtime-parachains/runtime-benchmarks",
 	"sp-runtime/runtime-benchmarks",
 	"sp-staking/runtime-benchmarks",
+	"xcm-builder/runtime-benchmarks",
 ]
 try-runtime = [
 	"frame-election-provider-support/try-runtime",
 	"frame-support-test/try-runtime",
 	"frame-support/try-runtime",
 	"frame-system/try-runtime",
+	"pallet-asset-rate/try-runtime",
 	"pallet-authorship/try-runtime",
 	"pallet-babe?/try-runtime",
 	"pallet-balances/try-runtime",
diff --git a/polkadot/runtime/common/src/impls.rs b/polkadot/runtime/common/src/impls.rs
index 0d0dee2e9ad91..590593745ed04 100644
--- a/polkadot/runtime/common/src/impls.rs
+++ b/polkadot/runtime/common/src/impls.rs
@@ -18,8 +18,10 @@
 
 use crate::NegativeImbalance;
 use frame_support::traits::{Currency, Imbalance, OnUnbalanced};
+use parity_scale_codec::{Decode, Encode, MaxEncodedLen};
 use primitives::Balance;
-use sp_runtime::Perquintill;
+use sp_runtime::{traits::TryConvert, Perquintill, RuntimeDebug};
+use xcm::VersionedMultiLocation;
 
 /// Logic for the author to get a portion of fees.
 pub struct ToAuthor<R>(sp_std::marker::PhantomData<R>);
@@ -98,13 +100,104 @@ pub fn era_payout(
 	(staking_payout, rest)
 }
 
+/// Versioned locatable asset type which contains both an XCM `location` and `asset_id` to identify
+/// an asset that exists on some chain.
+#[derive(
+	Encode, Decode, Eq, PartialEq, Clone, RuntimeDebug, scale_info::TypeInfo, MaxEncodedLen,
+)]
+pub enum VersionedLocatableAsset {
+	#[codec(index = 3)]
+	V3 {
+		/// The (relative) location in which the asset ID is meaningful.
+		location: xcm::v3::MultiLocation,
+		/// The asset's ID.
+		asset_id: xcm::v3::AssetId,
+	},
+}
+
+/// Converts the [`VersionedLocatableAsset`] to the [`xcm_builder::LocatableAssetId`].
+pub struct LocatableAssetConverter;
+impl TryConvert<VersionedLocatableAsset, xcm_builder::LocatableAssetId>
+	for LocatableAssetConverter
+{
+	fn try_convert(
+		asset: VersionedLocatableAsset,
+	) -> Result<xcm_builder::LocatableAssetId, VersionedLocatableAsset> {
+		match asset {
+			VersionedLocatableAsset::V3 { location, asset_id } =>
+				Ok(xcm_builder::LocatableAssetId { asset_id, location }),
+		}
+	}
+}
+
+/// Converts the [`VersionedMultiLocation`] to the [`xcm::latest::MultiLocation`].
+pub struct VersionedMultiLocationConverter;
+impl TryConvert<&VersionedMultiLocation, xcm::latest::MultiLocation>
+	for VersionedMultiLocationConverter
+{
+	fn try_convert(
+		location: &VersionedMultiLocation,
+	) -> Result<xcm::latest::MultiLocation, &VersionedMultiLocation> {
+		let latest = match location.clone() {
+			VersionedMultiLocation::V2(l) => l.try_into().map_err(|_| location)?,
+			VersionedMultiLocation::V3(l) => l,
+		};
+		Ok(latest)
+	}
+}
+
+#[cfg(feature = "runtime-benchmarks")]
+pub mod benchmarks {
+	use super::VersionedLocatableAsset;
+	use pallet_asset_rate::AssetKindFactory;
+	use pallet_treasury::ArgumentsFactory as TreasuryArgumentsFactory;
+	use xcm::prelude::*;
+
+	/// Provides a factory method for the [`VersionedLocatableAsset`].
+	/// The asset's location is a parachain whose ID equals the passed seed.
+	pub struct AssetRateArguments;
+	impl AssetKindFactory<VersionedLocatableAsset> for AssetRateArguments {
+		fn create_asset_kind(seed: u32) -> VersionedLocatableAsset {
+			VersionedLocatableAsset::V3 {
+				location: xcm::v3::MultiLocation::new(0, X1(Parachain(seed))),
+				asset_id: xcm::v3::MultiLocation::new(
+					0,
+					X2(PalletInstance(seed.try_into().unwrap()), GeneralIndex(seed.into())),
+				)
+				.into(),
+			}
+		}
+	}
+
+	/// Provides factory methods for the [`VersionedLocatableAsset`] and the `Beneficiary` given as
+	/// a [`VersionedMultiLocation`]. The asset's location is a parachain whose ID equals the
+	/// passed seed.
+	pub struct TreasuryArguments;
+	impl TreasuryArgumentsFactory<VersionedLocatableAsset, VersionedMultiLocation>
+		for TreasuryArguments
+	{
+		fn create_asset_kind(seed: u32) -> VersionedLocatableAsset {
+			AssetRateArguments::create_asset_kind(seed)
+		}
+		fn create_beneficiary(seed: [u8; 32]) -> VersionedMultiLocation {
+			VersionedMultiLocation::V3(xcm::v3::MultiLocation::new(
+				0,
+				X1(AccountId32 { network: None, id: seed }),
+			))
+		}
+	}
+}
+
 #[cfg(test)]
 mod tests {
 	use super::*;
 	use frame_support::{
 		dispatch::DispatchClass,
 		parameter_types,
-		traits::{ConstU32, FindAuthor},
+		traits::{
+			tokens::{PayFromAccount, UnityAssetBalanceConversion},
+			ConstU32, FindAuthor,
+		},
 		weights::Weight,
 		PalletId,
 	};
@@ -189,6 +282,7 @@ mod tests {
 	parameter_types! {
 		pub const TreasuryPalletId: PalletId = PalletId(*b"py/trsry");
 		pub const MaxApprovals: u32 = 100;
+		pub TreasuryAccount: AccountId = Treasury::account_id();
 	}
 
 	impl pallet_treasury::Config for Test {
@@ -208,6 +302,14 @@ mod tests {
 		type MaxApprovals = MaxApprovals;
 		type WeightInfo = ();
 		type SpendOrigin = frame_support::traits::NeverEnsureOrigin<u64>;
+		type AssetKind = ();
+		type Beneficiary = Self::AccountId;
+		type BeneficiaryLookup = IdentityLookup<Self::AccountId>;
+		type Paymaster = PayFromAccount<Balances, TreasuryAccount>;
+		type BalanceConverter = UnityAssetBalanceConversion;
+		type PayoutPeriod = ConstU64<0>;
+		#[cfg(feature = "runtime-benchmarks")]
+		type BenchmarkHelper = ();
 	}
 
 	pub struct OneAuthor;
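
The two converters above follow `sp_runtime::traits::TryConvert`, whose contract is to hand the input back unchanged when conversion fails. A self-contained analogue with stand-in types (the real code converts between XCM `MultiLocation` versions):

```rust
#[derive(Debug, Clone, PartialEq)]
enum VersionedLocation {
    V2(String),
    V3(String),
}

// Same shape as sp_runtime::traits::TryConvert<A, B>.
trait TryConvert<A, B> {
    fn try_convert(a: A) -> Result<B, A>;
}

struct LatestLocationConverter;
impl TryConvert<VersionedLocation, String> for LatestLocationConverter {
    fn try_convert(loc: VersionedLocation) -> Result<String, VersionedLocation> {
        match loc {
            // Mirrors the real converter: an older version may fail to
            // upgrade, in which case the input is returned unchanged.
            VersionedLocation::V2(l) =>
                if l.starts_with("v2:") {
                    Ok(l["v2:".len()..].to_string())
                } else {
                    Err(VersionedLocation::V2(l))
                },
            VersionedLocation::V3(l) => Ok(l),
        }
    }
}

fn main() {
    assert_eq!(
        LatestLocationConverter::try_convert(VersionedLocation::V3("parachain/1000".into())),
        Ok("parachain/1000".to_string())
    );
    assert!(LatestLocationConverter::try_convert(VersionedLocation::V2("bad".into())).is_err());
}
```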
diff --git a/polkadot/runtime/parachains/src/hrmp.rs b/polkadot/runtime/parachains/src/hrmp.rs
index b3bbcb433c0b2..42592d9d9f149 100644
--- a/polkadot/runtime/parachains/src/hrmp.rs
+++ b/polkadot/runtime/parachains/src/hrmp.rs
@@ -554,14 +554,26 @@ pub mod pallet {
 		///
 		/// Origin must be the `ChannelManager`.
 		#[pallet::call_index(3)]
-		#[pallet::weight(<T as Config>::WeightInfo::force_clean_hrmp(*_inbound, *_outbound))]
+		#[pallet::weight(<T as Config>::WeightInfo::force_clean_hrmp(*num_inbound, *num_outbound))]
 		pub fn force_clean_hrmp(
 			origin: OriginFor<T>,
 			para: ParaId,
-			_inbound: u32,
-			_outbound: u32,
+			num_inbound: u32,
+			num_outbound: u32,
 		) -> DispatchResult {
 			T::ChannelManager::ensure_origin(origin)?;
+
+			ensure!(
+				HrmpIngressChannelsIndex::<T>::decode_len(para).unwrap_or_default() <=
+					num_inbound as usize,
+				Error::<T>::WrongWitness
+			);
+			ensure!(
+				HrmpEgressChannelsIndex::<T>::decode_len(para).unwrap_or_default() <=
+					num_outbound as usize,
+				Error::<T>::WrongWitness
+			);
+
 			Self::clean_hrmp_after_outgoing(&para);
 			Ok(())
 		}
@@ -575,9 +587,16 @@ pub mod pallet {
 		///
 		/// Origin must be the `ChannelManager`.
 		#[pallet::call_index(4)]
-		#[pallet::weight(<T as Config>::WeightInfo::force_process_hrmp_open(*_channels))]
-		pub fn force_process_hrmp_open(origin: OriginFor<T>, _channels: u32) -> DispatchResult {
+		#[pallet::weight(<T as Config>::WeightInfo::force_process_hrmp_open(*channels))]
+		pub fn force_process_hrmp_open(origin: OriginFor<T>, channels: u32) -> DispatchResult {
 			T::ChannelManager::ensure_origin(origin)?;
+
+			ensure!(
+				HrmpOpenChannelRequestsList::<T>::decode_len().unwrap_or_default() as u32 <=
+					channels,
+				Error::<T>::WrongWitness
+			);
+
 			let host_config = configuration::Pallet::<T>::config();
 			Self::process_hrmp_open_channel_requests(&host_config);
 			Ok(())
@@ -592,9 +611,16 @@ pub mod pallet {
 		///
 		/// Origin must be the `ChannelManager`.
 		#[pallet::call_index(5)]
-		#[pallet::weight(<T as Config>::WeightInfo::force_process_hrmp_close(*_channels))]
-		pub fn force_process_hrmp_close(origin: OriginFor<T>, _channels: u32) -> DispatchResult {
+		#[pallet::weight(<T as Config>::WeightInfo::force_process_hrmp_close(*channels))]
+		pub fn force_process_hrmp_close(origin: OriginFor<T>, channels: u32) -> DispatchResult {
 			T::ChannelManager::ensure_origin(origin)?;
+
+			ensure!(
+				HrmpCloseChannelRequestsList::<T>::decode_len().unwrap_or_default() as u32 <=
+					channels,
+				Error::<T>::WrongWitness
+			);
+
 			Self::process_hrmp_close_channel_requests();
 			Ok(())
 		}
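
The new `ensure!` checks turn the previously ignored parameters into genuine witness data: the caller pre-declares upper bounds, the weight formula charges for those bounds, and the call refuses to execute if the actual storage sizes exceed them. A self-contained sketch of the pattern with a stand-in error type:

```rust
#[derive(Debug, PartialEq)]
enum Error {
    WrongWitness,
}

// Same shape as ensure!(decode_len() <= channels, Error::WrongWitness).
fn force_process(actual_channels: usize, witnessed_channels: u32) -> Result<(), Error> {
    if actual_channels as u32 > witnessed_channels {
        // The caller underpaid: the declared bound (which the fee was
        // computed from) is smaller than reality, so refuse to proceed.
        return Err(Error::WrongWitness);
    }
    // ... process the channels ...
    Ok(())
}

fn main() {
    assert_eq!(force_process(3, 5), Ok(()));
    assert_eq!(force_process(7, 5), Err(Error::WrongWitness));
}
```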
diff --git a/polkadot/runtime/rococo/Cargo.toml b/polkadot/runtime/rococo/Cargo.toml
index ab9b2f11acfbe..0b8a8624bb673 100644
--- a/polkadot/runtime/rococo/Cargo.toml
+++ b/polkadot/runtime/rococo/Cargo.toml
@@ -52,6 +52,7 @@ pallet-collective = { path = "../../../substrate/frame/collective", default-feat
 pallet-conviction-voting = { path = "../../../substrate/frame/conviction-voting", default-features = false }
 pallet-democracy = { path = "../../../substrate/frame/democracy", default-features = false }
 pallet-elections-phragmen = { path = "../../../substrate/frame/elections-phragmen", default-features = false }
+pallet-asset-rate = { path = "../../../substrate/frame/asset-rate", default-features = false }
 frame-executive = { path = "../../../substrate/frame/executive", default-features = false }
 pallet-grandpa = { path = "../../../substrate/frame/grandpa", default-features = false }
 pallet-identity = { path = "../../../substrate/frame/identity", default-features = false }
@@ -72,7 +73,7 @@ pallet-scheduler = { path = "../../../substrate/frame/scheduler", default-featur
 pallet-session = { path = "../../../substrate/frame/session", default-features = false }
 pallet-society = { path = "../../../substrate/frame/society", default-features = false }
 pallet-sudo = { path = "../../../substrate/frame/sudo", default-features = false }
-frame-support = { path = "../../../substrate/frame/support", default-features = false }
+frame-support = { path = "../../../substrate/frame/support", default-features = false, features = ["tuples-96"] }
 pallet-staking = { path = "../../../substrate/frame/staking", default-features = false }
 frame-system = { path = "../../../substrate/frame/system", default-features = false }
 frame-system-rpc-runtime-api = { path = "../../../substrate/frame/system/rpc/runtime-api", default-features = false }
@@ -131,6 +132,7 @@ std = [
 	"inherents/std",
 	"log/std",
 	"offchain-primitives/std",
+	"pallet-asset-rate/std",
 	"pallet-authority-discovery/std",
 	"pallet-authorship/std",
 	"pallet-babe/std",
@@ -207,6 +209,7 @@ runtime-benchmarks = [
 	"frame-support/runtime-benchmarks",
 	"frame-system-benchmarking/runtime-benchmarks",
 	"frame-system/runtime-benchmarks",
+	"pallet-asset-rate/runtime-benchmarks",
 	"pallet-babe/runtime-benchmarks",
 	"pallet-balances/runtime-benchmarks",
 	"pallet-bounties/runtime-benchmarks",
@@ -258,6 +261,7 @@ try-runtime = [
 	"frame-system/try-runtime",
 	"frame-try-runtime",
 	"frame-try-runtime/try-runtime",
+	"pallet-asset-rate/try-runtime",
 	"pallet-authority-discovery/try-runtime",
 	"pallet-authorship/try-runtime",
 	"pallet-babe/try-runtime",
diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs
index f1201c0bedc75..9933f64429745 100644
--- a/polkadot/runtime/rococo/src/lib.rs
+++ b/polkadot/runtime/rococo/src/lib.rs
@@ -30,7 +30,10 @@ use primitives::{
 	ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, PARACHAIN_KEY_TYPE_ID,
 };
 use runtime_common::{
-	assigned_slots, auctions, claims, crowdloan, impl_runtime_weights, impls::ToAuthor,
+	assigned_slots, auctions, claims, crowdloan, impl_runtime_weights,
+	impls::{
+		LocatableAssetConverter, ToAuthor, VersionedLocatableAsset, VersionedMultiLocationConverter,
+	},
 	paras_registrar, paras_sudo_wrapper, prod_or_fast, slots, BlockHashCount, BlockLength,
 	SlowAdjustingFeeUpdate,
 };
@@ -81,7 +84,8 @@ use sp_runtime::{
 	create_runtime_str, generic, impl_opaque_keys,
 	traits::{
 		AccountIdLookup, BlakeTwo256, Block as BlockT, ConstU32, ConvertInto,
-		Extrinsic as ExtrinsicT, Keccak256, OpaqueKeys, SaturatedConversion, Verify,
+		Extrinsic as ExtrinsicT, IdentityLookup, Keccak256, OpaqueKeys, SaturatedConversion,
+		Verify,
 	},
 	transaction_validity::{TransactionPriority, TransactionSource, TransactionValidity},
 	ApplyExtrinsicResult, FixedU128, KeyTypeId, Perbill, Percent, Permill, RuntimeDebug,
@@ -90,7 +94,11 @@ use sp_staking::SessionIndex;
 #[cfg(any(feature = "std", test))]
 use sp_version::NativeVersion;
 use sp_version::RuntimeVersion;
-use xcm::latest::Junction;
+use xcm::{
+	latest::{InteriorMultiLocation, Junction, Junction::PalletInstance},
+	VersionedMultiLocation,
+};
+use xcm_builder::PayOverXcm;
 
 pub use frame_system::Call as SystemCall;
 pub use pallet_balances::Call as BalancesCall;
@@ -387,6 +395,10 @@ parameter_types! {
 	pub const SpendPeriod: BlockNumber = 6 * DAYS;
 	pub const Burn: Permill = Permill::from_perthousand(2);
 	pub const TreasuryPalletId: PalletId = PalletId(*b"py/trsry");
+	pub const PayoutSpendPeriod: BlockNumber = 30 * DAYS;
+	// The asset's interior location for the paying account. This is the Treasury
+	// pallet instance (which sits at index 18).
+	pub TreasuryInteriorLocation: InteriorMultiLocation = PalletInstance(18).into();
 
 	pub const TipCountdown: BlockNumber = 1 * DAYS;
 	pub const TipFindersFee: Percent = Percent::from_percent(20);
@@ -396,6 +408,7 @@ parameter_types! {
 	pub const MaxAuthorities: u32 = 100_000;
 	pub const MaxKeys: u32 = 10_000;
 	pub const MaxPeerInHeartbeats: u32 = 10_000;
+	pub const MaxBalance: Balance = Balance::max_value();
 }
 
 impl pallet_treasury::Config for Runtime {
@@ -415,6 +428,23 @@ impl pallet_treasury::Config for Runtime {
 	type WeightInfo = weights::pallet_treasury::WeightInfo<Runtime>;
 	type SpendFunds = Bounties;
 	type SpendOrigin = TreasurySpender;
+	type AssetKind = VersionedLocatableAsset;
+	type Beneficiary = VersionedMultiLocation;
+	type BeneficiaryLookup = IdentityLookup<Self::Beneficiary>;
+	type Paymaster = PayOverXcm<
+		TreasuryInteriorLocation,
+		crate::xcm_config::XcmRouter,
+		crate::XcmPallet,
+		ConstU32<{ 6 * HOURS }>,
+		Self::Beneficiary,
+		Self::AssetKind,
+		LocatableAssetConverter,
+		VersionedMultiLocationConverter,
+	>;
+	type BalanceConverter = AssetRate;
+	type PayoutPeriod = PayoutSpendPeriod;
+	#[cfg(feature = "runtime-benchmarks")]
+	type BenchmarkHelper = runtime_common::impls::benchmarks::TreasuryArguments;
 }
 
 parameter_types! {
@@ -1204,6 +1234,18 @@ impl pallet_sudo::Config for Runtime {
 	type WeightInfo = weights::pallet_sudo::WeightInfo<Runtime>;
 }
 
+impl pallet_asset_rate::Config for Runtime {
+	type WeightInfo = weights::pallet_asset_rate::WeightInfo<Runtime>;
+	type RuntimeEvent = RuntimeEvent;
+	type CreateOrigin = EnsureRoot<AccountId>;
+	type RemoveOrigin = EnsureRoot<AccountId>;
+	type UpdateOrigin = EnsureRoot<AccountId>;
+	type Currency = Balances;
+	type AssetKind = <Runtime as pallet_treasury::Config>::AssetKind;
+	#[cfg(feature = "runtime-benchmarks")]
+	type BenchmarkHelper = runtime_common::impls::benchmarks::AssetRateArguments;
+}
+
 construct_runtime! {
 	pub enum Runtime
 	{
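`pallet_asset_rate`, configured in the block above, is what lets `type BalanceConverter = AssetRate;` work in the treasury config: it stores a per-asset `FixedU128` conversion rate and maps a requested asset amount to its native-token equivalent, so spends can be checked against `SpendOrigin` limits denominated in the native balance. A minimal sketch of that arithmetic, assuming `FixedU128`'s 10^18 fixed-point representation (illustrative, not the pallet's code):

```rust
// FixedU128 stores values scaled by 10^18; a rate of FIXED_ONE means 1:1.
const FIXED_ONE: u128 = 1_000_000_000_000_000_000;

// native_amount = asset_amount * rate, mirroring a saturating fixed-point
// multiply-by-integer (cf. FixedU128::saturating_mul_int).
fn to_native(asset_amount: u128, rate: u128) -> u128 {
    asset_amount.saturating_mul(rate) / FIXED_ONE
}

fn main() {
    // Example: 1 unit of the foreign asset is worth 0.5 native tokens.
    let rate = FIXED_ONE / 2;
    assert_eq!(to_native(10, rate), 5);
    println!("10 asset units -> {} native", to_native(10, rate));
}
```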
@@ -1281,6 +1323,9 @@ construct_runtime! {
 		// Preimage registrar.
 		Preimage: pallet_preimage::{Pallet, Call, Storage, Event<T>, HoldReason} = 32,
 
+		// Asset rate.
+		AssetRate: pallet_asset_rate::{Pallet, Call, Storage, Event<T>} = 39,
+
 		// Bounties modules.
 		Bounties: pallet_bounties::{Pallet, Call, Storage, Event<T>} = 35,
 		ChildBounties: pallet_child_bounties = 40,
@@ -1467,6 +1512,7 @@ mod benches {
 		[pallet_treasury, Treasury]
 		[pallet_utility, Utility]
 		[pallet_vesting, Vesting]
+		[pallet_asset_rate, AssetRate]
 		[pallet_whitelist, Whitelist]
 		// XCM
 		[pallet_xcm, XcmPallet]
diff --git a/polkadot/runtime/rococo/src/weights/mod.rs b/polkadot/runtime/rococo/src/weights/mod.rs
index e0c1c4f413515..9c563a67d98b7 100644
--- a/polkadot/runtime/rococo/src/weights/mod.rs
+++ b/polkadot/runtime/rococo/src/weights/mod.rs
@@ -16,6 +16,7 @@
 //! A list of the different weight modules for our runtime.
 
 pub mod frame_system;
+pub mod pallet_asset_rate;
 pub mod pallet_balances;
 pub mod pallet_balances_nis_counterpart_balances;
 pub mod pallet_bounties;
diff --git a/polkadot/runtime/rococo/src/weights/pallet_asset_rate.rs b/polkadot/runtime/rococo/src/weights/pallet_asset_rate.rs
new file mode 100644
index 0000000000000..da2d1958cefcf
--- /dev/null
+++ b/polkadot/runtime/rococo/src/weights/pallet_asset_rate.rs
@@ -0,0 +1,86 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Autogenerated weights for `pallet_asset_rate`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
+//! DATE: 2023-07-03, STEPS: `50`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `cob`, CPU: `<UNKNOWN>`
+//! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024
+
+// Executed Command:
+// ./target/debug/polkadot
+// benchmark
+// pallet
+// --chain=rococo-dev
+// --steps=50
+// --repeat=2
+// --pallet=pallet_asset_rate
+// --extrinsic=*
+// --wasm-execution=compiled
+// --heap-pages=4096
+// --output=./runtime/rococo/src/weights/
+// --header=./file_header.txt
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weight functions for `pallet_asset_rate`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> pallet_asset_rate::WeightInfo for WeightInfo<T> {
+	/// Storage: AssetRate ConversionRateToNative (r:1 w:1)
+	/// Proof: AssetRate ConversionRateToNative (max_values: None, max_size: Some(1237), added: 3712, mode: MaxEncodedLen)
+	fn create() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `42`
+		//  Estimated: `4702`
+		// Minimum execution time: 143_000_000 picoseconds.
+		Weight::from_parts(155_000_000, 0)
+			.saturating_add(Weight::from_parts(0, 4702))
+			.saturating_add(T::DbWeight::get().reads(1))
+			.saturating_add(T::DbWeight::get().writes(1))
+	}
+	/// Storage: AssetRate ConversionRateToNative (r:1 w:1)
+	/// Proof: AssetRate ConversionRateToNative (max_values: None, max_size: Some(1237), added: 3712, mode: MaxEncodedLen)
+	fn update() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `110`
+		//  Estimated: `4702`
+		// Minimum execution time: 156_000_000 picoseconds.
+		Weight::from_parts(172_000_000, 0)
+			.saturating_add(Weight::from_parts(0, 4702))
+			.saturating_add(T::DbWeight::get().reads(1))
+			.saturating_add(T::DbWeight::get().writes(1))
+	}
+	/// Storage: AssetRate ConversionRateToNative (r:1 w:1)
+	/// Proof: AssetRate ConversionRateToNative (max_values: None, max_size: Some(1237), added: 3712, mode: MaxEncodedLen)
+	fn remove() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `110`
+		//  Estimated: `4702`
+		// Minimum execution time: 150_000_000 picoseconds.
+		Weight::from_parts(160_000_000, 0)
+			.saturating_add(Weight::from_parts(0, 4702))
+			.saturating_add(T::DbWeight::get().reads(1))
+			.saturating_add(T::DbWeight::get().writes(1))
+	}
+}
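As a reading aid for the generated file above: the effective weight of a call is the benchmarked base plus the configured database weight per storage read and write, with the proof size taken from the `MaxEncodedLen` bound noted in the doc comments. A back-of-the-envelope reconstruction of `create()`'s totals, assuming the default `RocksDbWeight` constants (25 µs per read, 100 µs per write; a runtime may configure different values):

```rust
const READ_PS: u64 = 25_000_000;   // assumed RocksDbWeight read cost, in picoseconds
const WRITE_PS: u64 = 100_000_000; // assumed RocksDbWeight write cost, in picoseconds

fn main() {
    let base_ref_time: u64 = 155_000_000; // from Weight::from_parts(155_000_000, 0)
    let ref_time = base_ref_time + READ_PS + WRITE_PS; // + reads(1) + writes(1)
    let proof_size: u64 = 4702; // MaxEncodedLen bound for ConversionRateToNative
    println!("create(): ~{ref_time} ps ref_time, {proof_size} bytes proof_size");
}
```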
diff --git a/polkadot/runtime/rococo/src/weights/pallet_treasury.rs b/polkadot/runtime/rococo/src/weights/pallet_treasury.rs
index 041d976d82570..144e9d5b87238 100644
--- a/polkadot/runtime/rococo/src/weights/pallet_treasury.rs
+++ b/polkadot/runtime/rococo/src/weights/pallet_treasury.rs
@@ -17,24 +17,24 @@
 //! Autogenerated weights for `pallet_treasury`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2023-07-07, STEPS: `50`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz`
-//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024
+//! HOSTNAME: `cob`, CPU: `<UNKNOWN>`
+//! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024
 
 // Executed Command:
-// ./target/production/polkadot
+// ./target/debug/polkadot
 // benchmark
 // pallet
 // --chain=rococo-dev
 // --steps=50
-// --repeat=20
+// --repeat=2
 // --pallet=pallet_treasury
 // --extrinsic=*
-// --execution=wasm
 // --wasm-execution=compiled
-// --header=./file_header.txt
+// --heap-pages=4096
 // --output=./runtime/rococo/src/weights/
+// --header=./file_header.txt
 
 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
@@ -47,13 +47,21 @@ use core::marker::PhantomData;
 /// Weight functions for `pallet_treasury`.
 pub struct WeightInfo<T>(PhantomData<T>);
 impl<T: frame_system::Config> pallet_treasury::WeightInfo for WeightInfo<T> {
-	fn spend() -> Weight {
+	/// Storage: Treasury ProposalCount (r:1 w:1)
+	/// Proof: Treasury ProposalCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
+	/// Storage: Treasury Approvals (r:1 w:1)
+	/// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen)
+	/// Storage: Treasury Proposals (r:0 w:1)
+	/// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen)
+	fn spend_local() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `0`
-		//  Estimated: `0`
-		// Minimum execution time: 204_000 picoseconds.
-		Weight::from_parts(233_000, 0)
-			.saturating_add(Weight::from_parts(0, 0))
+		//  Measured:  `42`
+		//  Estimated: `1887`
+		// Minimum execution time: 177_000_000 picoseconds.
+		Weight::from_parts(191_000_000, 0)
+			.saturating_add(Weight::from_parts(0, 1887))
+			.saturating_add(T::DbWeight::get().reads(2))
+			.saturating_add(T::DbWeight::get().writes(3))
 	}
 	/// Storage: Treasury ProposalCount (r:1 w:1)
 	/// Proof: Treasury ProposalCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
@@ -61,10 +69,10 @@ impl<T: frame_system::Config> pallet_treasury::WeightInfo for WeightInfo<T> {
 	/// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen)
 	fn propose_spend() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `107`
+		//  Measured:  `143`
 		//  Estimated: `1489`
-		// Minimum execution time: 27_592_000 picoseconds.
-		Weight::from_parts(27_960_000, 0)
+		// Minimum execution time: 354_000_000 picoseconds.
+		Weight::from_parts(376_000_000, 0)
 			.saturating_add(Weight::from_parts(0, 1489))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(2))
@@ -75,10 +83,10 @@ impl<T: frame_system::Config> pallet_treasury::WeightInfo for WeightInfo<T> {
 	/// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen)
 	fn reject_proposal() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `265`
+		//  Measured:  `301`
 		//  Estimated: `3593`
-		// Minimum execution time: 40_336_000 picoseconds.
-		Weight::from_parts(41_085_000, 0)
+		// Minimum execution time: 547_000_000 picoseconds.
+		Weight::from_parts(550_000_000, 0)
 			.saturating_add(Weight::from_parts(0, 3593))
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(2))
@@ -90,13 +98,13 @@ impl<T: frame_system::Config> pallet_treasury::WeightInfo for WeightInfo<T> {
 	/// The range of component `p` is `[0, 99]`.
 	fn approve_proposal(p: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `433 + p * (8 ±0)`
+		//  Measured:  `470 + p * (8 ±0)`
 		//  Estimated: `3573`
-		// Minimum execution time: 9_938_000 picoseconds.
-		Weight::from_parts(12_061_206, 0)
+		// Minimum execution time: 104_000_000 picoseconds.
+		Weight::from_parts(121_184_402, 0)
 			.saturating_add(Weight::from_parts(0, 3573))
-			// Standard Error: 801
-			.saturating_add(Weight::from_parts(26_602, 0).saturating_mul(p.into()))
+			// Standard Error: 42_854
+			.saturating_add(Weight::from_parts(153_112, 0).saturating_mul(p.into()))
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -104,10 +112,10 @@ impl<T: frame_system::Config> pallet_treasury::WeightInfo for WeightInfo<T> {
 	/// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen)
 	fn remove_approval() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `90`
+		//  Measured:  `127`
 		//  Estimated: `1887`
-		// Minimum execution time: 7_421_000 picoseconds.
-		Weight::from_parts(7_620_000, 0)
+		// Minimum execution time: 80_000_000 picoseconds.
+		Weight::from_parts(82_000_000, 0)
 			.saturating_add(Weight::from_parts(0, 1887))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
@@ -118,26 +126,98 @@ impl<T: frame_system::Config> pallet_treasury::WeightInfo for WeightInfo<T> {
 	/// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen)
 	/// Storage: Treasury Approvals (r:1 w:1)
 	/// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen)
-	/// Storage: Treasury Proposals (r:100 w:100)
+	/// Storage: Treasury Proposals (r:99 w:99)
 	/// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen)
-	/// Storage: System Account (r:201 w:201)
+	/// Storage: System Account (r:199 w:199)
 	/// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen)
 	/// Storage: Bounties BountyApprovals (r:1 w:1)
 	/// Proof: Bounties BountyApprovals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen)
-	/// The range of component `p` is `[0, 100]`.
+	/// The range of component `p` is `[0, 99]`.
 	fn on_initialize_proposals(p: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `296 + p * (251 ±0)`
+		//  Measured:  `331 + p * (251 ±0)`
 		//  Estimated: `3593 + p * (5206 ±0)`
-		// Minimum execution time: 62_706_000 picoseconds.
-		Weight::from_parts(61_351_470, 0)
+		// Minimum execution time: 887_000_000 picoseconds.
+		Weight::from_parts(828_616_021, 0)
 			.saturating_add(Weight::from_parts(0, 3593))
-			// Standard Error: 32_787
-			.saturating_add(Weight::from_parts(37_873_920, 0).saturating_mul(p.into()))
+			// Standard Error: 695_351
+			.saturating_add(Weight::from_parts(566_114_524, 0).saturating_mul(p.into()))
 			.saturating_add(T::DbWeight::get().reads(5))
 			.saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(p.into())))
 			.saturating_add(T::DbWeight::get().writes(5))
 			.saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(p.into())))
 			.saturating_add(Weight::from_parts(0, 5206).saturating_mul(p.into()))
 	}
+	/// Storage: AssetRate ConversionRateToNative (r:1 w:0)
+	/// Proof: AssetRate ConversionRateToNative (max_values: None, max_size: Some(1237), added: 3712, mode: MaxEncodedLen)
+	/// Storage: Treasury SpendCount (r:1 w:1)
+	/// Proof: Treasury SpendCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
+	/// Storage: Treasury Spends (r:0 w:1)
+	/// Proof: Treasury Spends (max_values: None, max_size: Some(1848), added: 4323, mode: MaxEncodedLen)
+	fn spend() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `114`
+		//  Estimated: `4702`
+		// Minimum execution time: 208_000_000 picoseconds.
+		Weight::from_parts(222_000_000, 0)
+			.saturating_add(Weight::from_parts(0, 4702))
+			.saturating_add(T::DbWeight::get().reads(2))
+			.saturating_add(T::DbWeight::get().writes(2))
+	}
+	/// Storage: Treasury Spends (r:1 w:1)
+	/// Proof: Treasury Spends (max_values: None, max_size: Some(1848), added: 4323, mode: MaxEncodedLen)
+	/// Storage: XcmPallet QueryCounter (r:1 w:1)
+	/// Proof Skipped: XcmPallet QueryCounter (max_values: Some(1), max_size: None, mode: Measured)
+	/// Storage: Configuration ActiveConfig (r:1 w:0)
+	/// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured)
+	/// Storage: Dmp DeliveryFeeFactor (r:1 w:0)
+	/// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured)
+	/// Storage: XcmPallet SupportedVersion (r:1 w:0)
+	/// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured)
+	/// Storage: XcmPallet VersionDiscoveryQueue (r:1 w:1)
+	/// Proof Skipped: XcmPallet VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured)
+	/// Storage: XcmPallet SafeXcmVersion (r:1 w:0)
+	/// Proof Skipped: XcmPallet SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured)
+	/// Storage: Dmp DownwardMessageQueues (r:1 w:1)
+	/// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured)
+	/// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1)
+	/// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured)
+	/// Storage: XcmPallet Queries (r:0 w:1)
+	/// Proof Skipped: XcmPallet Queries (max_values: None, max_size: None, mode: Measured)
+	fn payout() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `737`
+		//  Estimated: `5313`
+		// Minimum execution time: 551_000_000 picoseconds.
+		Weight::from_parts(569_000_000, 0)
+			.saturating_add(Weight::from_parts(0, 5313))
+			.saturating_add(T::DbWeight::get().reads(9))
+			.saturating_add(T::DbWeight::get().writes(6))
+	}
+	/// Storage: Treasury Spends (r:1 w:1)
+	/// Proof: Treasury Spends (max_values: None, max_size: Some(1848), added: 4323, mode: MaxEncodedLen)
+	/// Storage: XcmPallet Queries (r:1 w:1)
+	/// Proof Skipped: XcmPallet Queries (max_values: None, max_size: None, mode: Measured)
+	fn check_status() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `442`
+		//  Estimated: `5313`
+		// Minimum execution time: 245_000_000 picoseconds.
+		Weight::from_parts(281_000_000, 0)
+			.saturating_add(Weight::from_parts(0, 5313))
+			.saturating_add(T::DbWeight::get().reads(2))
+			.saturating_add(T::DbWeight::get().writes(2))
+	}
+	/// Storage: Treasury Spends (r:1 w:1)
+	/// Proof: Treasury Spends (max_values: None, max_size: Some(1848), added: 4323, mode: MaxEncodedLen)
+	fn void_spend() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `172`
+		//  Estimated: `5313`
+		// Minimum execution time: 147_000_000 picoseconds.
+		Weight::from_parts(160_000_000, 0)
+			.saturating_add(Weight::from_parts(0, 5313))
+			.saturating_add(T::DbWeight::get().reads(1))
+			.saturating_add(T::DbWeight::get().writes(1))
+	}
 }
diff --git a/polkadot/runtime/rococo/src/weights/xcm/mod.rs b/polkadot/runtime/rococo/src/weights/xcm/mod.rs
index 1d613717dc527..cc485dfbaf7e4 100644
--- a/polkadot/runtime/rococo/src/weights/xcm/mod.rs
+++ b/polkadot/runtime/rococo/src/weights/xcm/mod.rs
@@ -91,7 +91,6 @@ impl<RuntimeCall> XcmWeightInfo<RuntimeCall> for RococoXcmWeight<RuntimeCall> {
 		assets.weigh_multi_assets(XcmBalancesWeight::<Runtime>::withdraw_asset())
 	}
 	fn reserve_asset_deposited(assets: &MultiAssets) -> Weight {
-		// Rococo doesn't support ReserveAssetDeposited, so this benchmark has a default weight
 		assets.weigh_multi_assets(XcmBalancesWeight::<Runtime>::reserve_asset_deposited())
 	}
 	fn receive_teleported_asset(assets: &MultiAssets) -> Weight {
diff --git a/polkadot/runtime/rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/polkadot/runtime/rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs
index 59c49e4f8c821..60c40429b1ac3 100644
--- a/polkadot/runtime/rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs
+++ b/polkadot/runtime/rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs
@@ -17,10 +17,10 @@
 //! Autogenerated weights for `pallet_xcm_benchmarks::fungible`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-07-17, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2023-09-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-gghbxkbs-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
-//! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024
+//! HOSTNAME: `runner-nbnwcyh-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024
 
 // Executed Command:
 // target/production/polkadot
@@ -31,12 +31,12 @@
 // --extrinsic=*
 // --wasm-execution=compiled
 // --heap-pages=4096
-// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
 // --pallet=pallet_xcm_benchmarks::fungible
 // --chain=rococo-dev
-// --header=./file_header.txt
-// --template=./xcm/pallet-xcm-benchmarks/template.hbs
-// --output=./runtime/rococo/src/weights/xcm/
+// --header=./polkadot/file_header.txt
+// --template=./polkadot/xcm/pallet-xcm-benchmarks/template.hbs
+// --output=./polkadot/runtime/rococo/src/weights/xcm/
 
 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
@@ -55,8 +55,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `101`
 		//  Estimated: `3593`
-		// Minimum execution time: 24_892_000 picoseconds.
-		Weight::from_parts(25_219_000, 3593)
+		// Minimum execution time: 23_189_000 picoseconds.
+		Weight::from_parts(23_896_000, 3593)
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -66,8 +66,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `101`
 		//  Estimated: `6196`
-		// Minimum execution time: 52_112_000 picoseconds.
-		Weight::from_parts(53_104_000, 6196)
+		// Minimum execution time: 50_299_000 picoseconds.
+		Weight::from_parts(50_962_000, 6196)
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
@@ -83,10 +83,10 @@ impl<T: frame_system::Config> WeightInfo<T> {
 	/// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	pub(crate) fn transfer_reserve_asset() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `210`
+		//  Measured:  `243`
 		//  Estimated: `6196`
-		// Minimum execution time: 76_459_000 picoseconds.
-		Weight::from_parts(79_152_000, 6196)
+		// Minimum execution time: 71_748_000 picoseconds.
+		Weight::from_parts(74_072_000, 6196)
 			.saturating_add(T::DbWeight::get().reads(6))
 			.saturating_add(T::DbWeight::get().writes(4))
 	}
@@ -96,8 +96,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 2_000_000_000_000 picoseconds.
-		Weight::from_parts(2_000_000_000_000, 0)
+		// Minimum execution time: 18_446_744_073_709_551_000 picoseconds.
+		Weight::from_parts(18_446_744_073_709_551_000, 0)
 	}
 	/// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0)
 	/// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`)
@@ -109,10 +109,10 @@ impl<T: frame_system::Config> WeightInfo<T> {
 	/// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	pub(crate) fn initiate_reserve_withdraw() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `109`
-		//  Estimated: `3574`
-		// Minimum execution time: 29_734_000 picoseconds.
-		Weight::from_parts(30_651_000, 3574)
+		//  Measured:  `142`
+		//  Estimated: `3607`
+		// Minimum execution time: 27_806_000 picoseconds.
+		Weight::from_parts(28_594_000, 3607)
 			.saturating_add(T::DbWeight::get().reads(4))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
@@ -122,8 +122,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `103`
 		//  Estimated: `3593`
-		// Minimum execution time: 23_028_000 picoseconds.
-		Weight::from_parts(23_687_000, 3593)
+		// Minimum execution time: 21_199_000 picoseconds.
+		Weight::from_parts(21_857_000, 3593)
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -133,8 +133,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `3593`
-		// Minimum execution time: 26_399_000 picoseconds.
-		Weight::from_parts(27_262_000, 3593)
+		// Minimum execution time: 23_578_000 picoseconds.
+		Weight::from_parts(24_060_000, 3593)
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -150,10 +150,10 @@ impl<T: frame_system::Config> WeightInfo<T> {
 	/// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	pub(crate) fn deposit_reserve_asset() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `109`
-		//  Estimated: `3593`
-		// Minimum execution time: 52_015_000 picoseconds.
-		Weight::from_parts(53_498_000, 3593)
+		//  Measured:  `142`
+		//  Estimated: `3607`
+		// Minimum execution time: 48_522_000 picoseconds.
+		Weight::from_parts(49_640_000, 3607)
 			.saturating_add(T::DbWeight::get().reads(5))
 			.saturating_add(T::DbWeight::get().writes(3))
 	}
@@ -169,10 +169,10 @@ impl<T: frame_system::Config> WeightInfo<T> {
 	/// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	pub(crate) fn initiate_teleport() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `109`
-		//  Estimated: `3593`
-		// Minimum execution time: 53_833_000 picoseconds.
-		Weight::from_parts(55_688_000, 3593)
+		//  Measured:  `142`
+		//  Estimated: `3607`
+		// Minimum execution time: 50_429_000 picoseconds.
+		Weight::from_parts(51_295_000, 3607)
 			.saturating_add(T::DbWeight::get().reads(5))
 			.saturating_add(T::DbWeight::get().writes(3))
 	}
diff --git a/polkadot/runtime/westend/Cargo.toml b/polkadot/runtime/westend/Cargo.toml
index c9721935c32b4..58a6cc21eecdb 100644
--- a/polkadot/runtime/westend/Cargo.toml
+++ b/polkadot/runtime/westend/Cargo.toml
@@ -41,10 +41,11 @@ sp-npos-elections = { path = "../../../substrate/primitives/npos-elections", def
 
 frame-election-provider-support = { path = "../../../substrate/frame/election-provider-support", default-features = false }
 frame-executive = { path = "../../../substrate/frame/executive", default-features = false }
-frame-support = { path = "../../../substrate/frame/support", default-features = false }
+frame-support = { path = "../../../substrate/frame/support", default-features = false, features = ["tuples-96"] }
 frame-system = { path = "../../../substrate/frame/system", default-features = false }
 frame-system-rpc-runtime-api = { path = "../../../substrate/frame/system/rpc/runtime-api", default-features = false }
 westend-runtime-constants = { package = "westend-runtime-constants", path = "constants", default-features = false }
+pallet-asset-rate = { path = "../../../substrate/frame/asset-rate", default-features = false }
 pallet-authority-discovery = { path = "../../../substrate/frame/authority-discovery", default-features = false }
 pallet-authorship = { path = "../../../substrate/frame/authorship", default-features = false }
 pallet-babe = { path = "../../../substrate/frame/babe", default-features = false }
@@ -143,6 +144,7 @@ std = [
 	"inherents/std",
 	"log/std",
 	"offchain-primitives/std",
+	"pallet-asset-rate/std",
 	"pallet-authority-discovery/std",
 	"pallet-authorship/std",
 	"pallet-babe/std",
@@ -228,6 +230,7 @@ runtime-benchmarks = [
 	"frame-system-benchmarking/runtime-benchmarks",
 	"frame-system/runtime-benchmarks",
 	"hex-literal",
+	"pallet-asset-rate/runtime-benchmarks",
 	"pallet-babe/runtime-benchmarks",
 	"pallet-bags-list/runtime-benchmarks",
 	"pallet-balances/runtime-benchmarks",
@@ -283,6 +286,7 @@ try-runtime = [
 	"frame-system/try-runtime",
 	"frame-try-runtime",
 	"frame-try-runtime/try-runtime",
+	"pallet-asset-rate/try-runtime",
 	"pallet-authority-discovery/try-runtime",
 	"pallet-authorship/try-runtime",
 	"pallet-babe/try-runtime",
diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs
index 81dcdb95674aa..0e93b3449fec5 100644
--- a/polkadot/runtime/westend/src/lib.rs
+++ b/polkadot/runtime/westend/src/lib.rs
@@ -53,9 +53,14 @@ use primitives::{
 	ValidatorSignature, PARACHAIN_KEY_TYPE_ID,
 };
 use runtime_common::{
-	assigned_slots, auctions, crowdloan, elections::OnChainAccuracy, impl_runtime_weights,
-	impls::ToAuthor, paras_registrar, paras_sudo_wrapper, prod_or_fast, slots, BalanceToU256,
-	BlockHashCount, BlockLength, CurrencyToVote, SlowAdjustingFeeUpdate, U256ToBalance,
+	assigned_slots, auctions, crowdloan,
+	elections::OnChainAccuracy,
+	impl_runtime_weights,
+	impls::{
+		LocatableAssetConverter, ToAuthor, VersionedLocatableAsset, VersionedMultiLocationConverter,
+	},
+	paras_registrar, paras_sudo_wrapper, prod_or_fast, slots, BalanceToU256, BlockHashCount,
+	BlockLength, CurrencyToVote, SlowAdjustingFeeUpdate, U256ToBalance,
 };
 use runtime_parachains::{
 	assigner_parachains as parachains_assigner_parachains,
@@ -79,7 +84,7 @@ use sp_runtime::{
 	generic, impl_opaque_keys,
 	traits::{
 		AccountIdLookup, BlakeTwo256, Block as BlockT, ConvertInto, Extrinsic as ExtrinsicT,
-		Keccak256, OpaqueKeys, SaturatedConversion, Verify,
+		IdentityLookup, Keccak256, OpaqueKeys, SaturatedConversion, Verify,
 	},
 	transaction_validity::{TransactionPriority, TransactionSource, TransactionValidity},
 	ApplyExtrinsicResult, FixedU128, KeyTypeId, Perbill, Percent, Permill,
@@ -89,7 +94,11 @@ use sp_std::{collections::btree_map::BTreeMap, prelude::*};
 #[cfg(any(feature = "std", test))]
 use sp_version::NativeVersion;
 use sp_version::RuntimeVersion;
-use xcm::latest::Junction;
+use xcm::{
+	latest::{InteriorMultiLocation, Junction, Junction::PalletInstance},
+	VersionedMultiLocation,
+};
+use xcm_builder::PayOverXcm;
 
 pub use frame_system::Call as SystemCall;
 pub use pallet_balances::Call as BalancesCall;
@@ -700,6 +709,10 @@ parameter_types! {
 	pub const SpendPeriod: BlockNumber = 6 * DAYS;
 	pub const Burn: Permill = Permill::from_perthousand(2);
 	pub const TreasuryPalletId: PalletId = PalletId(*b"py/trsry");
+	pub const PayoutSpendPeriod: BlockNumber = 30 * DAYS;
+	// The interior location of the paying account: the Treasury pallet instance,
+	// which sits at pallet index 37 in this runtime.
+	pub TreasuryInteriorLocation: InteriorMultiLocation = PalletInstance(37).into();
 
 	pub const TipCountdown: BlockNumber = 1 * DAYS;
 	pub const TipFindersFee: Percent = Percent::from_percent(20);
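The two payout-related constants above are easy to sanity-check, assuming the 6-second block time these relay-chain runtimes use (so `HOURS` = 600 blocks and `DAYS` = 14,400): `PayoutSpendPeriod` is the window in which an approved spend can be claimed, and the `ConstU32<{ 6 * HOURS }>` passed to `PayOverXcm` below is how long a payout's XCM status query remains valid.

```rust
// Quick arithmetic check; the 6-second block time is an assumption taken
// from the runtime's time constants, not derived here.
const SECS_PER_BLOCK: u32 = 6;
const HOURS: u32 = 3_600 / SECS_PER_BLOCK; // 600 blocks
const DAYS: u32 = 24 * HOURS;              // 14_400 blocks

fn main() {
    let xcm_query_timeout = 6 * HOURS; // 3_600 blocks, about 6 hours
    let payout_window = 30 * DAYS;     // 432_000 blocks, about 30 days
    println!("query timeout: {xcm_query_timeout} blocks, payout window: {payout_window} blocks");
}
```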
@@ -709,6 +722,7 @@ parameter_types! {
 	pub const MaxAuthorities: u32 = 100_000;
 	pub const MaxKeys: u32 = 10_000;
 	pub const MaxPeerInHeartbeats: u32 = 10_000;
+	pub const MaxBalance: Balance = Balance::max_value();
 }
 
 impl pallet_treasury::Config for Runtime {
@@ -728,6 +742,23 @@ impl pallet_treasury::Config for Runtime {
 	type WeightInfo = weights::pallet_treasury::WeightInfo<Runtime>;
 	type SpendFunds = ();
 	type SpendOrigin = TreasurySpender;
+	type AssetKind = VersionedLocatableAsset;
+	type Beneficiary = VersionedMultiLocation;
+	type BeneficiaryLookup = IdentityLookup<Self::Beneficiary>;
+	type Paymaster = PayOverXcm<
+		TreasuryInteriorLocation,
+		crate::xcm_config::XcmRouter,
+		crate::XcmPallet,
+		ConstU32<{ 6 * HOURS }>,
+		Self::Beneficiary,
+		Self::AssetKind,
+		LocatableAssetConverter,
+		VersionedMultiLocationConverter,
+	>;
+	type BalanceConverter = AssetRate;
+	type PayoutPeriod = PayoutSpendPeriod;
+	#[cfg(feature = "runtime-benchmarks")]
+	type BenchmarkHelper = runtime_common::impls::benchmarks::TreasuryArguments;
 }
 
 impl pallet_offences::Config for Runtime {
@@ -1333,6 +1364,18 @@ parameter_types! {
 	pub const MigrationMaxKeyLen: u32 = 512;
 }
 
+impl pallet_asset_rate::Config for Runtime {
+	type WeightInfo = weights::pallet_asset_rate::WeightInfo<Runtime>;
+	type RuntimeEvent = RuntimeEvent;
+	type CreateOrigin = EnsureRoot<AccountId>;
+	type RemoveOrigin = EnsureRoot<AccountId>;
+	type UpdateOrigin = EnsureRoot<AccountId>;
+	type Currency = Balances;
+	type AssetKind = <Runtime as pallet_treasury::Config>::AssetKind;
+	#[cfg(feature = "runtime-benchmarks")]
+	type BenchmarkHelper = runtime_common::impls::benchmarks::AssetRateArguments;
+}
+
 construct_runtime! {
 	pub enum Runtime
 	{
@@ -1445,6 +1488,9 @@ construct_runtime! {
 
 		// Generalized message queue
 		MessageQueue: pallet_message_queue::{Pallet, Call, Storage, Event<T>} = 100,
+
+		// Asset rate.
+		AssetRate: pallet_asset_rate::{Pallet, Call, Storage, Event<T>} = 101,
 	}
 }
 
@@ -1576,6 +1622,7 @@ mod benches {
 		[pallet_utility, Utility]
 		[pallet_vesting, Vesting]
 		[pallet_whitelist, Whitelist]
+		[pallet_asset_rate, AssetRate]
 		// XCM
 		[pallet_xcm, XcmPallet]
 		// NOTE: Make sure you point to the individual modules below.
diff --git a/polkadot/runtime/westend/src/weights/mod.rs b/polkadot/runtime/westend/src/weights/mod.rs
index faa94bcac5862..9ae6798d70b6e 100644
--- a/polkadot/runtime/westend/src/weights/mod.rs
+++ b/polkadot/runtime/westend/src/weights/mod.rs
@@ -17,6 +17,7 @@
 
 pub mod frame_election_provider_support;
 pub mod frame_system;
+pub mod pallet_asset_rate;
 pub mod pallet_bags_list;
 pub mod pallet_balances;
 pub mod pallet_conviction_voting;
diff --git a/polkadot/runtime/westend/src/weights/pallet_asset_rate.rs b/polkadot/runtime/westend/src/weights/pallet_asset_rate.rs
new file mode 100644
index 0000000000000..810dd01a17026
--- /dev/null
+++ b/polkadot/runtime/westend/src/weights/pallet_asset_rate.rs
@@ -0,0 +1,86 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Autogenerated weights for `pallet_asset_rate`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
+//! DATE: 2023-07-04, STEPS: `50`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `cob`, CPU: `<UNKNOWN>`
+//! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: Some("polkadot-dev"), DB CACHE: 1024
+
+// Executed Command:
+// ./target/debug/polkadot
+// benchmark
+// pallet
+// --chain=polkadot-dev
+// --steps=50
+// --repeat=2
+// --pallet=pallet_asset_rate
+// --extrinsic=*
+// --wasm-execution=compiled
+// --heap-pages=4096
+// --output=./runtime/polkadot/src/weights/
+// --header=./file_header.txt
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weight functions for `pallet_asset_rate`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> pallet_asset_rate::WeightInfo for WeightInfo<T> {
+	/// Storage: AssetRate ConversionRateToNative (r:1 w:1)
+	/// Proof: AssetRate ConversionRateToNative (max_values: None, max_size: Some(1237), added: 3712, mode: MaxEncodedLen)
+	fn create() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `42`
+		//  Estimated: `4702`
+		// Minimum execution time: 67_000_000 picoseconds.
+		Weight::from_parts(69_000_000, 0)
+			.saturating_add(Weight::from_parts(0, 4702))
+			.saturating_add(T::DbWeight::get().reads(1))
+			.saturating_add(T::DbWeight::get().writes(1))
+	}
+	/// Storage: AssetRate ConversionRateToNative (r:1 w:1)
+	/// Proof: AssetRate ConversionRateToNative (max_values: None, max_size: Some(1237), added: 3712, mode: MaxEncodedLen)
+	fn update() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `110`
+		//  Estimated: `4702`
+		// Minimum execution time: 69_000_000 picoseconds.
+		Weight::from_parts(71_000_000, 0)
+			.saturating_add(Weight::from_parts(0, 4702))
+			.saturating_add(T::DbWeight::get().reads(1))
+			.saturating_add(T::DbWeight::get().writes(1))
+	}
+	/// Storage: AssetRate ConversionRateToNative (r:1 w:1)
+	/// Proof: AssetRate ConversionRateToNative (max_values: None, max_size: Some(1237), added: 3712, mode: MaxEncodedLen)
+	fn remove() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `110`
+		//  Estimated: `4702`
+		// Minimum execution time: 70_000_000 picoseconds.
+		Weight::from_parts(90_000_000, 0)
+			.saturating_add(Weight::from_parts(0, 4702))
+			.saturating_add(T::DbWeight::get().reads(1))
+			.saturating_add(T::DbWeight::get().writes(1))
+	}
+}
diff --git a/polkadot/runtime/westend/src/weights/pallet_treasury.rs b/polkadot/runtime/westend/src/weights/pallet_treasury.rs
index e2eb6abfc7bbb..144e9d5b87238 100644
--- a/polkadot/runtime/westend/src/weights/pallet_treasury.rs
+++ b/polkadot/runtime/westend/src/weights/pallet_treasury.rs
@@ -17,25 +17,24 @@
 //! Autogenerated weights for `pallet_treasury`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-07-13, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2023-07-07, STEPS: `50`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-o7yfgx5n-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
-//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024
+//! HOSTNAME: `cob`, CPU: `<UNKNOWN>`
+//! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024
 
 // Executed Command:
-// target/production/polkadot
+// ./target/debug/polkadot
 // benchmark
 // pallet
+// --chain=rococo-dev
 // --steps=50
-// --repeat=20
+// --repeat=2
+// --pallet=pallet_treasury
 // --extrinsic=*
 // --wasm-execution=compiled
 // --heap-pages=4096
-// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json
-// --pallet=pallet_treasury
-// --chain=westend-dev
+// --output=./runtime/rococo/src/weights/
 // --header=./file_header.txt
-// --output=./runtime/westend/src/weights/
 
 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
@@ -48,103 +47,177 @@ use core::marker::PhantomData;
 /// Weight functions for `pallet_treasury`.
 pub struct WeightInfo<T>(PhantomData<T>);
 impl<T: frame_system::Config> pallet_treasury::WeightInfo for WeightInfo<T> {
-	/// Storage: `Treasury::ProposalCount` (r:1 w:1)
-	/// Proof: `Treasury::ProposalCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
-	/// Storage: `Treasury::Approvals` (r:1 w:1)
-	/// Proof: `Treasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`)
-	/// Storage: `Treasury::Proposals` (r:0 w:1)
-	/// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`)
-	fn spend() -> Weight {
+	/// Storage: Treasury ProposalCount (r:1 w:1)
+	/// Proof: Treasury ProposalCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
+	/// Storage: Treasury Approvals (r:1 w:1)
+	/// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen)
+	/// Storage: Treasury Proposals (r:0 w:1)
+	/// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen)
+	fn spend_local() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `6`
+		//  Measured:  `42`
 		//  Estimated: `1887`
-		// Minimum execution time: 13_644_000 picoseconds.
-		Weight::from_parts(13_988_000, 0)
+		// Minimum execution time: 177_000_000 picoseconds.
+		Weight::from_parts(191_000_000, 0)
 			.saturating_add(Weight::from_parts(0, 1887))
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(3))
 	}
-	/// Storage: `Treasury::ProposalCount` (r:1 w:1)
-	/// Proof: `Treasury::ProposalCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
-	/// Storage: `Treasury::Proposals` (r:0 w:1)
-	/// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`)
+	/// Storage: Treasury ProposalCount (r:1 w:1)
+	/// Proof: Treasury ProposalCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
+	/// Storage: Treasury Proposals (r:0 w:1)
+	/// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen)
 	fn propose_spend() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `107`
+		//  Measured:  `143`
 		//  Estimated: `1489`
-		// Minimum execution time: 26_304_000 picoseconds.
-		Weight::from_parts(26_850_000, 0)
+		// Minimum execution time: 354_000_000 picoseconds.
+		Weight::from_parts(376_000_000, 0)
 			.saturating_add(Weight::from_parts(0, 1489))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
-	/// Storage: `Treasury::Proposals` (r:1 w:1)
-	/// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`)
-	/// Storage: `System::Account` (r:1 w:1)
-	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+	/// Storage: Treasury Proposals (r:1 w:1)
+	/// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen)
+	/// Storage: System Account (r:1 w:1)
+	/// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen)
 	fn reject_proposal() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `265`
+		//  Measured:  `301`
 		//  Estimated: `3593`
-		// Minimum execution time: 40_318_000 picoseconds.
-		Weight::from_parts(41_598_000, 0)
+		// Minimum execution time: 547_000_000 picoseconds.
+		Weight::from_parts(550_000_000, 0)
 			.saturating_add(Weight::from_parts(0, 3593))
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
-	/// Storage: `Treasury::Proposals` (r:1 w:0)
-	/// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`)
-	/// Storage: `Treasury::Approvals` (r:1 w:1)
-	/// Proof: `Treasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`)
+	/// Storage: Treasury Proposals (r:1 w:0)
+	/// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen)
+	/// Storage: Treasury Approvals (r:1 w:1)
+	/// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen)
 	/// The range of component `p` is `[0, 99]`.
 	fn approve_proposal(p: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `433 + p * (8 ±0)`
+		//  Measured:  `470 + p * (8 ±0)`
 		//  Estimated: `3573`
-		// Minimum execution time: 8_250_000 picoseconds.
-		Weight::from_parts(10_937_873, 0)
+		// Minimum execution time: 104_000_000 picoseconds.
+		Weight::from_parts(121_184_402, 0)
 			.saturating_add(Weight::from_parts(0, 3573))
-			// Standard Error: 1_239
-			.saturating_add(Weight::from_parts(82_426, 0).saturating_mul(p.into()))
+			// Standard Error: 42_854
+			.saturating_add(Weight::from_parts(153_112, 0).saturating_mul(p.into()))
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
-	/// Storage: `Treasury::Approvals` (r:1 w:1)
-	/// Proof: `Treasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`)
+	/// Storage: Treasury Approvals (r:1 w:1)
+	/// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen)
 	fn remove_approval() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `90`
+		//  Measured:  `127`
 		//  Estimated: `1887`
-		// Minimum execution time: 6_170_000 picoseconds.
-		Weight::from_parts(6_366_000, 0)
+		// Minimum execution time: 80_000_000 picoseconds.
+		Weight::from_parts(82_000_000, 0)
 			.saturating_add(Weight::from_parts(0, 1887))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
-	/// Storage: `Treasury::Deactivated` (r:1 w:1)
-	/// Proof: `Treasury::Deactivated` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
-	/// Storage: `Balances::InactiveIssuance` (r:1 w:1)
-	/// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
-	/// Storage: `Treasury::Approvals` (r:1 w:1)
-	/// Proof: `Treasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`)
-	/// Storage: `Treasury::Proposals` (r:100 w:100)
-	/// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`)
-	/// Storage: `System::Account` (r:200 w:200)
-	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
-	/// The range of component `p` is `[0, 100]`.
+	/// Storage: Treasury Deactivated (r:1 w:1)
+	/// Proof: Treasury Deactivated (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen)
+	/// Storage: Balances InactiveIssuance (r:1 w:1)
+	/// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen)
+	/// Storage: Treasury Approvals (r:1 w:1)
+	/// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen)
+	/// Storage: Treasury Proposals (r:99 w:99)
+	/// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen)
+	/// Storage: System Account (r:199 w:199)
+	/// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen)
+	/// Storage: Bounties BountyApprovals (r:1 w:1)
+	/// Proof: Bounties BountyApprovals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen)
+	/// The range of component `p` is `[0, 99]`.
 	fn on_initialize_proposals(p: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `175 + p * (251 ±0)`
-		//  Estimated: `1887 + p * (5206 ±0)`
-		// Minimum execution time: 39_691_000 picoseconds.
-		Weight::from_parts(29_703_313, 0)
-			.saturating_add(Weight::from_parts(0, 1887))
-			// Standard Error: 18_540
-			.saturating_add(Weight::from_parts(42_601_290, 0).saturating_mul(p.into()))
-			.saturating_add(T::DbWeight::get().reads(3))
+		//  Measured:  `331 + p * (251 ±0)`
+		//  Estimated: `3593 + p * (5206 ±0)`
+		// Minimum execution time: 887_000_000 picoseconds.
+		Weight::from_parts(828_616_021, 0)
+			.saturating_add(Weight::from_parts(0, 3593))
+			// Standard Error: 695_351
+			.saturating_add(Weight::from_parts(566_114_524, 0).saturating_mul(p.into()))
+			.saturating_add(T::DbWeight::get().reads(5))
 			.saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(p.into())))
-			.saturating_add(T::DbWeight::get().writes(3))
+			.saturating_add(T::DbWeight::get().writes(5))
 			.saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(p.into())))
 			.saturating_add(Weight::from_parts(0, 5206).saturating_mul(p.into()))
 	}
+	/// Storage: AssetRate ConversionRateToNative (r:1 w:0)
+	/// Proof: AssetRate ConversionRateToNative (max_values: None, max_size: Some(1237), added: 3712, mode: MaxEncodedLen)
+	/// Storage: Treasury SpendCount (r:1 w:1)
+	/// Proof: Treasury SpendCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
+	/// Storage: Treasury Spends (r:0 w:1)
+	/// Proof: Treasury Spends (max_values: None, max_size: Some(1848), added: 4323, mode: MaxEncodedLen)
+	fn spend() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `114`
+		//  Estimated: `4702`
+		// Minimum execution time: 208_000_000 picoseconds.
+		Weight::from_parts(222_000_000, 0)
+			.saturating_add(Weight::from_parts(0, 4702))
+			.saturating_add(T::DbWeight::get().reads(2))
+			.saturating_add(T::DbWeight::get().writes(2))
+	}
+	/// Storage: Treasury Spends (r:1 w:1)
+	/// Proof: Treasury Spends (max_values: None, max_size: Some(1848), added: 4323, mode: MaxEncodedLen)
+	/// Storage: XcmPallet QueryCounter (r:1 w:1)
+	/// Proof Skipped: XcmPallet QueryCounter (max_values: Some(1), max_size: None, mode: Measured)
+	/// Storage: Configuration ActiveConfig (r:1 w:0)
+	/// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured)
+	/// Storage: Dmp DeliveryFeeFactor (r:1 w:0)
+	/// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured)
+	/// Storage: XcmPallet SupportedVersion (r:1 w:0)
+	/// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured)
+	/// Storage: XcmPallet VersionDiscoveryQueue (r:1 w:1)
+	/// Proof Skipped: XcmPallet VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured)
+	/// Storage: XcmPallet SafeXcmVersion (r:1 w:0)
+	/// Proof Skipped: XcmPallet SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured)
+	/// Storage: Dmp DownwardMessageQueues (r:1 w:1)
+	/// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured)
+	/// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1)
+	/// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured)
+	/// Storage: XcmPallet Queries (r:0 w:1)
+	/// Proof Skipped: XcmPallet Queries (max_values: None, max_size: None, mode: Measured)
+	fn payout() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `737`
+		//  Estimated: `5313`
+		// Minimum execution time: 551_000_000 picoseconds.
+		Weight::from_parts(569_000_000, 0)
+			.saturating_add(Weight::from_parts(0, 5313))
+			.saturating_add(T::DbWeight::get().reads(9))
+			.saturating_add(T::DbWeight::get().writes(6))
+	}
+	/// Storage: Treasury Spends (r:1 w:1)
+	/// Proof: Treasury Spends (max_values: None, max_size: Some(1848), added: 4323, mode: MaxEncodedLen)
+	/// Storage: XcmPallet Queries (r:1 w:1)
+	/// Proof Skipped: XcmPallet Queries (max_values: None, max_size: None, mode: Measured)
+	fn check_status() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `442`
+		//  Estimated: `5313`
+		// Minimum execution time: 245_000_000 picoseconds.
+		Weight::from_parts(281_000_000, 0)
+			.saturating_add(Weight::from_parts(0, 5313))
+			.saturating_add(T::DbWeight::get().reads(2))
+			.saturating_add(T::DbWeight::get().writes(2))
+	}
+	/// Storage: Treasury Spends (r:1 w:1)
+	/// Proof: Treasury Spends (max_values: None, max_size: Some(1848), added: 4323, mode: MaxEncodedLen)
+	fn void_spend() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `172`
+		//  Estimated: `5313`
+		// Minimum execution time: 147_000_000 picoseconds.
+		Weight::from_parts(160_000_000, 0)
+			.saturating_add(Weight::from_parts(0, 5313))
+			.saturating_add(T::DbWeight::get().reads(1))
+			.saturating_add(T::DbWeight::get().writes(1))
+	}
 }
diff --git a/polkadot/runtime/westend/src/weights/xcm/mod.rs b/polkadot/runtime/westend/src/weights/xcm/mod.rs
index c6fa6bb93eb6a..d5b3d8257ba54 100644
--- a/polkadot/runtime/westend/src/weights/xcm/mod.rs
+++ b/polkadot/runtime/westend/src/weights/xcm/mod.rs
@@ -94,7 +94,6 @@ impl<RuntimeCall> XcmWeightInfo<RuntimeCall> for WestendXcmWeight<RuntimeCall> {
 		assets.weigh_multi_assets(XcmBalancesWeight::<Runtime>::withdraw_asset())
 	}
 	fn reserve_asset_deposited(assets: &MultiAssets) -> Weight {
-		// Westend doesn't support ReserveAssetDeposited, so this benchmark has a default weight
 		assets.weigh_multi_assets(XcmBalancesWeight::<Runtime>::reserve_asset_deposited())
 	}
 	fn receive_teleported_asset(assets: &MultiAssets) -> Weight {
diff --git a/polkadot/runtime/westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/polkadot/runtime/westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs
index b92749bfa15b3..87e63fbe31070 100644
--- a/polkadot/runtime/westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs
+++ b/polkadot/runtime/westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs
@@ -17,10 +17,10 @@
 //! Autogenerated weights for `pallet_xcm_benchmarks::fungible`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-07-17, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2023-09-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-gghbxkbs-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
-//! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: Some("westend-dev"), DB CACHE: 1024
+//! HOSTNAME: `runner-nbnwcyh-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: Compiled, CHAIN: Some("westend-dev"), DB CACHE: 1024
 
 // Executed Command:
 // target/production/polkadot
@@ -31,12 +31,12 @@
 // --extrinsic=*
 // --wasm-execution=compiled
 // --heap-pages=4096
-// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
 // --pallet=pallet_xcm_benchmarks::fungible
 // --chain=westend-dev
-// --header=./file_header.txt
-// --template=./xcm/pallet-xcm-benchmarks/template.hbs
-// --output=./runtime/westend/src/weights/xcm/
+// --header=./polkadot/file_header.txt
+// --template=./polkadot/xcm/pallet-xcm-benchmarks/template.hbs
+// --output=./polkadot/runtime/westend/src/weights/xcm/
 
 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
@@ -55,8 +55,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `101`
 		//  Estimated: `3593`
-		// Minimum execution time: 24_887_000 picoseconds.
-		Weight::from_parts(25_361_000, 3593)
+		// Minimum execution time: 24_642_000 picoseconds.
+		Weight::from_parts(24_973_000, 3593)
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -66,8 +66,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `101`
 		//  Estimated: `6196`
-		// Minimum execution time: 52_408_000 picoseconds.
-		Weight::from_parts(53_387_000, 6196)
+		// Minimum execution time: 50_882_000 picoseconds.
+		Weight::from_parts(51_516_000, 6196)
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
@@ -83,10 +83,10 @@ impl<T: frame_system::Config> WeightInfo<T> {
 	/// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	pub(crate) fn transfer_reserve_asset() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `177`
+		//  Measured:  `210`
 		//  Estimated: `6196`
-		// Minimum execution time: 74_753_000 picoseconds.
-		Weight::from_parts(76_838_000, 6196)
+		// Minimum execution time: 73_923_000 picoseconds.
+		Weight::from_parts(75_454_000, 6196)
 			.saturating_add(T::DbWeight::get().reads(6))
 			.saturating_add(T::DbWeight::get().writes(4))
 	}
@@ -96,8 +96,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 2_000_000_000_000 picoseconds.
-		Weight::from_parts(2_000_000_000_000, 0)
+		// Minimum execution time: 18_446_744_073_709_551_000 picoseconds.
+		Weight::from_parts(18_446_744_073_709_551_000, 0)
 	}
 	/// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0)
 	/// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`)
@@ -109,10 +109,10 @@ impl<T: frame_system::Config> WeightInfo<T> {
 	/// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	pub(crate) fn initiate_reserve_withdraw() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `76`
-		//  Estimated: `3541`
-		// Minimum execution time: 29_272_000 picoseconds.
-		Weight::from_parts(30_061_000, 3541)
+		//  Measured:  `109`
+		//  Estimated: `3574`
+		// Minimum execution time: 29_035_000 picoseconds.
+		Weight::from_parts(30_086_000, 3574)
 			.saturating_add(T::DbWeight::get().reads(4))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
@@ -122,8 +122,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `103`
 		//  Estimated: `3593`
-		// Minimum execution time: 23_112_000 picoseconds.
-		Weight::from_parts(23_705_000, 3593)
+		// Minimum execution time: 22_094_000 picoseconds.
+		Weight::from_parts(22_560_000, 3593)
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -133,8 +133,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `3593`
-		// Minimum execution time: 26_077_000 picoseconds.
-		Weight::from_parts(26_486_000, 3593)
+		// Minimum execution time: 24_771_000 picoseconds.
+		Weight::from_parts(25_280_000, 3593)
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -150,10 +150,10 @@ impl<T: frame_system::Config> WeightInfo<T> {
 	/// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	pub(crate) fn deposit_reserve_asset() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `76`
+		//  Measured:  `109`
 		//  Estimated: `3593`
-		// Minimum execution time: 51_022_000 picoseconds.
-		Weight::from_parts(52_498_000, 3593)
+		// Minimum execution time: 49_777_000 picoseconds.
+		Weight::from_parts(50_833_000, 3593)
 			.saturating_add(T::DbWeight::get().reads(5))
 			.saturating_add(T::DbWeight::get().writes(3))
 	}
@@ -169,10 +169,10 @@ impl<T: frame_system::Config> WeightInfo<T> {
 	/// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	pub(crate) fn initiate_teleport() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `76`
+		//  Measured:  `109`
 		//  Estimated: `3593`
-		// Minimum execution time: 53_062_000 picoseconds.
-		Weight::from_parts(54_300_000, 3593)
+		// Minimum execution time: 51_425_000 picoseconds.
+		Weight::from_parts(52_213_000, 3593)
 			.saturating_add(T::DbWeight::get().reads(5))
 			.saturating_add(T::DbWeight::get().writes(3))
 	}
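These regenerated weight functions all share the generated-code pattern: `Weight::from_parts(ref_time_ps, proof_size_bytes)` plus a saturating add of the runtime's per-database-operation weights. Note also that `18_446_744_073_709_551_000` picoseconds is `u64::MAX` rounded down to the benchmark's precision, i.e. the `Weight::MAX` override sentinel (see the `benchmarking.rs` change later in this diff), replacing the old `2_000_000_000_000` placeholder. A minimal sketch of the composition, with illustrative constants rather than the benchmarked values:

```rust
use frame_support::weights::Weight;

// Sketch only: mirrors the shape of the generated functions above.
// The constants are illustrative, not benchmarked values.
pub fn example_weight<T: frame_system::Config>() -> Weight {
	// from_parts(ref_time, proof_size): execution time in picoseconds and
	// the PoV (proof) size bound in bytes.
	Weight::from_parts(75_454_000, 6196)
		// Each storage access adds the runtime-configured DB weight.
		.saturating_add(T::DbWeight::get().reads(6))
		.saturating_add(T::DbWeight::get().writes(4))
}
```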
diff --git a/polkadot/scripts/build-demos.sh b/polkadot/scripts/build-demos.sh
deleted file mode 100755
index 285da143c17d8..0000000000000
--- a/polkadot/scripts/build-demos.sh
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/usr/bin/env bash
-
-# This script assumes that all pre-requisites are installed.
-
-set -e
-
-PROJECT_ROOT=`git rev-parse --show-toplevel`
-source `dirname "$0"`/common.sh
-
-export CARGO_INCREMENTAL=0
-
-# Save current directory.
-pushd .
-
-cd $ROOT
-
-for DEMO in "${DEMOS[@]}"
-do
-  echo "*** Building wasm binaries in $DEMO"
-  cd "$PROJECT_ROOT/$DEMO"
-
-  ./build.sh
-
-  cd - >> /dev/null
-done
-
-# Restore initial directory.
-popd
diff --git a/polkadot/scripts/run_all_benches.sh b/polkadot/scripts/run_all_benches.sh
deleted file mode 100755
index 923013f351555..0000000000000
--- a/polkadot/scripts/run_all_benches.sh
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/bin/bash
-
-# Runs all benchmarks for all pallets, for each of the runtimes specified below
-# Should be run on a reference machine to gain accurate benchmarks
-# current reference machine: https://github.com/paritytech/substrate/pull/5848
-
-runtimes=(
-  polkadot
-  kusama
-  westend
-)
-
-for runtime in "${runtimes[@]}"; do
-  "$(dirname "$0")/run_benches_for_runtime.sh" "$runtime"
-done
diff --git a/polkadot/tests/purge_chain_works.rs b/polkadot/tests/purge_chain_works.rs
index 831155fb4d7e1..f5a73e232e0cb 100644
--- a/polkadot/tests/purge_chain_works.rs
+++ b/polkadot/tests/purge_chain_works.rs
@@ -57,7 +57,6 @@ async fn purge_chain_rocksdb_works() {
 		assert!(cmd.wait().unwrap().success());
 		assert!(tmpdir.path().join("chains/rococo_dev").exists());
 		assert!(tmpdir.path().join("chains/rococo_dev/db/full").exists());
-		assert!(tmpdir.path().join("chains/rococo_dev/db/full/parachains").exists());
 
 		// Purge chain
 		let status = Command::new(cargo_bin("polkadot"))
@@ -102,7 +101,6 @@ async fn purge_chain_paritydb_works() {
 		assert!(cmd.wait().unwrap().success());
 		assert!(tmpdir.path().join("chains/rococo_dev").exists());
 		assert!(tmpdir.path().join("chains/rococo_dev/paritydb/full").exists());
-		assert!(tmpdir.path().join("chains/rococo_dev/paritydb/parachains").exists());
 
 		// Purge chain
 		let status = Command::new(cargo_bin("polkadot"))
@@ -118,8 +116,6 @@ async fn purge_chain_paritydb_works() {
 		// Make sure that the chain folder exists, but `db/full` is deleted.
 		assert!(tmpdir.path().join("chains/rococo_dev").exists());
 		assert!(!tmpdir.path().join("chains/rococo_dev/paritydb/full").exists());
-		// Parachains removal requires calling "purge-chain --parachains".
-		assert!(tmpdir.path().join("chains/rococo_dev/paritydb/parachains").exists());
 	})
 	.await;
 }
diff --git a/polkadot/utils/generate-bags/Cargo.toml b/polkadot/utils/generate-bags/Cargo.toml
index 873b0c0030ade..95ca57ea728e1 100644
--- a/polkadot/utils/generate-bags/Cargo.toml
+++ b/polkadot/utils/generate-bags/Cargo.toml
@@ -6,7 +6,7 @@ edition.workspace = true
 license.workspace = true
 
 [dependencies]
-clap = { version = "4.4.4", features = ["derive"] }
+clap = { version = "4.4.6", features = ["derive"] }
 
 generate-bags = { path = "../../../substrate/utils/frame/generate-bags" }
 sp-io = { path = "../../../substrate/primitives/io" }
diff --git a/polkadot/utils/remote-ext-tests/bags-list/Cargo.toml b/polkadot/utils/remote-ext-tests/bags-list/Cargo.toml
index c59d354dc4a96..e305edc039b5a 100644
--- a/polkadot/utils/remote-ext-tests/bags-list/Cargo.toml
+++ b/polkadot/utils/remote-ext-tests/bags-list/Cargo.toml
@@ -15,6 +15,6 @@ sp-tracing = { path = "../../../../substrate/primitives/tracing" }
 frame-system = { path = "../../../../substrate/frame/system" }
 sp-core = { path = "../../../../substrate/primitives/core" }
 
-clap = { version = "4.4.4", features = ["derive"] }
+clap = { version = "4.4.6", features = ["derive"] }
 log = "0.4.17"
 tokio = { version = "1.24.2", features = ["macros"] }
diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/benchmarking.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/benchmarking.rs
index 504b795403991..760fa33b693e5 100644
--- a/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/benchmarking.rs
+++ b/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/benchmarking.rs
@@ -20,6 +20,7 @@ use frame_benchmarking::{benchmarks_instance_pallet, BenchmarkError, BenchmarkRe
 use frame_support::{
 	pallet_prelude::Get,
 	traits::fungible::{Inspect, Mutate},
+	weights::Weight,
 };
 use sp_runtime::traits::{Bounded, Zero};
 use sp_std::{prelude::*, vec};
@@ -134,7 +135,7 @@ benchmarks_instance_pallet! {
 	reserve_asset_deposited {
 		let (trusted_reserve, transferable_reserve_asset) = T::TrustedReserve::get()
 			.ok_or(BenchmarkError::Override(
-				BenchmarkResult::from_weight(T::BlockWeights::get().max_block)
+				BenchmarkResult::from_weight(Weight::MAX)
 			))?;
 
 		let assets: MultiAssets = vec![ transferable_reserve_asset ].into();
@@ -187,7 +188,7 @@ benchmarks_instance_pallet! {
 	}: {
 		executor.bench_process(xcm).map_err(|_| {
 			BenchmarkError::Override(
-				BenchmarkResult::from_weight(T::BlockWeights::get().max_block)
+				BenchmarkResult::from_weight(Weight::MAX)
 			)
 		})?;
 	} verify {
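Replacing `T::BlockWeights::get().max_block` with `Weight::MAX` makes the override sentinel unambiguous: a benchmark that cannot run on a given runtime now reports a weight that cannot be mistaken for a real measurement. A hedged sketch of the override pattern, lifted out of the benchmark macro plumbing (`Option<u32>` stands in for `T::TrustedReserve::get()`):

```rust
use frame_benchmarking::{BenchmarkError, BenchmarkResult};
use frame_support::weights::Weight;

// Sketch: bail out of a benchmark with an explicit override when the
// runtime cannot support the instruction under test.
fn trusted_reserve_or_bail(maybe_reserve: Option<u32>) -> Result<u32, BenchmarkError> {
	maybe_reserve
		.ok_or(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)))
}
```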
diff --git a/polkadot/xcm/procedural/Cargo.toml b/polkadot/xcm/procedural/Cargo.toml
index 1ff73c64780e6..56df0d94f5860 100644
--- a/polkadot/xcm/procedural/Cargo.toml
+++ b/polkadot/xcm/procedural/Cargo.toml
@@ -11,5 +11,5 @@ proc-macro = true
 [dependencies]
 proc-macro2 = "1.0.56"
 quote = "1.0.28"
-syn = "2.0.37"
+syn = "2.0.38"
 Inflector = "0.11.4"
diff --git a/prdoc/pr_1818.prdoc b/prdoc/pr_1818.prdoc
new file mode 100644
index 0000000000000..cbafa02f9af56
--- /dev/null
+++ b/prdoc/pr_1818.prdoc
@@ -0,0 +1,16 @@
+title: FRAME pallets warning for unchecked weight witness
+
+doc:
+  - audience: Core Dev
+    description: |
+      FRAME pallets now emit a warning when a call's weight declaration uses a function argument whose name starts with an underscore, i.e. an unchecked weight witness.
+
+migrations:
+  db: []
+  runtime: []
+
+host_functions: []
+
+crates:
+- name: "frame-support-procedural"
+  semver: minor
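For illustration, a hypothetical pallet call of the kind the new warning targets: the underscore prefix tells the compiler the argument is unused, yet the weight formula relies on it, so the witness value is never checked in the call body. Everything here (`store`, `_data`) is made up for the sketch, and the surrounding `#[frame_support::pallet]` module, `Config` trait, and imports are omitted:

```rust
#[pallet::call]
impl<T: Config> Pallet<T> {
	#[pallet::call_index(0)]
	// Weight scales with `_data`, but the underscore prefix signals the body
	// never reads it -- the unchecked-witness pattern that now warns.
	#[pallet::weight(Weight::from_parts(10_000, 0).saturating_mul(_data.len() as u64))]
	pub fn store(origin: OriginFor<T>, _data: Vec<u8>) -> DispatchResult {
		ensure_signed(origin)?;
		Ok(())
	}
}
```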
diff --git a/substrate/.gitattributes b/substrate/.gitattributes
index a77c52fccdb77..4cb3ef4972feb 100644
--- a/substrate/.gitattributes
+++ b/substrate/.gitattributes
@@ -1,4 +1,2 @@
 Cargo.lock linguist-generated=true
-/.gitlab-ci.yml filter=ci-prettier
-/scripts/ci/gitlab/pipeline/*.yml filter=ci-prettier
 frame/**/src/weights.rs linguist-generated=true
diff --git a/substrate/.github/dependabot.yml b/substrate/.github/dependabot.yml
deleted file mode 100644
index 04cf0d1e1a5a4..0000000000000
--- a/substrate/.github/dependabot.yml
+++ /dev/null
@@ -1,12 +0,0 @@
-version: 2
-updates:
-  - package-ecosystem: "cargo"
-    directory: "/"
-    labels: ["A2-insubstantial", "B0-silent", "C1-low", "E2-dependencies"]
-    schedule:
-      interval: "daily"
-  - package-ecosystem: github-actions
-    directory: '/'
-    labels: ["A2-insubstantial", "B0-silent", "C1-low", "E2-dependencies"]
-    schedule:
-      interval: daily
diff --git a/substrate/.github/pr-custom-review.yml b/substrate/.github/pr-custom-review.yml
deleted file mode 100644
index 059f4a283af07..0000000000000
--- a/substrate/.github/pr-custom-review.yml
+++ /dev/null
@@ -1,39 +0,0 @@
-# 🔒 PROTECTED: Changes to locks-review-team should be approved by the current locks-review-team
-locks-review-team: locks-review
-team-leads-team: polkadot-review
-action-review-team: ci
-
-rules:
-  - name: Core developers
-    check_type: changed_files
-    condition:
-      include: .*
-      # excluding files from 'CI team' and 'FRAME coders' rules
-      exclude: ^\.gitlab-ci\.yml|^scripts/ci/.*|^\.github/.*|^\.config/nextest.toml|^frame/(?!.*(nfts/.*|uniques/.*|babe/.*|grandpa/.*|beefy|merkle-mountain-range/.*|contracts/.*|election|nomination-pools/.*|staking/.*|aura/.*))
-    min_approvals: 2
-    teams:
-      - core-devs
-
-  - name: FRAME coders
-    check_type: changed_files
-    condition:
-      include: ^frame/(?!.*(nfts/.*|uniques/.*|babe/.*|grandpa/.*|beefy|merkle-mountain-range/.*|contracts/.*|election|nomination-pools/.*|staking/.*|aura/.*))
-    all:
-      - min_approvals: 2
-        teams:
-          - core-devs
-      - min_approvals: 1
-        teams:
-          - frame-coders
-
-  - name: CI team
-    check_type: changed_files
-    condition:
-      include: ^\.gitlab-ci\.yml|^scripts/ci/.*|^\.github/.*|^\.config/nextest.toml
-    min_approvals: 2
-    teams:
-      - ci
-
-prevent-review-request:
-  teams:
-    - core-devs
diff --git a/substrate/.github/stale.yml b/substrate/.github/stale.yml
deleted file mode 100644
index 61d0fd0228d97..0000000000000
--- a/substrate/.github/stale.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-# Number of days of inactivity before an issue becomes stale
-daysUntilStale: 30
-# Number of days of inactivity before a stale issue is closed
-daysUntilClose: 14
-# Issues with these labels will never be considered stale
-exemptLabels:
-  - "D9-needsaudit 👮"
-# Label to use when marking an issue as stale
-staleLabel: "A3-stale"
-# we only bother with pull requests
-only: pulls
-# Comment to post when marking an issue as stale. Set to `false` to disable
-markComment: >
-  Hey, is anyone still working on this? Due to the inactivity this issue has
-  been automatically marked as stale. It will be closed if no further activity
-  occurs. Thank you for your contributions.
-# Comment to post when closing a stale issue. Set to `false` to disable
-closeComment: false
diff --git a/substrate/.github/workflows/auto-label-issues.yml b/substrate/.github/workflows/auto-label-issues.yml
deleted file mode 100644
index 12ffce702cdcc..0000000000000
--- a/substrate/.github/workflows/auto-label-issues.yml
+++ /dev/null
@@ -1,17 +0,0 @@
-# If the author of the issues is not a contributor to the project, label
-# the issue with 'Z0-unconfirmed'
-
-name: Label New Issues
-on:
-  issues:
-    types: [opened]
-
-jobs:
-  label-new-issues:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Label drafts
-        uses: andymckay/labeler@e6c4322d0397f3240f0e7e30a33b5c5df2d39e90 # 1.0.4
-        if: github.event.issue.author_association == 'NONE'
-        with:
-          add-labels: "I10-unconfirmed"
diff --git a/substrate/.github/workflows/burnin-label-notification.yml b/substrate/.github/workflows/burnin-label-notification.yml
deleted file mode 100644
index f45455d31db1e..0000000000000
--- a/substrate/.github/workflows/burnin-label-notification.yml
+++ /dev/null
@@ -1,24 +0,0 @@
-name: Notify devops when burn-in label applied
-on:
-  pull_request:
-    types: [labeled]
-
-jobs:
-  notify-devops:
-    runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        channel:
-          - name: 'Team: DevOps'
-            room: '!lUslSijLMgNcEKcAiE:parity.io'
-
-    steps:
-      - name: Notify devops
-        if: startsWith(github.event.label.name, 'A1-')
-        uses: s3krit/matrix-message-action@70ad3fb812ee0e45ff8999d6af11cafad11a6ecf # v0.0.3
-        with:
-          room_id: ${{ matrix.channel.room }}
-          access_token: ${{ secrets.RELEASENOTES_MATRIX_V2_ACCESS_TOKEN }}
-          server: "m.parity.io"
-          message: |
-            @room Burn-in request received for [${{ github.event.pull_request.title }}](${{ github.event.pull_request.html_url }})
diff --git a/substrate/.github/workflows/check-D-labels.yml b/substrate/.github/workflows/check-D-labels.yml
deleted file mode 100644
index 7bb358ce1182e..0000000000000
--- a/substrate/.github/workflows/check-D-labels.yml
+++ /dev/null
@@ -1,48 +0,0 @@
-name: Check D labels
-
-on:
-  pull_request:
-    types: [labeled, opened, synchronize, unlabeled]
-    paths:
-      - frame/**
-      - primitives/**
-
-env:
-  IMAGE: paritytech/ruled_labels:0.4.0
-
-jobs:
-  check-labels:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Pull image
-        run: docker pull $IMAGE
-
-      - name: Check labels
-        env:
-          MOUNT: /work
-          GITHUB_PR: ${{ github.event.pull_request.number }}
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-          API_BASE: https://api.github.com/repos
-          REPO: ${{ github.repository }}
-          RULES_PATH: labels/ruled_labels
-          CHECK_SPECS: specs_substrate.yaml
-        run: |
-          echo "REPO: ${REPO}"
-          echo "GITHUB_PR: ${GITHUB_PR}"
-          # Clone repo with labels specs
-          git clone https://github.com/paritytech/labels
-          # Fetch the labels for the PR under test
-          labels=$( curl -H "Authorization: token ${GITHUB_TOKEN}" -s "$API_BASE/${REPO}/pulls/${GITHUB_PR}" | jq '.labels | .[] | .name' | tr "\n" ",")
-
-          if [ -z "${labels}" ]; then
-            docker run --rm -i -v $PWD/${RULES_PATH}/:$MOUNT $IMAGE check $MOUNT/$CHECK_SPECS --tags audit --no-label
-          fi
-
-          labels_args=${labels: :-1}
-          printf "Checking labels: %s\n" "${labels_args}"
-
-          # Prevent the shell from splitting labels with spaces
-          IFS=","
-
-          # --dev is more useful to debug mode to debug
-          docker run --rm -i -v $PWD/${RULES_PATH}/:$MOUNT $IMAGE check $MOUNT/$CHECK_SPECS --labels ${labels_args} --dev --tags audit
diff --git a/substrate/.github/workflows/check-labels.yml b/substrate/.github/workflows/check-labels.yml
deleted file mode 100644
index 55b8f7389fa7f..0000000000000
--- a/substrate/.github/workflows/check-labels.yml
+++ /dev/null
@@ -1,45 +0,0 @@
-name: Check labels
-
-on:
-  pull_request:
-    types: [labeled, opened, synchronize, unlabeled]
-
-env:
-  IMAGE: paritytech/ruled_labels:0.4.0
-
-jobs:
-  check-labels:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Pull image
-        run: docker pull $IMAGE
-
-      - name: Check labels
-        env:
-          MOUNT: /work
-          GITHUB_PR: ${{ github.event.pull_request.number }}
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-          API_BASE: https://api.github.com/repos
-          REPO: ${{ github.repository }}
-          RULES_PATH: labels/ruled_labels
-          CHECK_SPECS: specs_substrate.yaml
-        run: |
-          echo "REPO: ${REPO}"
-          echo "GITHUB_PR: ${GITHUB_PR}"
-          # Clone repo with labels specs
-          git clone https://github.com/paritytech/labels
-          # Fetch the labels for the PR under test
-          labels=$( curl -H "Authorization: token ${GITHUB_TOKEN}" -s "$API_BASE/${REPO}/pulls/${GITHUB_PR}" | jq '.labels | .[] | .name' | tr "\n" ",")
-
-          if [ -z "${labels}" ]; then
-            docker run --rm -i -v $PWD/${RULES_PATH}/:$MOUNT $IMAGE check $MOUNT/$CHECK_SPECS --tags PR --no-label
-          fi
-
-          labels_args=${labels: :-1}
-          printf "Checking labels: %s\n" "${labels_args}"
-
-          # Prevent the shell from splitting labels with spaces
-          IFS=","
-
-          # --dev is more useful to debug mode to debug
-          docker run --rm -i -v $PWD/${RULES_PATH}/:$MOUNT $IMAGE check $MOUNT/$CHECK_SPECS --labels ${labels_args} --dev --tags PR
diff --git a/substrate/.github/workflows/md-link-check.yml b/substrate/.github/workflows/md-link-check.yml
deleted file mode 100644
index e1387f6da13f7..0000000000000
--- a/substrate/.github/workflows/md-link-check.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-name: Check Links
-
-on:
-  pull_request:
-    branches:
-    - master
-  push:
-    branches:
-    - master
-
-jobs:
-  markdown-link-check:
-    runs-on: ubuntu-latest
-    steps:
-    - uses: actions/checkout@v3
-    - uses: gaurav-nelson/github-action-markdown-link-check@0a51127e9955b855a9bbfa1ff5577f1d1338c9a5 # 1.0.14
-      with:
-        use-quiet-mode: 'yes'
-        config-file: '.github/workflows/mlc_config.json'
diff --git a/substrate/.github/workflows/mlc_config.json b/substrate/.github/workflows/mlc_config.json
deleted file mode 100644
index e7e620b39e0a9..0000000000000
--- a/substrate/.github/workflows/mlc_config.json
+++ /dev/null
@@ -1,7 +0,0 @@
-{
-    "ignorePatterns": [
-        {
-            "pattern": "^https://crates.io",
-        }
-    ]
-}
diff --git a/substrate/.github/workflows/monthly-tag.yml b/substrate/.github/workflows/monthly-tag.yml
deleted file mode 100644
index 055207d85a4dd..0000000000000
--- a/substrate/.github/workflows/monthly-tag.yml
+++ /dev/null
@@ -1,43 +0,0 @@
-name: Monthly Snapshot Tag
-
-on:
-  schedule:
-    - cron: "0 1 1 * *"
-  workflow_dispatch:
-
-jobs:
-  build:
-    name: Take Snapshot
-    runs-on: ubuntu-latest
-    steps:
-      - name: Get the tags by date
-        id: tags
-        run: |
-          echo "new=$(date +'monthly-%Y-%m')" >> $GITHUB_OUTPUT
-          echo "old=$(date -d'1 month ago' +'monthly-%Y-%m')" >> $GITHUB_OUTPUT
-      - name: Checkout branch "master"
-        uses: actions/checkout@v3
-        with:
-          ref: 'master'
-          fetch-depth: 0
-      - name: Generate changelog
-        id: changelog
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        run: |
-          echo "# Automatic snapshot pre-release ${{ steps.tags.outputs.new }}" > Changelog.md
-          echo "" >> Changelog.md
-          echo "## Changes since last snapshot (${{ steps.tags.outputs.old }})" >> Changelog.md
-          echo "" >> Changelog.md
-          ./scripts/ci/github/generate_changelog.sh ${{ steps.tags.outputs.old }} >>  Changelog.md
-      - name: Release snapshot
-        id: release-snapshot
-        uses: actions/create-release@0cb9c9b65d5d1901c1f53e5e66eaf4afd303e70e # v1.1.4 latest version, repo archived
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          tag_name: ${{ steps.tags.outputs.new }}
-          release_name: ${{ steps.tags.outputs.new }}
-          draft: false
-          prerelease: true
-          body_path: Changelog.md
diff --git a/substrate/.github/workflows/pr-custom-review.yml b/substrate/.github/workflows/pr-custom-review.yml
deleted file mode 100644
index 8e40c9ee72989..0000000000000
--- a/substrate/.github/workflows/pr-custom-review.yml
+++ /dev/null
@@ -1,42 +0,0 @@
-name: Assign reviewers
-
-on:
-  pull_request:
-    branches:
-      - master
-      - main
-    types:
-      - opened
-      - reopened
-      - synchronize
-      - review_requested
-      - review_request_removed
-      - ready_for_review
-      - converted_to_draft
-  pull_request_review:
-
-jobs:
-  pr-custom-review:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Skip if pull request is in Draft
-        # `if: github.event.pull_request.draft == true` should be kept here, at
-        # the step level, rather than at the job level. The latter is not
-        # recommended because when the PR is moved from "Draft" to "Ready to
-        # review" the workflow will immediately be passing (since it was skipped),
-        # even though it hasn't actually ran, since it takes a few seconds for
-        # the workflow to start. This is also disclosed in:
-        # https://github.community/t/dont-run-actions-on-draft-pull-requests/16817/17
-        # That scenario would open an opportunity for the check to be bypassed:
-        # 1. Get your PR approved
-        # 2. Move it to Draft
-        # 3. Push whatever commits you want
-        # 4. Move it to "Ready for review"; now the workflow is passing (it was
-        #    skipped) and "Check reviews" is also passing (it won't be updated
-        #    until the workflow is finished)
-        if: github.event.pull_request.draft == true
-        run: exit 1
-      - name: pr-custom-review
-        uses: paritytech/pr-custom-review@action-v3
-        with:
-          checks-reviews-api: http://pcr.parity-prod.parity.io/api/v1/check_reviews
diff --git a/substrate/.github/workflows/release-bot.yml b/substrate/.github/workflows/release-bot.yml
deleted file mode 100644
index 05bea32abc697..0000000000000
--- a/substrate/.github/workflows/release-bot.yml
+++ /dev/null
@@ -1,31 +0,0 @@
-name: Pushes release updates to a pre-defined Matrix room
-on:
-  release:
-    types:
-      - edited
-      - prereleased
-      - published
-jobs:
-  ping_matrix:
-    runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        channel:
-          - name: 'General: Rust, Polkadot, Substrate'
-            room: '!aJymqQYtCjjqImFLSb:parity.io'
-            pre-release: false
-
-    steps:
-      - name: send message
-        uses: s3krit/matrix-message-action@70ad3fb812ee0e45ff8999d6af11cafad11a6ecf # v0.0.3
-        with:
-          room_id: ${{ matrix.channel.room }}
-          access_token: ${{ secrets.RELEASENOTES_MATRIX_V2_ACCESS_TOKEN }}
-          server: "m.parity.io"
-          message: |
-            ***${{github.event.repository.full_name}}:*** A release has been ${{github.event.action}}<br/>
-            Release version [${{github.event.release.tag_name}}](${{github.event.release.html_url}})
-
-            -----
-
-            ${{github.event.release.body}}<br/>
diff --git a/substrate/.github/workflows/release-tagging.yml b/substrate/.github/workflows/release-tagging.yml
deleted file mode 100644
index 1862582f40eba..0000000000000
--- a/substrate/.github/workflows/release-tagging.yml
+++ /dev/null
@@ -1,20 +0,0 @@
-# Github action to ensure the `release` tag always tracks latest release
-
-name: Retag release
-
-on:
-  release:
-    types: [ published ]
-
-jobs:
-  build:
-    runs-on: ubuntu-latest
-
-    steps:
-      - name: Set Git tag
-        uses: s3krit/walking-tag-action@d04f7a53b72ceda4e20283736ce3627011275178 # latest version from master
-        with:
-          tag-name: release
-          tag-message: Latest release
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/substrate/.gitlab-ci.yml b/substrate/.gitlab-ci.yml
deleted file mode 100644
index f00836528973e..0000000000000
--- a/substrate/.gitlab-ci.yml
+++ /dev/null
@@ -1,412 +0,0 @@
-# .gitlab-ci.yml
-#
-# substrate
-#
-# pipelines can be triggered manually in the web
-#
-# Currently the file is divided into subfiles. Each stage has a different file which
-# can be found here: scripts/ci/gitlab/pipeline/<stage_name>.yml
-#
-# Instead of YAML anchors "extends" is used.
-# Useful links:
-#    https://docs.gitlab.com/ee/ci/yaml/index.html#extends
-#    https://docs.gitlab.com/ee/ci/yaml/yaml_optimization.html#reference-tags
-#
-# SAMPLE JOB TEMPLATE - This is not a complete example but is enough to build a
-# simple CI job. For full documentation, visit https://docs.gitlab.com/ee/ci/yaml/
-#
-# my-example-job:
-#   stage:                           test # One of the stages listed below this job (required)
-#   image:                           paritytech/tools:latest # Any docker image (required)
-#   allow_failure:                   true # Allow the pipeline to continue if this job fails (default: false)
-#   needs:
-#     - job:                         test-linux # Any jobs that are required to run before this job (optional)
-#   variables:
-#     MY_ENVIRONMENT_VARIABLE:       "some useful value" # Environment variables passed to the job (optional)
-#   script:
-#     - echo "List of shell commands to run in your job"
-#     - echo "You can also just specify a script here, like so:"
-#     - ./scripts/ci/gitlab/my_amazing_script.sh
-
-stages:
-  - check
-  - test
-  - build
-  - publish
-  - notify
-  - zombienet
-  - deploy
-
-workflow:
-  rules:
-    - if: $CI_COMMIT_TAG
-    - if: $CI_COMMIT_BRANCH
-
-variables:
-  GIT_STRATEGY: fetch
-  GIT_DEPTH: 100
-  CARGO_INCREMENTAL: 0
-  DOCKER_OS: "debian:bullseye"
-  ARCH: "x86_64"
-  CI_IMAGE: !reference [.ci-unified, variables, CI_IMAGE]
-  BUILDAH_IMAGE: "quay.io/buildah/stable:v1.29"
-  BUILDAH_COMMAND: "buildah --storage-driver overlay2"
-  RELENG_SCRIPTS_BRANCH: "master"
-
-  RUSTY_CACHIER_SINGLE_BRANCH: master
-  RUSTY_CACHIER_DONT_OPERATE_ON_MAIN_BRANCH: "true"
-  RUSTY_CACHIER_MINIO_ALIAS: rustycachier_gcs
-  RUSTY_CACHIER_MINIO_BUCKET: parity-build-rusty-cachier
-  RUSTY_CACHIER_COMPRESSION_METHOD: zstd
-
-  NEXTEST_FAILURE_OUTPUT: immediate-final
-  NEXTEST_SUCCESS_OUTPUT: final
-  ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.55"
-
-default:
-  retry:
-    max: 2
-    when:
-      - runner_system_failure
-      - unknown_failure
-      - api_failure
-  cache: {}
-  interruptible: true
-
-.collect-artifacts:
-  artifacts:
-    name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}"
-    when: on_success
-    expire_in: 7 days
-    paths:
-      - artifacts/
-
-.collect-artifacts-short:
-  artifacts:
-    name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}"
-    when: on_success
-    expire_in: 3 hours
-    paths:
-      - artifacts/
-
-.prepare-env:
-  before_script:
-    # TODO: remove unset invocation when we'll be free from 'ENV RUSTC_WRAPPER=sccache' & sccache
-    # itself in all images
-    - unset RUSTC_WRAPPER
-    # $WASM_BUILD_WORKSPACE_HINT enables wasm-builder to find the Cargo.lock from within generated
-    # packages
-    - export WASM_BUILD_WORKSPACE_HINT="$PWD"
-    # ensure that RUSTFLAGS are set correctly
-    - echo $RUSTFLAGS
-
-.job-switcher:
-  before_script:
-    - if echo "$CI_DISABLED_JOBS" | grep -xF "$CI_JOB_NAME"; then echo "The job has been cancelled in CI settings"; exit 0; fi
-
-.kubernetes-env:
-  image: "${CI_IMAGE}"
-  before_script:
-    - !reference [.timestamp, before_script]
-    - !reference [.job-switcher, before_script]
-    - !reference [.prepare-env, before_script]
-  tags:
-    - kubernetes-parity-build
-
-.rust-info-script:
-  script:
-    - rustup show
-    - cargo --version
-    - rustup +nightly show
-    - cargo +nightly --version
-
-.pipeline-stopper-vars:
-  script:
-    - !reference [.job-switcher, before_script]
-    - echo "Collecting env variables for the cancel-pipeline job"
-    - echo "FAILED_JOB_URL=${CI_JOB_URL}" > pipeline-stopper.env
-    - echo "FAILED_JOB_NAME=${CI_JOB_NAME}" >> pipeline-stopper.env
-    - echo "PR_NUM=${CI_COMMIT_REF_NAME}" >> pipeline-stopper.env
-
-.pipeline-stopper-artifacts:
-  artifacts:
-    reports:
-      dotenv: pipeline-stopper.env
-
-.docker-env:
-  image: "${CI_IMAGE}"
-  before_script:
-    - !reference [.timestamp, before_script]
-    - !reference [.job-switcher, before_script]
-    - !reference [.prepare-env, before_script]
-    - !reference [.rust-info-script, script]
-    - !reference [.rusty-cachier, before_script]
-    - !reference [.pipeline-stopper-vars, script]
-  after_script:
-    - !reference [.rusty-cachier, after_script]
-  tags:
-    - linux-docker-vm-c2
-
-# rusty-cachier's hidden job. Parts of this job are used to instrument the pipeline's other real jobs with rusty-cachier
-# Description of the commands is available here - https://gitlab.parity.io/parity/infrastructure/ci_cd/rusty-cachier/client#description
-.rusty-cachier:
-  before_script:
-    - curl -s https://gitlab-ci-token:${CI_JOB_TOKEN}@gitlab.parity.io/parity/infrastructure/ci_cd/rusty-cachier/client/-/raw/release/util/install.sh | bash
-    - rusty-cachier environment check --gracefully
-    - $(rusty-cachier environment inject)
-    - rusty-cachier project mtime
-  after_script:
-    - env RUSTY_CACHIER_SUPRESS_OUTPUT=true rusty-cachier snapshot destroy
-
-.test-refs:
-  rules:
-    - if: $CI_PIPELINE_SOURCE == "web"
-    - if: $CI_PIPELINE_SOURCE == "schedule"
-    - if: $CI_COMMIT_REF_NAME == "master"
-    - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs
-    - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1
-
-# handle the specific case where benches could store incorrect bench data because of the downstream staging runs
-# exclude cargo-check-benches from such runs
-.test-refs-check-benches:
-  rules:
-    - if: $CI_COMMIT_REF_NAME == "master" && $CI_PIPELINE_SOURCE == "pipeline"  && $CI_IMAGE =~ /staging$/
-      when: never
-    - if: $CI_PIPELINE_SOURCE == "web"
-    - if: $CI_PIPELINE_SOURCE == "schedule"
-    - if: $CI_COMMIT_REF_NAME == "master"
-    - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs
-    - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1
-
-.test-refs-no-trigger:
-  rules:
-    - if: $CI_PIPELINE_SOURCE == "pipeline"
-      when: never
-    - if: $CI_PIPELINE_SOURCE == "web"
-    - if: $CI_PIPELINE_SOURCE == "schedule"
-    - if: $CI_COMMIT_REF_NAME == "master"
-    - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs
-    - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1
-    - if: $CI_COMMIT_REF_NAME =~ /^ci-release-.*$/
-
-.test-refs-no-trigger-prs-only:
-  rules:
-    - if: $CI_PIPELINE_SOURCE == "pipeline"
-      when: never
-    - if: $CI_PIPELINE_SOURCE == "web"
-    - if: $CI_PIPELINE_SOURCE == "schedule"
-    - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs
-
-.publish-refs:
-  rules:
-    - if: $CI_PIPELINE_SOURCE == "pipeline"
-      when: never
-    - if: $CI_PIPELINE_SOURCE == "web"
-    - if: $CI_PIPELINE_SOURCE == "schedule"
-    - if: $CI_COMMIT_REF_NAME == "master"
-    - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1
-
-.build-refs:
-  # publish-refs + PRs
-  rules:
-    - if: $CI_PIPELINE_SOURCE == "pipeline"
-      when: never
-    - if: $CI_PIPELINE_SOURCE == "web"
-    - if: $CI_PIPELINE_SOURCE == "schedule"
-    - if: $CI_COMMIT_REF_NAME == "master"
-    - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1
-    - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs
-
-.zombienet-refs:
-  extends: .build-refs
-
-.crates-publishing-variables:
-  variables:
-    CRATESIO_CRATES_OWNER: parity-crate-owner
-    REPO: substrate
-    REPO_OWNER: paritytech
-
-.crates-publishing-pipeline:
-  extends: .crates-publishing-variables
-  rules:
-    - if: $CI_PIPELINE_SOURCE == "schedule" && $CI_COMMIT_REF_NAME == "master" && $PIPELINE == "automatic-crate-publishing"
-
-.crates-publishing-template:
-  extends:
-    - .docker-env
-    - .crates-publishing-variables
-  # collect artifacts even on failure so that we know how the crates were generated (they'll be
-  # generated to the artifacts folder according to SPUB_TMP further down)
-  artifacts:
-    name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}"
-    when: always
-    expire_in: 7 days
-    paths:
-      - artifacts/
-  variables:
-    SPUB_TMP: artifacts
-    # disable timestamping for the crate publishing jobs, they leave stray child processes behind
-    # which don't interact well with the timestamping script
-    CI_DISABLE_TIMESTAMP: 1
-
-#### stage:                       .pre
-
-check-crates-publishing-pipeline:
-  stage: .pre
-  extends:
-    - .kubernetes-env
-    - .crates-publishing-pipeline
-  script:
-    - git clone
-      --depth 1
-      --branch "$RELENG_SCRIPTS_BRANCH"
-      https://github.com/paritytech/releng-scripts.git
-    - ONLY_CHECK_PIPELINE=true ./releng-scripts/publish-crates
-
-# By default our pipelines are interruptible, but some special pipelines shouldn't be interrupted:
-# * multi-project pipelines such as the ones triggered by the scripts repo
-# * the scheduled automatic-crate-publishing pipeline
-#
-# In those cases, we add an uninterruptible .pre job; once that one has started,
-# the entire pipeline becomes uninterruptible
-uninterruptible-pipeline:
-  extends: .kubernetes-env
-  variables:
-    CI_IMAGE: "paritytech/tools:latest"
-  stage: .pre
-  interruptible: false
-  rules:
-    - if: $CI_PIPELINE_SOURCE == "pipeline"
-    - if: $CI_PIPELINE_SOURCE == "schedule" && $PIPELINE == "automatic-crate-publishing"
-  script: "true"
-
-include:
-  # check jobs
-  - scripts/ci/gitlab/pipeline/check.yml
-  # tests jobs
-  - scripts/ci/gitlab/pipeline/test.yml
-  # build jobs
-  - scripts/ci/gitlab/pipeline/build.yml
-  # publish jobs
-  - scripts/ci/gitlab/pipeline/publish.yml
-  # zombienet jobs
-  - scripts/ci/gitlab/pipeline/zombienet.yml
-  # The crate-publishing pipeline requires a customized `interruptible` configuration. Unfortunately
-  # `interruptible` can't currently be dynamically set based on variables as per:
-  # - https://gitlab.com/gitlab-org/gitlab/-/issues/38349
-  # - https://gitlab.com/gitlab-org/gitlab/-/issues/194023
-  # Thus we work around that limitation by using conditional includes.
-  # For crate-publishing pipelines: run it with defaults + `interruptible: false`. The WHOLE
-  # pipeline is made uninterruptible to ensure that test jobs also get a chance to run to
-  # completion, because the publishing jobs depends on them AS INTENDED: crates should not be
-  # published before their source code is checked.
-  - project: parity/infrastructure/ci_cd/shared
-    ref: main
-    file: /common/timestamp.yml
-  - project: parity/infrastructure/ci_cd/shared
-    ref: main
-    file: /common/ci-unified.yml
-
-
-#### stage:                        notify
-
-# This job notifies rusty-cachier about the latest commit with the cache.
-# This info is later used for the cache distribution and an overlay creation.
-# Note that we don't use any .rusty-cachier references as we assume that a pipeline has reached this stage with working rusty-cachier.
-rusty-cachier-notify:
-  stage: notify
-  extends: .kubernetes-env
-  variables:
-    CI_IMAGE: paritytech/rusty-cachier-env:latest
-    GIT_STRATEGY: none
-  dependencies: []
-  script:
-    - curl -s https://gitlab-ci-token:${CI_JOB_TOKEN}@gitlab.parity.io/parity/infrastructure/ci_cd/rusty-cachier/client/-/raw/release/util/install.sh | bash
-    - rusty-cachier cache notify
-
-#### stage:                        .post
-
-# This job cancels the whole pipeline if any of provided jobs fail.
-# In a DAG, every jobs chain is executed independently of others. The `fail_fast` principle suggests
-# to fail the pipeline as soon as possible to shorten the feedback loop.
-.cancel-pipeline-template:
-  stage: .post
-  rules:
-    - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs
-      when: on_failure
-  variables:
-    PROJECT_ID: "${CI_PROJECT_ID}"
-    PROJECT_NAME: "${CI_PROJECT_NAME}"
-    PIPELINE_ID: "${CI_PIPELINE_ID}"
-    FAILED_JOB_URL: "${FAILED_JOB_URL}"
-    FAILED_JOB_NAME: "${FAILED_JOB_NAME}"
-    PR_NUM: "${PR_NUM}"
-  trigger:
-    project: "parity/infrastructure/ci_cd/pipeline-stopper"
-
-remove-cancel-pipeline-message:
-  stage: .post
-  rules:
-    - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs
-  variables:
-    PROJECT_ID: "${CI_PROJECT_ID}"
-    PROJECT_NAME: "${CI_PROJECT_NAME}"
-    PIPELINE_ID: "${CI_PIPELINE_ID}"
-    FAILED_JOB_URL: "https://gitlab.com"
-    FAILED_JOB_NAME: "nope"
-    PR_NUM: "${CI_COMMIT_REF_NAME}"
-  trigger:
-    project: "parity/infrastructure/ci_cd/pipeline-stopper"
-    branch: "as-improve"
-
-# need to copy jobs this way because otherwise gitlab will wait
-# for all 3 jobs to finish instead of cancelling if one fails
-cancel-pipeline-test-linux-stable1:
-  extends: .cancel-pipeline-template
-  needs:
-    - job: "test-linux-stable 1/3"
-
-cancel-pipeline-test-linux-stable2:
-  extends: .cancel-pipeline-template
-  needs:
-    - job: "test-linux-stable 2/3"
-
-cancel-pipeline-test-linux-stable3:
-  extends: .cancel-pipeline-template
-  needs:
-    - job: "test-linux-stable 3/3"
-
-cancel-pipeline-cargo-check-benches1:
-  extends: .cancel-pipeline-template
-  needs:
-    - job: "cargo-check-benches 1/2"
-
-cancel-pipeline-cargo-check-benches2:
-  extends: .cancel-pipeline-template
-  needs:
-    - job: "cargo-check-benches 2/2"
-
-cancel-pipeline-test-linux-stable-int:
-  extends: .cancel-pipeline-template
-  needs:
-    - job: test-linux-stable-int
-
-cancel-pipeline-cargo-check-each-crate-1:
-  extends: .cancel-pipeline-template
-  needs:
-    - job: "cargo-check-each-crate 1/2"
-
-cancel-pipeline-cargo-check-each-crate-2:
-  extends: .cancel-pipeline-template
-  needs:
-    - job: "cargo-check-each-crate 2/2"
-
-cancel-pipeline-cargo-check-each-crate-macos:
-  extends: .cancel-pipeline-template
-  needs:
-    - job: cargo-check-each-crate-macos
-
-cancel-pipeline-check-tracing:
-  extends: .cancel-pipeline-template
-  needs:
-    - job: check-tracing
diff --git a/substrate/Cargo.toml b/substrate/Cargo.toml
new file mode 100644
index 0000000000000..d77f02c606031
--- /dev/null
+++ b/substrate/Cargo.toml
@@ -0,0 +1,28 @@
+[package]
+name = "substrate"
+description = "Next-generation framework for blockchain innovation"
+license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
+homepage = "https://substrate.io"
+repository.workspace = true
+authors.workspace = true
+edition.workspace = true
+version = "1.0.0"
+
+# The dependencies are only needed for docs.
+[dependencies]
+aquamarine = "0.3.2"
+
+subkey = { path = "bin/utils/subkey" }
+chain-spec-builder = { path = "bin/utils/chain-spec-builder" }
+
+sc-service = { path = "client/service" }
+sc-cli = { path = "client/cli" }
+sc-consensus-aura = { path = "client/consensus/aura" }
+sc-consensus-babe = { path = "client/consensus/babe" }
+sc-consensus-grandpa = { path = "client/consensus/grandpa" }
+sc-consensus-beefy = { path = "client/consensus/beefy" }
+sc-consensus-manual-seal = { path = "client/consensus/manual-seal" }
+sc-consensus-pow = { path = "client/consensus/pow" }
+
+sp-runtime = { path = "primitives/runtime" }
+frame-support = { path = "frame/support" }
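This new umbrella crate exists so rustdoc has an entry point; the `[dependencies]` are pulled in only so the docs can link to them. A hypothetical sketch of what the matching `src/lib.rs` doc stub could look like (not the actual file contents):

```rust
//! # Substrate
//!
//! Docs-only umbrella crate: the dependencies exist so intra-doc links such
//! as [`sc_service`] and [`frame_support`] resolve. (Illustrative stub.)
```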
diff --git a/substrate/bin/node-template/node/Cargo.toml b/substrate/bin/node-template/node/Cargo.toml
index 35654e7d56499..23840cce2229b 100644
--- a/substrate/bin/node-template/node/Cargo.toml
+++ b/substrate/bin/node-template/node/Cargo.toml
@@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 name = "node-template"
 
 [dependencies]
-clap = { version = "4.4.4", features = ["derive"] }
+clap = { version = "4.4.6", features = ["derive"] }
 futures = { version = "0.3.21", features = ["thread-pool"]}
 
 sc-cli = { path = "../../../client/cli" }
diff --git a/substrate/bin/node/bench/Cargo.toml b/substrate/bin/node/bench/Cargo.toml
index 33560bca4923d..c111d345623d5 100644
--- a/substrate/bin/node/bench/Cargo.toml
+++ b/substrate/bin/node/bench/Cargo.toml
@@ -13,7 +13,7 @@ publish = false
 
 [dependencies]
 array-bytes = "6.1"
-clap = { version = "4.4.4", features = ["derive"] }
+clap = { version = "4.4.6", features = ["derive"] }
 log = "0.4.17"
 node-primitives = { path = "../primitives" }
 node-testing = { path = "../testing" }
diff --git a/substrate/bin/node/cli/Cargo.toml b/substrate/bin/node/cli/Cargo.toml
index c47f8a5c3e52f..49dc39099be03 100644
--- a/substrate/bin/node/cli/Cargo.toml
+++ b/substrate/bin/node/cli/Cargo.toml
@@ -38,7 +38,7 @@ crate-type = ["cdylib", "rlib"]
 [dependencies]
 # third-party dependencies
 array-bytes = "6.1"
-clap = { version = "4.4.4", features = ["derive"], optional = true }
+clap = { version = "4.4.6", features = ["derive"], optional = true }
 codec = { package = "parity-scale-codec", version = "3.6.1" }
 serde = { version = "1.0.188", features = ["derive"] }
 jsonrpsee = { version = "0.16.2", features = ["server"] }
@@ -60,6 +60,7 @@ sp-keystore = { path = "../../../primitives/keystore" }
 sp-consensus = { path = "../../../primitives/consensus/common" }
 sp-transaction-storage-proof = { path = "../../../primitives/transaction-storage-proof" }
 sp-io = { path = "../../../primitives/io" }
+sp-mixnet = { path = "../../../primitives/mixnet" }
 sp-statement-store = { path = "../../../primitives/statement-store" }
 
 # client dependencies
@@ -82,6 +83,7 @@ sc-service = { path = "../../../client/service", default-features = false}
 sc-telemetry = { path = "../../../client/telemetry" }
 sc-executor = { path = "../../../client/executor" }
 sc-authority-discovery = { path = "../../../client/authority-discovery" }
+sc-mixnet = { path = "../../../client/mixnet" }
 sc-sync-state-rpc = { path = "../../../client/sync-state-rpc" }
 sc-sysinfo = { path = "../../../client/sysinfo" }
 sc-storage-monitor = { path = "../../../client/storage-monitor" }
@@ -135,7 +137,7 @@ pallet-timestamp = { path = "../../../frame/timestamp" }
 substrate-cli-test-utils = { path = "../../../test-utils/cli" }
 
 [build-dependencies]
-clap = { version = "4.4.4", optional = true }
+clap = { version = "4.4.6", optional = true }
 clap_complete = { version = "4.0.2", optional = true }
 node-inspect = { path = "../inspect", optional = true}
 frame-benchmarking-cli = { path = "../../../utils/frame/benchmarking-cli", optional = true}
diff --git a/substrate/bin/node/cli/benches/block_production.rs b/substrate/bin/node/cli/benches/block_production.rs
index b877aa7350228..246de8f3e925d 100644
--- a/substrate/bin/node/cli/benches/block_production.rs
+++ b/substrate/bin/node/cli/benches/block_production.rs
@@ -100,7 +100,7 @@ fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase {
 		wasm_runtime_overrides: None,
 	};
 
-	node_cli::service::new_full_base(config, false, |_, _| ())
+	node_cli::service::new_full_base(config, None, false, |_, _| ())
 		.expect("creating a full node doesn't fail")
 }
 
diff --git a/substrate/bin/node/cli/benches/transaction_pool.rs b/substrate/bin/node/cli/benches/transaction_pool.rs
index d21edc55bbac3..47f890574151d 100644
--- a/substrate/bin/node/cli/benches/transaction_pool.rs
+++ b/substrate/bin/node/cli/benches/transaction_pool.rs
@@ -96,7 +96,7 @@ fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase {
 		wasm_runtime_overrides: None,
 	};
 
-	node_cli::service::new_full_base(config, false, |_, _| ()).expect("Creates node")
+	node_cli::service::new_full_base(config, None, false, |_, _| ()).expect("Creates node")
 }
 
 fn create_accounts(num: usize) -> Vec<sr25519::Pair> {
diff --git a/substrate/bin/node/cli/src/chain_spec.rs b/substrate/bin/node/cli/src/chain_spec.rs
index 51beaad03688a..52b480925aa94 100644
--- a/substrate/bin/node/cli/src/chain_spec.rs
+++ b/substrate/bin/node/cli/src/chain_spec.rs
@@ -33,6 +33,7 @@ use serde::{Deserialize, Serialize};
 use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId;
 use sp_consensus_babe::AuthorityId as BabeId;
 use sp_core::{crypto::UncheckedInto, sr25519, Pair, Public};
+use sp_mixnet::types::AuthorityId as MixnetId;
 use sp_runtime::{
 	traits::{IdentifyAccount, Verify},
 	Perbill,
@@ -72,8 +73,9 @@ fn session_keys(
 	babe: BabeId,
 	im_online: ImOnlineId,
 	authority_discovery: AuthorityDiscoveryId,
+	mixnet: MixnetId,
 ) -> SessionKeys {
-	SessionKeys { grandpa, babe, im_online, authority_discovery }
+	SessionKeys { grandpa, babe, im_online, authority_discovery, mixnet }
 }
 
 fn staging_testnet_config_genesis() -> RuntimeGenesisConfig {
@@ -93,6 +95,7 @@ fn staging_testnet_config_genesis() -> RuntimeGenesisConfig {
 		BabeId,
 		ImOnlineId,
 		AuthorityDiscoveryId,
+		MixnetId,
 	)> = vec![
 		(
 			// 5Fbsd6WXDGiLTxunqeK5BATNiocfCqu9bS1yArVjCgeBLkVy
@@ -111,6 +114,9 @@ fn staging_testnet_config_genesis() -> RuntimeGenesisConfig {
 			// 5EZaeQ8djPcq9pheJUhgerXQZt9YaHnMJpiHMRhwQeinqUW8
 			array_bytes::hex2array_unchecked("6e7e4eb42cbd2e0ab4cae8708ce5509580b8c04d11f6758dbf686d50fe9f9106")
 				.unchecked_into(),
+			// 5EZaeQ8djPcq9pheJUhgerXQZt9YaHnMJpiHMRhwQeinqUW8
+			array_bytes::hex2array_unchecked("6e7e4eb42cbd2e0ab4cae8708ce5509580b8c04d11f6758dbf686d50fe9f9106")
+				.unchecked_into(),
 		),
 		(
 			// 5ERawXCzCWkjVq3xz1W5KGNtVx2VdefvZ62Bw1FEuZW4Vny2
@@ -129,6 +135,9 @@ fn staging_testnet_config_genesis() -> RuntimeGenesisConfig {
 			// 5DhLtiaQd1L1LU9jaNeeu9HJkP6eyg3BwXA7iNMzKm7qqruQ
 			array_bytes::hex2array_unchecked("482dbd7297a39fa145c570552249c2ca9dd47e281f0c500c971b59c9dcdcd82e")
 				.unchecked_into(),
+			// 5DhLtiaQd1L1LU9jaNeeu9HJkP6eyg3BwXA7iNMzKm7qqruQ
+			array_bytes::hex2array_unchecked("482dbd7297a39fa145c570552249c2ca9dd47e281f0c500c971b59c9dcdcd82e")
+				.unchecked_into(),
 		),
 		(
 			// 5DyVtKWPidondEu8iHZgi6Ffv9yrJJ1NDNLom3X9cTDi98qp
@@ -147,6 +156,9 @@ fn staging_testnet_config_genesis() -> RuntimeGenesisConfig {
 			// 5DhKqkHRkndJu8vq7pi2Q5S3DfftWJHGxbEUNH43b46qNspH
 			array_bytes::hex2array_unchecked("482a3389a6cf42d8ed83888cfd920fec738ea30f97e44699ada7323f08c3380a")
 				.unchecked_into(),
+			// 5DhKqkHRkndJu8vq7pi2Q5S3DfftWJHGxbEUNH43b46qNspH
+			array_bytes::hex2array_unchecked("482a3389a6cf42d8ed83888cfd920fec738ea30f97e44699ada7323f08c3380a")
+				.unchecked_into(),
 		),
 		(
 			// 5HYZnKWe5FVZQ33ZRJK1rG3WaLMztxWrrNDb1JRwaHHVWyP9
@@ -165,6 +177,9 @@ fn staging_testnet_config_genesis() -> RuntimeGenesisConfig {
 			// 5C4vDQxA8LTck2xJEy4Yg1hM9qjDt4LvTQaMo4Y8ne43aU6x
 			array_bytes::hex2array_unchecked("00299981a2b92f878baaf5dbeba5c18d4e70f2a1fcd9c61b32ea18daf38f4378")
 				.unchecked_into(),
+			// 5C4vDQxA8LTck2xJEy4Yg1hM9qjDt4LvTQaMo4Y8ne43aU6x
+			array_bytes::hex2array_unchecked("00299981a2b92f878baaf5dbeba5c18d4e70f2a1fcd9c61b32ea18daf38f4378")
+				.unchecked_into(),
 		),
 	];
 
@@ -217,7 +232,7 @@ where
 /// Helper function to generate stash, controller and session key from seed.
 pub fn authority_keys_from_seed(
 	seed: &str,
-) -> (AccountId, AccountId, GrandpaId, BabeId, ImOnlineId, AuthorityDiscoveryId) {
+) -> (AccountId, AccountId, GrandpaId, BabeId, ImOnlineId, AuthorityDiscoveryId, MixnetId) {
 	(
 		get_account_id_from_seed::<sr25519::Public>(&format!("{}//stash", seed)),
 		get_account_id_from_seed::<sr25519::Public>(seed),
@@ -225,6 +240,7 @@ pub fn authority_keys_from_seed(
 		get_from_seed::<BabeId>(seed),
 		get_from_seed::<ImOnlineId>(seed),
 		get_from_seed::<AuthorityDiscoveryId>(seed),
+		get_from_seed::<MixnetId>(seed),
 	)
 }
 
@@ -237,6 +253,7 @@ pub fn testnet_genesis(
 		BabeId,
 		ImOnlineId,
 		AuthorityDiscoveryId,
+		MixnetId,
 	)>,
 	initial_nominators: Vec<AccountId>,
 	root_key: AccountId,
@@ -306,7 +323,13 @@ pub fn testnet_genesis(
 					(
 						x.0.clone(),
 						x.0.clone(),
-						session_keys(x.2.clone(), x.3.clone(), x.4.clone(), x.5.clone()),
+						session_keys(
+							x.2.clone(),
+							x.3.clone(),
+							x.4.clone(),
+							x.5.clone(),
+							x.6.clone(),
+						),
 					)
 				})
 				.collect::<Vec<_>>(),
@@ -367,6 +390,7 @@ pub fn testnet_genesis(
 			..Default::default()
 		},
 		glutton: Default::default(),
+		mixnet: Default::default(),
 	}
 }
 
@@ -475,7 +499,7 @@ pub(crate) mod tests {
 
 		sc_service_test::connectivity(integration_test_config_with_two_authorities(), |config| {
 			let NewFullBase { task_manager, client, network, sync, transaction_pool, .. } =
-				new_full_base(config, false, |_, _| ())?;
+				new_full_base(config, None, false, |_, _| ())?;
 			Ok(sc_service_test::TestNetComponents::new(
 				task_manager,
 				client,
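With the mixnet session key added, each authority tuple gains a seventh element and `session_keys` takes five keys. A short fragment (not standalone; it uses the helpers defined in `chain_spec.rs` above) showing how they now compose for a dev authority:

```rust
// Sketch, using authority_keys_from_seed and session_keys from above.
let (_stash, _controller, grandpa, babe, im_online, authority_discovery, mixnet) =
	authority_keys_from_seed("Alice");
let keys = session_keys(grandpa, babe, im_online, authority_discovery, mixnet);
```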
diff --git a/substrate/bin/node/cli/src/cli.rs b/substrate/bin/node/cli/src/cli.rs
index 4e0d6303870cb..f3c0435fd32dc 100644
--- a/substrate/bin/node/cli/src/cli.rs
+++ b/substrate/bin/node/cli/src/cli.rs
@@ -27,6 +27,10 @@ pub struct Cli {
 	#[clap(flatten)]
 	pub run: sc_cli::RunCmd,
 
+	#[allow(missing_docs)]
+	#[clap(flatten)]
+	pub mixnet_params: sc_cli::MixnetParams,
+
 	/// Disable automatic hardware benchmarks.
 	///
 	/// By default these benchmarks are automatically run at startup and measure
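`#[clap(flatten)]` is how sc_cli's reusable parameter structs are composed into node CLIs, so `MixnetParams` rides along without `Cli` redefining any flags. A minimal standalone sketch of the same composition; the `--mixnet` flag name here is illustrative, not a claim about `sc_cli::MixnetParams`:

```rust
use clap::Parser;

#[derive(Debug, clap::Args)]
struct MixnetParams {
	/// Enable the mixnet service (illustrative flag).
	#[arg(long)]
	mixnet: bool,
}

#[derive(Debug, Parser)]
struct Cli {
	// Merges MixnetParams' flags into this CLI, as done above.
	#[clap(flatten)]
	mixnet_params: MixnetParams,
}

fn main() {
	let cli = Cli::parse();
	println!("mixnet enabled: {}", cli.mixnet_params.mixnet);
}
```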
diff --git a/substrate/bin/node/cli/src/command.rs b/substrate/bin/node/cli/src/command.rs
index 6bd8b76581acf..16d0415ff2637 100644
--- a/substrate/bin/node/cli/src/command.rs
+++ b/substrate/bin/node/cli/src/command.rs
@@ -111,7 +111,7 @@ pub fn run() -> Result<()> {
 					},
 					BenchmarkCmd::Block(cmd) => {
 						// ensure that we keep the task manager alive
-						let partial = new_partial(&config)?;
+						let partial = new_partial(&config, None)?;
 						cmd.run(partial.client)
 					},
 					#[cfg(not(feature = "runtime-benchmarks"))]
@@ -122,7 +122,7 @@ pub fn run() -> Result<()> {
 					#[cfg(feature = "runtime-benchmarks")]
 					BenchmarkCmd::Storage(cmd) => {
 						// ensure that we keep the task manager alive
-						let partial = new_partial(&config)?;
+						let partial = new_partial(&config, None)?;
 						let db = partial.backend.expose_db();
 						let storage = partial.backend.expose_storage();
 
@@ -130,7 +130,7 @@ pub fn run() -> Result<()> {
 					},
 					BenchmarkCmd::Overhead(cmd) => {
 						// ensure that we keep the task manager alive
-						let partial = new_partial(&config)?;
+						let partial = new_partial(&config, None)?;
 						let ext_builder = RemarkBuilder::new(partial.client.clone());
 
 						cmd.run(
@@ -143,7 +143,7 @@ pub fn run() -> Result<()> {
 					},
 					BenchmarkCmd::Extrinsic(cmd) => {
 						// ensure that we keep the task manager alive
-						let partial = service::new_partial(&config)?;
+						let partial = service::new_partial(&config, None)?;
 						// Register the *Remark* and *TKA* builders.
 						let ext_factory = ExtrinsicFactory(vec![
 							Box::new(RemarkBuilder::new(partial.client.clone())),
@@ -178,21 +178,21 @@ pub fn run() -> Result<()> {
 			let runner = cli.create_runner(cmd)?;
 			runner.async_run(|config| {
 				let PartialComponents { client, task_manager, import_queue, .. } =
-					new_partial(&config)?;
+					new_partial(&config, None)?;
 				Ok((cmd.run(client, import_queue), task_manager))
 			})
 		},
 		Some(Subcommand::ExportBlocks(cmd)) => {
 			let runner = cli.create_runner(cmd)?;
 			runner.async_run(|config| {
-				let PartialComponents { client, task_manager, .. } = new_partial(&config)?;
+				let PartialComponents { client, task_manager, .. } = new_partial(&config, None)?;
 				Ok((cmd.run(client, config.database), task_manager))
 			})
 		},
 		Some(Subcommand::ExportState(cmd)) => {
 			let runner = cli.create_runner(cmd)?;
 			runner.async_run(|config| {
-				let PartialComponents { client, task_manager, .. } = new_partial(&config)?;
+				let PartialComponents { client, task_manager, .. } = new_partial(&config, None)?;
 				Ok((cmd.run(client, config.chain_spec), task_manager))
 			})
 		},
@@ -200,7 +200,7 @@ pub fn run() -> Result<()> {
 			let runner = cli.create_runner(cmd)?;
 			runner.async_run(|config| {
 				let PartialComponents { client, task_manager, import_queue, .. } =
-					new_partial(&config)?;
+					new_partial(&config, None)?;
 				Ok((cmd.run(client, import_queue), task_manager))
 			})
 		},
@@ -211,7 +211,8 @@ pub fn run() -> Result<()> {
 		Some(Subcommand::Revert(cmd)) => {
 			let runner = cli.create_runner(cmd)?;
 			runner.async_run(|config| {
-				let PartialComponents { client, task_manager, backend, .. } = new_partial(&config)?;
+				let PartialComponents { client, task_manager, backend, .. } =
+					new_partial(&config, None)?;
 				let aux_revert = Box::new(|client: Arc<FullClient>, backend, blocks| {
 					sc_consensus_babe::revert(client.clone(), backend, blocks)?;
 					grandpa::revert(client, blocks)?;
diff --git a/substrate/bin/node/cli/src/service.rs b/substrate/bin/node/cli/src/service.rs
index 977c90e73e9ff..5a85f4cde0ae0 100644
--- a/substrate/bin/node/cli/src/service.rs
+++ b/substrate/bin/node/cli/src/service.rs
@@ -134,6 +134,7 @@ pub fn create_extrinsic(
 /// Creates a new partial node.
 pub fn new_partial(
 	config: &Configuration,
+	mixnet_config: Option<&sc_mixnet::Config>,
 ) -> Result<
 	sc_service::PartialComponents<
 		FullClient,
@@ -154,6 +155,7 @@ pub fn new_partial(
 			grandpa::SharedVoterState,
 			Option<Telemetry>,
 			Arc<StatementStore>,
+			Option<sc_mixnet::ApiBackend>,
 		),
 	>,
 	ServiceError,
@@ -246,6 +248,8 @@ pub fn new_partial(
 	)
 	.map_err(|e| ServiceError::Other(format!("Statement store error: {:?}", e)))?;
 
+	let (mixnet_api, mixnet_api_backend) = mixnet_config.map(sc_mixnet::Api::new).unzip();
+
 	let (rpc_extensions_builder, rpc_setup) = {
 		let (_, grandpa_link, _) = &import_setup;
 
@@ -287,6 +291,7 @@ pub fn new_partial(
 				},
 				statement_store: rpc_statement_store.clone(),
 				backend: rpc_backend.clone(),
+				mixnet_api: mixnet_api.as_ref().cloned(),
 			};
 
 			node_rpc::create_full(deps).map_err(Into::into)
@@ -303,7 +308,14 @@ pub fn new_partial(
 		select_chain,
 		import_queue,
 		transaction_pool,
-		other: (rpc_extensions_builder, import_setup, rpc_setup, telemetry, statement_store),
+		other: (
+			rpc_extensions_builder,
+			import_setup,
+			rpc_setup,
+			telemetry,
+			statement_store,
+			mixnet_api_backend,
+		),
 	})
 }
 
@@ -326,6 +338,7 @@ pub struct NewFullBase {
 /// Creates a full service from the configuration.
 pub fn new_full_base(
 	config: Configuration,
+	mixnet_config: Option<sc_mixnet::Config>,
 	disable_hardware_benchmarks: bool,
 	with_startup_data: impl FnOnce(
 		&sc_consensus_babe::BabeBlockImport<Block, FullClient, FullGrandpaBlockImport>,
@@ -347,31 +360,36 @@ pub fn new_full_base(
 		keystore_container,
 		select_chain,
 		transaction_pool,
-		other: (rpc_builder, import_setup, rpc_setup, mut telemetry, statement_store),
-	} = new_partial(&config)?;
+		other:
+			(rpc_builder, import_setup, rpc_setup, mut telemetry, statement_store, mixnet_api_backend),
+	} = new_partial(&config, mixnet_config.as_ref())?;
 
 	let shared_voter_state = rpc_setup;
 	let auth_disc_publish_non_global_ips = config.network.allow_non_globals_in_dht;
 	let mut net_config = sc_network::config::FullNetworkConfiguration::new(&config.network);
 
-	let grandpa_protocol_name = grandpa::protocol_standard_name(
-		&client.block_hash(0).ok().flatten().expect("Genesis block exists; qed"),
-		&config.chain_spec,
-	);
+	let genesis_hash = client.block_hash(0).ok().flatten().expect("Genesis block exists; qed");
+
+	let grandpa_protocol_name = grandpa::protocol_standard_name(&genesis_hash, &config.chain_spec);
 	net_config.add_notification_protocol(grandpa::grandpa_peers_set_config(
 		grandpa_protocol_name.clone(),
 	));
 
 	let statement_handler_proto = sc_network_statement::StatementHandlerPrototype::new(
-		client
-			.block_hash(0u32.into())
-			.ok()
-			.flatten()
-			.expect("Genesis block exists; qed"),
+		genesis_hash,
 		config.chain_spec.fork_id(),
 	);
 	net_config.add_notification_protocol(statement_handler_proto.set_config());
 
+	let mixnet_protocol_name =
+		sc_mixnet::protocol_name(genesis_hash.as_ref(), config.chain_spec.fork_id());
+	if let Some(mixnet_config) = &mixnet_config {
+		net_config.add_notification_protocol(sc_mixnet::peers_set_config(
+			mixnet_protocol_name.clone(),
+			mixnet_config,
+		));
+	}
+
 	let warp_sync = Arc::new(grandpa::warp_proof::NetworkProvider::new(
 		backend.clone(),
 		import_setup.1.shared_authority_set().clone(),
@@ -391,6 +409,20 @@ pub fn new_full_base(
 			block_relay: None,
 		})?;
 
+	if let Some(mixnet_config) = mixnet_config {
+		let mixnet = sc_mixnet::run(
+			mixnet_config,
+			mixnet_api_backend.expect("Mixnet API backend created if mixnet enabled"),
+			client.clone(),
+			sync_service.clone(),
+			network.clone(),
+			mixnet_protocol_name,
+			transaction_pool.clone(),
+			Some(keystore_container.keystore()),
+		);
+		task_manager.spawn_handle().spawn("mixnet", None, mixnet);
+	}
+
 	let role = config.role.clone();
 	let force_authoring = config.force_authoring;
 	let backoff_authoring_blocks =
@@ -546,7 +578,7 @@ pub fn new_full_base(
 		// and vote data availability than the observer. The observer has not
 		// been tested extensively yet and having most nodes in a network run it
 		// could lead to finality stalls.
-		let grandpa_config = grandpa::GrandpaParams {
+		let grandpa_params = grandpa::GrandpaParams {
 			config: grandpa_config,
 			link: grandpa_link,
 			network: network.clone(),
@@ -563,7 +595,7 @@ pub fn new_full_base(
 		task_manager.spawn_essential_handle().spawn_blocking(
 			"grandpa-voter",
 			None,
-			grandpa::run_grandpa_voter(grandpa_config)?,
+			grandpa::run_grandpa_voter(grandpa_params)?,
 		);
 	}
 
@@ -623,8 +655,9 @@ pub fn new_full_base(
 
 /// Builds a new service for a full client.
 pub fn new_full(config: Configuration, cli: Cli) -> Result<TaskManager, ServiceError> {
+	let mixnet_config = cli.mixnet_params.config(config.role.is_authority());
 	let database_source = config.database.clone();
-	let task_manager = new_full_base(config, cli.no_hardware_benchmarks, |_, _| ())
+	let task_manager = new_full_base(config, mixnet_config, cli.no_hardware_benchmarks, |_, _| ())
 		.map(|NewFullBase { task_manager, .. }| task_manager)?;
 
 	sc_storage_monitor::StorageMonitorService::try_spawn(
@@ -702,6 +735,7 @@ mod tests {
 				let NewFullBase { task_manager, client, network, sync, transaction_pool, .. } =
 					new_full_base(
 						config,
+						None,
 						false,
 						|block_import: &sc_consensus_babe::BabeBlockImport<Block, _, _>,
 						 babe_link: &sc_consensus_babe::BabeLink<Block>| {
@@ -876,7 +910,7 @@ mod tests {
 			crate::chain_spec::tests::integration_test_config_with_two_authorities(),
 			|config| {
 				let NewFullBase { task_manager, client, network, sync, transaction_pool, .. } =
-					new_full_base(config, false, |_, _| ())?;
+					new_full_base(config, None, false, |_, _| ())?;
 				Ok(sc_service_test::TestNetComponents::new(
 					task_manager,
 					client,
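A note on the `Option` plumbing in `new_partial`: `mixnet_config.map(sc_mixnet::Api::new).unzip()` turns an optional config into an optional API handle plus an optional backend, so everything downstream stays `None` when the mixnet is disabled. A self-contained sketch of the same pattern with stand-in types:

```rust
// Stand-ins for sc_mixnet::{Config, Api, ApiBackend}.
struct Config;
struct Api;
struct ApiBackend;

fn api_new(_config: &Config) -> (Api, ApiBackend) {
	(Api, ApiBackend)
}

fn split(config: Option<&Config>) -> (Option<Api>, Option<ApiBackend>) {
	// Option::unzip: Option<(A, B)> -> (Option<A>, Option<B>).
	config.map(api_new).unzip()
}

fn main() {
	let (api, backend) = split(Some(&Config));
	assert!(api.is_some() && backend.is_some());
	let (api, backend) = split(None);
	assert!(api.is_none() && backend.is_none());
}
```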
diff --git a/substrate/bin/node/executor/tests/submit_transaction.rs b/substrate/bin/node/executor/tests/submit_transaction.rs
index 7678a3c6e5a9f..5cbb0103d471b 100644
--- a/substrate/bin/node/executor/tests/submit_transaction.rs
+++ b/substrate/bin/node/executor/tests/submit_transaction.rs
@@ -239,7 +239,7 @@ fn submitted_transaction_should_be_valid() {
 		let author = extrinsic.signature.clone().unwrap().0;
 		let address = Indices::lookup(author).unwrap();
 		let data = pallet_balances::AccountData { free: 5_000_000_000_000, ..Default::default() };
-		let account = frame_system::AccountInfo { data, ..Default::default() };
+		let account = frame_system::AccountInfo { providers: 1, data, ..Default::default() };
 		<frame_system::Account<Runtime>>::insert(&address, account);
 
 		// check validity
diff --git a/substrate/bin/node/inspect/Cargo.toml b/substrate/bin/node/inspect/Cargo.toml
index 06e9674117e68..4a92db2918589 100644
--- a/substrate/bin/node/inspect/Cargo.toml
+++ b/substrate/bin/node/inspect/Cargo.toml
@@ -13,7 +13,7 @@ publish = false
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-clap = { version = "4.4.4", features = ["derive"] }
+clap = { version = "4.4.6", features = ["derive"] }
 codec = { package = "parity-scale-codec", version = "3.6.1" }
 thiserror = "1.0"
 sc-cli = { path = "../../../client/cli" }
diff --git a/substrate/bin/node/rpc/Cargo.toml b/substrate/bin/node/rpc/Cargo.toml
index ec8d16bd27ded..43db4ab9d34f7 100644
--- a/substrate/bin/node/rpc/Cargo.toml
+++ b/substrate/bin/node/rpc/Cargo.toml
@@ -23,6 +23,7 @@ sc-consensus-babe = { path = "../../../client/consensus/babe" }
 sc-consensus-babe-rpc = { path = "../../../client/consensus/babe/rpc" }
 sc-consensus-grandpa = { path = "../../../client/consensus/grandpa" }
 sc-consensus-grandpa-rpc = { path = "../../../client/consensus/grandpa/rpc" }
+sc-mixnet = { path = "../../../client/mixnet" }
 sc-rpc = { path = "../../../client/rpc" }
 sc-rpc-api = { path = "../../../client/rpc-api" }
 sc-rpc-spec-v2 = { path = "../../../client/rpc-spec-v2" }
diff --git a/substrate/bin/node/rpc/src/lib.rs b/substrate/bin/node/rpc/src/lib.rs
index 6d8aa5ff0a9da..acc58777e912d 100644
--- a/substrate/bin/node/rpc/src/lib.rs
+++ b/substrate/bin/node/rpc/src/lib.rs
@@ -92,6 +92,8 @@ pub struct FullDeps<C, P, SC, B> {
 	pub statement_store: Arc<dyn sp_statement_store::StatementStore>,
 	/// The backend used by the node.
 	pub backend: Arc<B>,
+	/// Mixnet API.
+	pub mixnet_api: Option<sc_mixnet::Api>,
 }
 
 /// Instantiate all Full RPC extensions.
@@ -106,6 +108,7 @@ pub fn create_full<C, P, SC, B>(
 		grandpa,
 		statement_store,
 		backend,
+		mixnet_api,
 	}: FullDeps<C, P, SC, B>,
 ) -> Result<RpcModule<()>, Box<dyn std::error::Error + Send + Sync>>
 where
@@ -133,6 +136,7 @@ where
 	use sc_consensus_grandpa_rpc::{Grandpa, GrandpaApiServer};
 	use sc_rpc::{
 		dev::{Dev, DevApiServer},
+		mixnet::MixnetApiServer,
 		statement::StatementApiServer,
 	};
 	use sc_rpc_spec_v2::chain_spec::{ChainSpec, ChainSpecApiServer};
@@ -196,5 +200,10 @@ where
 		sc_rpc::statement::StatementStore::new(statement_store, deny_unsafe).into_rpc();
 	io.merge(statement_store)?;
 
+	if let Some(mixnet_api) = mixnet_api {
+		let mixnet = sc_rpc::mixnet::Mixnet::new(mixnet_api).into_rpc();
+		io.merge(mixnet)?;
+	}
+
 	Ok(io)
 }
diff --git a/substrate/bin/node/runtime/Cargo.toml b/substrate/bin/node/runtime/Cargo.toml
index 7771b5f209719..e5bade12029e5 100644
--- a/substrate/bin/node/runtime/Cargo.toml
+++ b/substrate/bin/node/runtime/Cargo.toml
@@ -35,6 +35,7 @@ sp-block-builder = { path = "../../../primitives/block-builder", default-feature
 sp-genesis-builder = { version = "0.1.0-dev", default-features = false, path = "../../../primitives/genesis-builder" }
 sp-inherents = { path = "../../../primitives/inherents", default-features = false}
 node-primitives = { path = "../primitives", default-features = false}
+sp-mixnet = { path = "../../../primitives/mixnet", default-features = false }
 sp-offchain = { path = "../../../primitives/offchain", default-features = false}
 sp-core = { path = "../../../primitives/core", default-features = false}
 sp-std = { path = "../../../primitives/std", default-features = false}
@@ -88,6 +89,7 @@ pallet-identity = { path = "../../../frame/identity", default-features = false}
 pallet-lottery = { path = "../../../frame/lottery", default-features = false}
 pallet-membership = { path = "../../../frame/membership", default-features = false}
 pallet-message-queue = { path = "../../../frame/message-queue", default-features = false}
+pallet-mixnet = { path = "../../../frame/mixnet", default-features = false }
 pallet-mmr = { path = "../../../frame/merkle-mountain-range", default-features = false}
 pallet-multisig = { path = "../../../frame/multisig", default-features = false}
 pallet-nfts = { path = "../../../frame/nfts", default-features = false}
@@ -185,6 +187,7 @@ std = [
 	"pallet-lottery/std",
 	"pallet-membership/std",
 	"pallet-message-queue/std",
+	"pallet-mixnet/std",
 	"pallet-mmr/std",
 	"pallet-multisig/std",
 	"pallet-nft-fractionalization/std",
@@ -235,6 +238,7 @@ std = [
 	"sp-genesis-builder/std",
 	"sp-inherents/std",
 	"sp-io/std",
+	"sp-mixnet/std",
 	"sp-offchain/std",
 	"sp-runtime/std",
 	"sp-session/std",
@@ -281,6 +285,7 @@ runtime-benchmarks = [
 	"pallet-lottery/runtime-benchmarks",
 	"pallet-membership/runtime-benchmarks",
 	"pallet-message-queue/runtime-benchmarks",
+	"pallet-mixnet/runtime-benchmarks",
 	"pallet-mmr/runtime-benchmarks",
 	"pallet-multisig/runtime-benchmarks",
 	"pallet-nft-fractionalization/runtime-benchmarks",
@@ -354,6 +359,7 @@ try-runtime = [
 	"pallet-lottery/try-runtime",
 	"pallet-membership/try-runtime",
 	"pallet-message-queue/try-runtime",
+	"pallet-mixnet/try-runtime",
 	"pallet-mmr/try-runtime",
 	"pallet-multisig/try-runtime",
 	"pallet-nft-fractionalization/try-runtime",
diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs
index f018639b732e3..2070e3f12d04f 100644
--- a/substrate/bin/node/runtime/src/lib.rs
+++ b/substrate/bin/node/runtime/src/lib.rs
@@ -37,7 +37,7 @@ use frame_support::{
 	parameter_types,
 	traits::{
 		fungible::{Balanced, Credit, HoldConsideration, ItemOf},
-		tokens::{nonfungibles_v2::Inspect, GetSalary, PayFromAccount},
+		tokens::{nonfungibles_v2::Inspect, pay::PayAssetFromAccount, GetSalary, PayFromAccount},
 		AsEnsureOriginWithArg, ConstBool, ConstU128, ConstU16, ConstU32, Contains, Currency,
 		EitherOfDiverse, EqualPrivilegeOnly, Imbalance, InsideBoth, InstanceFilter,
 		KeyOwnerProofSystem, LinearStoragePrice, LockIdentifier, Nothing, OnUnbalanced,
@@ -589,6 +589,7 @@ impl_opaque_keys! {
 		pub babe: Babe,
 		pub im_online: ImOnline,
 		pub authority_discovery: AuthorityDiscovery,
+		pub mixnet: Mixnet,
 	}
 }
 
@@ -1048,7 +1049,7 @@ impl pallet_democracy::Config for Runtime {
 		pallet_collective::EnsureProportionAtLeast<AccountId, TechnicalCollective, 2, 3>;
 	type InstantOrigin =
 		pallet_collective::EnsureProportionAtLeast<AccountId, TechnicalCollective, 1, 1>;
-	type InstantAllowed = frame_support::traits::ConstBool<true>;
+	type InstantAllowed = ConstBool<true>;
 	type FastTrackVotingPeriod = FastTrackVotingPeriod;
 	// To cancel a proposal which has been passed, 2/3 of the council must agree to it.
 	type CancellationOrigin =
@@ -1186,6 +1187,7 @@ parameter_types! {
 	pub const MaximumReasonLength: u32 = 300;
 	pub const MaxApprovals: u32 = 100;
 	pub const MaxBalance: Balance = Balance::max_value();
+	pub const SpendPayoutPeriod: BlockNumber = 30 * DAYS;
 }
 
 impl pallet_treasury::Config for Runtime {
@@ -1211,6 +1213,14 @@ impl pallet_treasury::Config for Runtime {
 	type WeightInfo = pallet_treasury::weights::SubstrateWeight<Runtime>;
 	type MaxApprovals = MaxApprovals;
 	type SpendOrigin = EnsureWithSuccess<EnsureRoot<AccountId>, AccountId, MaxBalance>;
+	type AssetKind = u32;
+	type Beneficiary = AccountId;
+	type BeneficiaryLookup = Indices;
+	type Paymaster = PayAssetFromAccount<Assets, TreasuryAccount>;
+	type BalanceConverter = AssetRate;
+	type PayoutPeriod = SpendPayoutPeriod;
+	#[cfg(feature = "runtime-benchmarks")]
+	type BenchmarkHelper = ();
 }
 
 impl pallet_asset_rate::Config for Runtime {
@@ -2019,6 +2029,29 @@ impl pallet_broker::Config for Runtime {
 	type PriceAdapter = pallet_broker::Linear;
 }
 
+parameter_types! {
+	pub const MixnetNumCoverToCurrentBlocks: BlockNumber = 3;
+	pub const MixnetNumRequestsToCurrentBlocks: BlockNumber = 3;
+	pub const MixnetNumCoverToPrevBlocks: BlockNumber = 3;
+	pub const MixnetNumRegisterStartSlackBlocks: BlockNumber = 3;
+	pub const MixnetNumRegisterEndSlackBlocks: BlockNumber = 3;
+	pub const MixnetRegistrationPriority: TransactionPriority = ImOnlineUnsignedPriority::get() - 1;
+}
+
+impl pallet_mixnet::Config for Runtime {
+	type MaxAuthorities = MaxAuthorities;
+	type MaxExternalAddressSize = ConstU32<128>;
+	type MaxExternalAddressesPerMixnode = ConstU32<16>;
+	type NextSessionRotation = Babe;
+	type NumCoverToCurrentBlocks = MixnetNumCoverToCurrentBlocks;
+	type NumRequestsToCurrentBlocks = MixnetNumRequestsToCurrentBlocks;
+	type NumCoverToPrevBlocks = MixnetNumCoverToPrevBlocks;
+	type NumRegisterStartSlackBlocks = MixnetNumRegisterStartSlackBlocks;
+	type NumRegisterEndSlackBlocks = MixnetNumRegisterEndSlackBlocks;
+	type RegistrationPriority = MixnetRegistrationPriority;
+	type MinMixnodes = ConstU32<7>; // Low to allow small testing networks
+}
+
 construct_runtime!(
 	pub struct Runtime
 	{
@@ -2095,6 +2128,7 @@ construct_runtime!(
 		SafeMode: pallet_safe_mode,
 		Statement: pallet_statement,
 		Broker: pallet_broker,
+		Mixnet: pallet_mixnet,
 	}
 );
 
@@ -2654,6 +2688,24 @@ impl_runtime_apis! {
 		}
 	}
 
+	impl sp_mixnet::runtime_api::MixnetApi<Block> for Runtime {
+		fn session_status() -> sp_mixnet::types::SessionStatus {
+			Mixnet::session_status()
+		}
+
+		fn prev_mixnodes() -> Result<Vec<sp_mixnet::types::Mixnode>, sp_mixnet::types::MixnodesErr> {
+			Mixnet::prev_mixnodes()
+		}
+
+		fn current_mixnodes() -> Result<Vec<sp_mixnet::types::Mixnode>, sp_mixnet::types::MixnodesErr> {
+			Mixnet::current_mixnodes()
+		}
+
+		fn maybe_register(session_index: sp_mixnet::types::SessionIndex, mixnode: sp_mixnet::types::Mixnode) -> bool {
+			Mixnet::maybe_register(session_index, mixnode)
+		}
+	}
+
 	impl sp_session::SessionKeys<Block> for Runtime {
 		fn generate_session_keys(seed: Option<Vec<u8>>) -> Vec<u8> {
 			SessionKeys::generate(seed)
diff --git a/substrate/bin/node/testing/src/genesis.rs b/substrate/bin/node/testing/src/genesis.rs
index 6e7bcebfc00d1..ab5311751a55f 100644
--- a/substrate/bin/node/testing/src/genesis.rs
+++ b/substrate/bin/node/testing/src/genesis.rs
@@ -109,5 +109,6 @@ pub fn config_endowed(code: Option<&[u8]>, extra_endowed: Vec<AccountId>) -> Run
 			trash_data_count: Default::default(),
 			..Default::default()
 		},
+		mixnet: Default::default(),
 	}
 }
diff --git a/substrate/bin/node/testing/src/keyring.rs b/substrate/bin/node/testing/src/keyring.rs
index b4b714d9083d6..22a8f5deb19f7 100644
--- a/substrate/bin/node/testing/src/keyring.rs
+++ b/substrate/bin/node/testing/src/keyring.rs
@@ -64,6 +64,7 @@ pub fn to_session_keys(
 		babe: sr25519_keyring.to_owned().public().into(),
 		im_online: sr25519_keyring.to_owned().public().into(),
 		authority_discovery: sr25519_keyring.to_owned().public().into(),
+		mixnet: sr25519_keyring.to_owned().public().into(),
 	}
 }
 
diff --git a/substrate/bin/utils/chain-spec-builder/Cargo.toml b/substrate/bin/utils/chain-spec-builder/Cargo.toml
index f564ff19af0f3..c7690faf7d065 100644
--- a/substrate/bin/utils/chain-spec-builder/Cargo.toml
+++ b/substrate/bin/utils/chain-spec-builder/Cargo.toml
@@ -22,7 +22,7 @@ crate-type = ["rlib"]
 
 [dependencies]
 ansi_term = "0.12.1"
-clap = { version = "4.4.4", features = ["derive"] }
+clap = { version = "4.4.6", features = ["derive"] }
 rand = "0.8"
 node-cli = { path = "../../node/cli" }
 sc-chain-spec = { path = "../../../client/chain-spec" }
diff --git a/substrate/bin/utils/chain-spec-builder/src/lib.rs b/substrate/bin/utils/chain-spec-builder/src/lib.rs
index 528b6b70115a0..2b88e40ef74fd 100644
--- a/substrate/bin/utils/chain-spec-builder/src/lib.rs
+++ b/substrate/bin/utils/chain-spec-builder/src/lib.rs
@@ -179,7 +179,7 @@ pub fn generate_authority_keys_and_store(
 				.map_err(|err| err.to_string())?
 				.into();
 
-		let (_, _, grandpa, babe, im_online, authority_discovery) =
+		let (_, _, grandpa, babe, im_online, authority_discovery, mixnet) =
 			chain_spec::authority_keys_from_seed(seed);
 
 		let insert_key = |key_type, public| {
@@ -198,6 +198,8 @@ pub fn generate_authority_keys_and_store(
 			sp_core::crypto::key_types::AUTHORITY_DISCOVERY,
 			authority_discovery.as_slice(),
 		)?;
+
+		insert_key(sp_core::crypto::key_types::MIXNET, mixnet.as_slice())?;
 	}
 
 	Ok(())
diff --git a/substrate/bin/utils/subkey/Cargo.toml b/substrate/bin/utils/subkey/Cargo.toml
index 4e8cb606c94e5..6606d8ac365f9 100644
--- a/substrate/bin/utils/subkey/Cargo.toml
+++ b/substrate/bin/utils/subkey/Cargo.toml
@@ -17,5 +17,5 @@ path = "src/main.rs"
 name = "subkey"
 
 [dependencies]
-clap = { version = "4.4.4", features = ["derive"] }
+clap = { version = "4.4.6", features = ["derive"] }
 sc-cli = { path = "../../../client/cli" }
diff --git a/substrate/client/basic-authorship/src/basic_authorship.rs b/substrate/client/basic-authorship/src/basic_authorship.rs
index 0fb61b6fab1f6..57c2996ab4063 100644
--- a/substrate/client/basic-authorship/src/basic_authorship.rs
+++ b/substrate/client/basic-authorship/src/basic_authorship.rs
@@ -79,7 +79,7 @@ pub struct ProposerFactory<A, B, C, PR> {
 	/// The soft deadline indicates where we should stop attempting to add transactions
 	/// to the block, which exhaust resources. After soft deadline is reached,
 	/// we switch to a fixed-amount mode, in which after we see `MAX_SKIPPED_TRANSACTIONS`
-	/// transactions which exhaust resrouces, we will conclude that the block is full.
+	/// transactions which exhaust resources, we will conclude that the block is full.
 	soft_deadline_percent: Percent,
 	telemetry: Option<TelemetryHandle>,
 	/// When estimating the block size, should the proof be included?
diff --git a/substrate/client/chain-spec/derive/Cargo.toml b/substrate/client/chain-spec/derive/Cargo.toml
index 202817438b7d8..74b8b656a4042 100644
--- a/substrate/client/chain-spec/derive/Cargo.toml
+++ b/substrate/client/chain-spec/derive/Cargo.toml
@@ -18,4 +18,4 @@ proc-macro = true
 proc-macro-crate = "1.1.3"
 proc-macro2 = "1.0.56"
 quote = "1.0.28"
-syn = "2.0.37"
+syn = "2.0.38"
diff --git a/substrate/client/cli/Cargo.toml b/substrate/client/cli/Cargo.toml
index cfdcb39b1fa79..98928700328fc 100644
--- a/substrate/client/cli/Cargo.toml
+++ b/substrate/client/cli/Cargo.toml
@@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 array-bytes = "6.1"
 chrono = "0.4.27"
-clap = { version = "4.4.4", features = ["derive", "string"] }
+clap = { version = "4.4.6", features = ["derive", "string"] }
 fdlimit = "0.2.1"
 futures = "0.3.21"
 libp2p-identity = { version = "0.1.3", features = ["peerid", "ed25519"]}
@@ -33,6 +33,7 @@ tokio = { version = "1.22.0", features = ["signal", "rt-multi-thread", "parking_
 sc-client-api = { path = "../api" }
 sc-client-db = { path = "../db", default-features = false}
 sc-keystore = { path = "../keystore" }
+sc-mixnet = { path = "../mixnet" }
 sc-network = { path = "../network" }
 sc-service = { path = "../service", default-features = false}
 sc-telemetry = { path = "../telemetry" }
diff --git a/substrate/client/cli/src/commands/inspect_node_key.rs b/substrate/client/cli/src/commands/inspect_node_key.rs
index 19b5a31ca12c0..6cf025a2d1150 100644
--- a/substrate/client/cli/src/commands/inspect_node_key.rs
+++ b/substrate/client/cli/src/commands/inspect_node_key.rs
@@ -85,7 +85,7 @@ mod tests {
 	fn inspect_node_key() {
 		let path = tempfile::tempdir().unwrap().into_path().join("node-id").into_os_string();
 		let path = path.to_str().unwrap();
-		let cmd = GenerateNodeKeyCmd::parse_from(&["generate-node-key", "--file", path.clone()]);
+		let cmd = GenerateNodeKeyCmd::parse_from(&["generate-node-key", "--file", path]);
 
 		assert!(cmd.run().is_ok());
 
diff --git a/substrate/client/cli/src/commands/purge_chain_cmd.rs b/substrate/client/cli/src/commands/purge_chain_cmd.rs
index 2ff3d4b9a04c0..6e7b8143a5bb7 100644
--- a/substrate/client/cli/src/commands/purge_chain_cmd.rs
+++ b/substrate/client/cli/src/commands/purge_chain_cmd.rs
@@ -48,7 +48,7 @@ pub struct PurgeChainCmd {
 impl PurgeChainCmd {
 	/// Run the purge command
 	pub fn run(&self, database_config: DatabaseSource) -> error::Result<()> {
-		let db_path = database_config.path().ok_or_else(|| {
+		let db_path = database_config.path().and_then(|p| p.parent()).ok_or_else(|| {
 			error::Error::Input("Cannot purge custom database implementation".into())
 		})?;
 
diff --git a/substrate/client/cli/src/params/mixnet_params.rs b/substrate/client/cli/src/params/mixnet_params.rs
new file mode 100644
index 0000000000000..4758a84ec458d
--- /dev/null
+++ b/substrate/client/cli/src/params/mixnet_params.rs
@@ -0,0 +1,67 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+use clap::Args;
+use sp_core::H256;
+use std::str::FromStr;
+
+fn parse_kx_secret(s: &str) -> Result<sc_mixnet::KxSecret, String> {
+	H256::from_str(s).map(H256::to_fixed_bytes).map_err(|err| err.to_string())
+}
+
+/// Parameters used to create the mixnet configuration.
+#[derive(Debug, Clone, Args)]
+pub struct MixnetParams {
+	/// Enable the mixnet service.
+	///
+	/// This will make the mixnet RPC methods available. If the node is running as a validator, it
+	/// will also attempt to register and operate as a mixnode.
+	#[arg(long)]
+	pub mixnet: bool,
+
+	/// The mixnet key-exchange secret to use in session 0.
+	///
+	/// Should be 64 hex characters, giving a 32-byte secret.
+	///
+	/// WARNING: Secrets provided as command-line arguments are easily exposed. Use of this option
+	/// should be limited to development and testing.
+	#[arg(long, value_name = "SECRET", value_parser = parse_kx_secret)]
+	pub mixnet_session_0_kx_secret: Option<sc_mixnet::KxSecret>,
+}
+
+impl MixnetParams {
+	/// Returns the mixnet configuration, or `None` if the mixnet is disabled.
+	pub fn config(&self, is_authority: bool) -> Option<sc_mixnet::Config> {
+		self.mixnet.then(|| {
+			let mut config = sc_mixnet::Config {
+				core: sc_mixnet::CoreConfig {
+					session_0_kx_secret: self.mixnet_session_0_kx_secret,
+					..Default::default()
+				},
+				..Default::default()
+			};
+			if !is_authority {
+				// Only authorities can be mixnodes; don't attempt to register
+				config.substrate.register = false;
+				// Only mixnodes need to allow connections from non-mixnodes
+				config.substrate.num_gateway_slots = 0;
+			}
+			config
+		})
+	}
+}
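+
+// A minimal illustrative sketch of the flag-to-config mapping above,
+// exercising only the public fields of `MixnetParams` and `sc_mixnet::Config`.
+#[cfg(test)]
+mod tests {
+	use super::*;
+
+	#[test]
+	fn disabled_mixnet_yields_no_config() {
+		let params = MixnetParams { mixnet: false, mixnet_session_0_kx_secret: None };
+		assert!(params.config(true).is_none());
+	}
+
+	#[test]
+	fn non_authority_does_not_register_as_mixnode() {
+		let params = MixnetParams { mixnet: true, mixnet_session_0_kx_secret: None };
+		let config = params.config(false).expect("Mixnet is enabled");
+		// Non-authorities can never be mixnodes, so registration is disabled
+		// and no gateway slots are allocated.
+		assert!(!config.substrate.register);
+		assert_eq!(config.substrate.num_gateway_slots, 0);
+	}
+}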
diff --git a/substrate/client/cli/src/params/mod.rs b/substrate/client/cli/src/params/mod.rs
index a73bd8844fec4..f07223ec6a73e 100644
--- a/substrate/client/cli/src/params/mod.rs
+++ b/substrate/client/cli/src/params/mod.rs
@@ -19,6 +19,7 @@ mod database_params;
 mod import_params;
 mod keystore_params;
 mod message_params;
+mod mixnet_params;
 mod network_params;
 mod node_key_params;
 mod offchain_worker_params;
@@ -39,9 +40,10 @@ use sp_runtime::{
 use std::{fmt::Debug, str::FromStr};
 
 pub use crate::params::{
-	database_params::*, import_params::*, keystore_params::*, message_params::*, network_params::*,
-	node_key_params::*, offchain_worker_params::*, prometheus_params::*, pruning_params::*,
-	runtime_params::*, shared_params::*, telemetry_params::*, transaction_pool_params::*,
+	database_params::*, import_params::*, keystore_params::*, message_params::*, mixnet_params::*,
+	network_params::*, node_key_params::*, offchain_worker_params::*, prometheus_params::*,
+	pruning_params::*, runtime_params::*, shared_params::*, telemetry_params::*,
+	transaction_pool_params::*,
 };
 
 /// Parse Ss58AddressFormat
diff --git a/substrate/client/mixnet/Cargo.toml b/substrate/client/mixnet/Cargo.toml
new file mode 100644
index 0000000000000..86c5a37754afb
--- /dev/null
+++ b/substrate/client/mixnet/Cargo.toml
@@ -0,0 +1,36 @@
+[package]
+description = "Substrate mixnet service"
+name = "sc-mixnet"
+version = "0.1.0-dev"
+license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
+authors = ["Parity Technologies <admin@parity.io>"]
+edition = "2021"
+homepage = "https://substrate.io"
+repository = "https://github.com/paritytech/substrate/"
+readme = "README.md"
+
+[package.metadata.docs.rs]
+targets = ["x86_64-unknown-linux-gnu"]
+
+[dependencies]
+array-bytes = "4.1"
+arrayvec = "0.7.2"
+blake2 = "0.10.4"
+codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
+futures = "0.3.25"
+futures-timer = "3.0.2"
+libp2p-identity = { version = "0.1.3", features = ["peerid"] }
+log = "0.4.17"
+mixnet = "0.7.0"
+multiaddr = "0.17.1"
+parking_lot = "0.12.1"
+sc-client-api = { path = "../api" }
+sc-network = { path = "../network" }
+sc-transaction-pool-api = { path = "../transaction-pool/api" }
+sp-api = { path = "../../primitives/api" }
+sp-consensus = { path = "../../primitives/consensus/common" }
+sp-core = { path = "../../primitives/core" }
+sp-keystore = { path = "../../primitives/keystore" }
+sp-mixnet = { path = "../../primitives/mixnet" }
+sp-runtime = { path = "../../primitives/runtime" }
+thiserror = "1.0"
diff --git a/substrate/client/mixnet/README.md b/substrate/client/mixnet/README.md
new file mode 100644
index 0000000000000..cd8d147408387
--- /dev/null
+++ b/substrate/client/mixnet/README.md
@@ -0,0 +1,3 @@
+Substrate mixnet service.
+
+License: GPL-3.0-or-later WITH Classpath-exception-2.0
diff --git a/substrate/client/mixnet/src/api.rs b/substrate/client/mixnet/src/api.rs
new file mode 100644
index 0000000000000..42a2e395345dc
--- /dev/null
+++ b/substrate/client/mixnet/src/api.rs
@@ -0,0 +1,69 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+use super::{config::Config, error::Error, request::Request};
+use futures::{
+	channel::{mpsc, oneshot},
+	SinkExt,
+};
+use sp_core::Bytes;
+use std::future::Future;
+
+/// The other end of an [`Api`]. This should be passed to [`run`](super::run::run).
+pub struct ApiBackend {
+	pub(super) request_receiver: mpsc::Receiver<Request>,
+}
+
+/// Interface to the mixnet service.
+#[derive(Clone)]
+pub struct Api {
+	request_sender: mpsc::Sender<Request>,
+}
+
+impl Api {
+	/// Create a new `Api`. The [`ApiBackend`] should be passed to [`run`](super::run::run).
+	pub fn new(config: &Config) -> (Self, ApiBackend) {
+		let (request_sender, request_receiver) = mpsc::channel(config.substrate.request_buffer);
+		(Self { request_sender }, ApiBackend { request_receiver })
+	}
+
+	/// Submit an extrinsic via the mixnet.
+	///
+	/// Returns a [`Future`] that resolves to another `Future`.
+	///
+	/// The first `Future` resolves as soon as there is space in the mixnet service queue. The
+	/// second `Future` resolves once a reply is received over the mixnet (or sooner if there is an
+	/// error).
+	///
+	/// The first `Future` references `self`, but the second does not. This makes it possible to
+	/// submit concurrent mixnet requests using a single `Api` instance.
+	pub async fn submit_extrinsic(
+		&mut self,
+		extrinsic: Bytes,
+	) -> impl Future<Output = Result<(), Error>> {
+		let (reply_sender, reply_receiver) = oneshot::channel();
+		let res = self
+			.request_sender
+			.feed(Request::SubmitExtrinsic { extrinsic, reply_sender })
+			.await;
+		async move {
+			res.map_err(|_| Error::ServiceUnavailable)?;
+			reply_receiver.await.map_err(|_| Error::ServiceUnavailable)?
+		}
+	}
+}
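+
+// Illustrative usage sketch, assuming a service task is driving the
+// [`ApiBackend`]: the outer `await` resolves once the request has been
+// queued, and the returned future (which does not borrow the `Api`) resolves
+// when a reply arrives, so several requests can be awaited concurrently from
+// clones of a single `Api`.
+//
+// async fn submit(mut api: Api, extrinsic: Bytes) -> Result<(), Error> {
+//     let reply = api.submit_extrinsic(extrinsic).await;
+//     reply.await
+// }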
diff --git a/substrate/client/mixnet/src/config.rs b/substrate/client/mixnet/src/config.rs
new file mode 100644
index 0000000000000..b716237ab7b53
--- /dev/null
+++ b/substrate/client/mixnet/src/config.rs
@@ -0,0 +1,88 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+pub use mixnet::core::Config as CoreConfig;
+use std::time::Duration;
+
+/// Substrate-specific mixnet configuration.
+#[derive(Clone, Debug)]
+pub struct SubstrateConfig {
+	/// Attempt to register the local node as a mixnode?
+	pub register: bool,
+	/// Maximum number of incoming mixnet connections to accept from non-mixnodes. If the local
+	/// node will never be a mixnode, this can be set to 0.
+	pub num_gateway_slots: u32,
+
+	/// Number of requests to the mixnet service that can be buffered, in addition to the one per
+	/// [`Api`](super::api::Api) instance. Note that this does not include requests that are being
+	/// actively handled.
+	pub request_buffer: usize,
+	/// Used to determine the number of SURBs to include in request messages: the maximum number of
+	/// SURBs needed for a single reply is multiplied by this. This should not be set to 0.
+	pub surb_factor: usize,
+
+	/// Maximum number of submit extrinsic requests waiting for their delay to elapse. When at the
+	/// limit, any submit extrinsic requests that arrive will simply be dropped.
+	pub extrinsic_queue_capacity: usize,
+	/// Mean delay between receiving a submit extrinsic request and actually submitting the
+	/// extrinsic. This should really be the same for all nodes!
+	pub mean_extrinsic_delay: Duration,
+	/// Maximum number of extrinsics being actively submitted. If a submit extrinsic request's
+	/// delay elapses and we are already at this limit, the request will simply be dropped.
+	pub max_pending_extrinsics: usize,
+}
+
+impl Default for SubstrateConfig {
+	fn default() -> Self {
+		Self {
+			register: true,
+			num_gateway_slots: 150,
+
+			request_buffer: 4,
+			surb_factor: 2,
+
+			extrinsic_queue_capacity: 50,
+			mean_extrinsic_delay: Duration::from_secs(1),
+			max_pending_extrinsics: 20,
+		}
+	}
+}
+
+/// Mixnet configuration.
+#[derive(Clone, Debug)]
+pub struct Config {
+	/// Core configuration.
+	pub core: CoreConfig,
+	/// Request manager configuration.
+	pub request_manager: mixnet::request_manager::Config,
+	/// Reply manager configuration.
+	pub reply_manager: mixnet::reply_manager::Config,
+	/// Substrate-specific configuration.
+	pub substrate: SubstrateConfig,
+}
+
+impl Default for Config {
+	fn default() -> Self {
+		Self {
+			core: Default::default(),
+			request_manager: Default::default(),
+			reply_manager: Default::default(),
+			substrate: Default::default(),
+		}
+	}
+}
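+
+// Illustrative sketch: a non-mixnode observer might keep the defaults but
+// disable registration and gateway slots, mirroring what the CLI layer does
+// for non-authorities.
+//
+// let config = Config {
+//     substrate: SubstrateConfig {
+//         register: false,
+//         num_gateway_slots: 0,
+//         ..Default::default()
+//     },
+//     ..Default::default()
+// };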
diff --git a/substrate/client/mixnet/src/error.rs b/substrate/client/mixnet/src/error.rs
new file mode 100644
index 0000000000000..88942dbe3b36f
--- /dev/null
+++ b/substrate/client/mixnet/src/error.rs
@@ -0,0 +1,56 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+use codec::{Decode, Encode};
+use mixnet::core::PostErr;
+
+/// Error handling a request. Sent in replies over the mixnet.
+#[derive(Debug, thiserror::Error, Decode, Encode)]
+pub enum RemoteErr {
+	/// An error that doesn't map to any of the other variants.
+	#[error("{0}")]
+	Other(String),
+	/// Failed to decode the request.
+	#[error("Failed to decode the request: {0}")]
+	Decode(String),
+}
+
+/// Mixnet error.
+#[derive(Debug, thiserror::Error)]
+pub enum Error {
+	/// Failed to communicate with the mixnet service. Possibly it panicked. The node probably
+	/// needs to be restarted.
+	#[error(
+		"Failed to communicate with the mixnet service; the node probably needs to be restarted"
+	)]
+	ServiceUnavailable,
+	/// Did not receive a reply after the configured number of attempts.
+	#[error("Did not receive a reply from the mixnet after the configured number of attempts")]
+	NoReply,
+	/// Received a malformed reply.
+	#[error("Received a malformed reply from the mixnet")]
+	BadReply,
+	/// Failed to post the request to the mixnet. Note that some [`PostErr`] variants, eg
+	/// [`PostErr::NotEnoughSpaceInQueue`], are handled internally and will never be returned from
+	/// the top-level API.
+	#[error("Failed to post the request to the mixnet: {0}")]
+	Post(#[from] PostErr),
+	/// Error reported by destination mixnode.
+	#[error("Error reported by the destination mixnode: {0}")]
+	Remote(#[from] RemoteErr),
+}
diff --git a/substrate/client/mixnet/src/extrinsic_queue.rs b/substrate/client/mixnet/src/extrinsic_queue.rs
new file mode 100644
index 0000000000000..b6f6f9ebae998
--- /dev/null
+++ b/substrate/client/mixnet/src/extrinsic_queue.rs
@@ -0,0 +1,94 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+//! [`ExtrinsicQueue`] is a queue for extrinsics received from the mixnet. These extrinsics are
+//! explicitly delayed by a random amount, to decorrelate the times at which they are received from
+//! the times at which they are broadcast to peers.
+
+use mixnet::reply_manager::ReplyContext;
+use std::{cmp::Ordering, collections::BinaryHeap, time::Instant};
+
+/// An extrinsic that should be submitted to the transaction pool after `deadline`. `Eq` and `Ord`
+/// are implemented for this to support use in `BinaryHeap`s. Only `deadline` is compared.
+struct DelayedExtrinsic<E> {
+	/// When the extrinsic should actually be submitted to the pool.
+	deadline: Instant,
+	extrinsic: E,
+	reply_context: ReplyContext,
+}
+
+impl<E> PartialEq for DelayedExtrinsic<E> {
+	fn eq(&self, other: &Self) -> bool {
+		self.deadline == other.deadline
+	}
+}
+
+impl<E> Eq for DelayedExtrinsic<E> {}
+
+impl<E> PartialOrd for DelayedExtrinsic<E> {
+	fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+		Some(self.cmp(other))
+	}
+}
+
+impl<E> Ord for DelayedExtrinsic<E> {
+	fn cmp(&self, other: &Self) -> Ordering {
+		// Extrinsics with the earliest deadline are considered greatest, so
+		// the max-heap `BinaryHeap` pops them first
+		self.deadline.cmp(&other.deadline).reverse()
+	}
+}
+
+pub struct ExtrinsicQueue<E> {
+	capacity: usize,
+	queue: BinaryHeap<DelayedExtrinsic<E>>,
+	next_deadline_changed: bool,
+}
+
+impl<E> ExtrinsicQueue<E> {
+	pub fn new(capacity: usize) -> Self {
+		Self { capacity, queue: BinaryHeap::with_capacity(capacity), next_deadline_changed: false }
+	}
+
+	pub fn next_deadline(&self) -> Option<Instant> {
+		self.queue.peek().map(|extrinsic| extrinsic.deadline)
+	}
+
+	pub fn next_deadline_changed(&mut self) -> bool {
+		let changed = self.next_deadline_changed;
+		self.next_deadline_changed = false;
+		changed
+	}
+
+	pub fn has_space(&self) -> bool {
+		self.queue.len() < self.capacity
+	}
+
+	pub fn insert(&mut self, deadline: Instant, extrinsic: E, reply_context: ReplyContext) {
+		debug_assert!(self.has_space());
+		let prev_deadline = self.next_deadline();
+		self.queue.push(DelayedExtrinsic { deadline, extrinsic, reply_context });
+		if self.next_deadline() != prev_deadline {
+			self.next_deadline_changed = true;
+		}
+	}
+
+	pub fn pop(&mut self) -> Option<(E, ReplyContext)> {
+		self.next_deadline_changed = true;
+		self.queue.pop().map(|extrinsic| (extrinsic.extrinsic, extrinsic.reply_context))
+	}
+}
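+
+// Illustrative sketch (comments only, as a `ReplyContext` cannot be built in
+// isolation): reversing `Ord` turns `BinaryHeap`'s max-heap into a min-heap,
+// so `pop` always yields the extrinsic whose deadline expires soonest.
+//
+// queue.insert(now + Duration::from_secs(2), ext_b, ctx_b);
+// queue.insert(now + Duration::from_secs(1), ext_a, ctx_a);
+// assert_eq!(queue.next_deadline(), Some(now + Duration::from_secs(1)));
+// let (first, _ctx) = queue.pop().expect("Queue is not empty"); // `ext_a` pops first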
diff --git a/substrate/client/mixnet/src/lib.rs b/substrate/client/mixnet/src/lib.rs
new file mode 100644
index 0000000000000..dfbb50dad6b49
--- /dev/null
+++ b/substrate/client/mixnet/src/lib.rs
@@ -0,0 +1,44 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+//! Substrate mixnet service. This implements the [Substrate Mix Network
+//! Specification](https://paritytech.github.io/mixnet-spec/).
+
+#![warn(missing_docs)]
+#![forbid(unsafe_code)]
+
+mod api;
+mod config;
+mod error;
+mod extrinsic_queue;
+mod maybe_inf_delay;
+mod packet_dispatcher;
+mod peer_id;
+mod protocol;
+mod request;
+mod run;
+mod sync_with_runtime;
+
+pub use self::{
+	api::{Api, ApiBackend},
+	config::{Config, CoreConfig, SubstrateConfig},
+	error::{Error, RemoteErr},
+	protocol::{peers_set_config, protocol_name},
+	run::run,
+};
+pub use mixnet::core::{KxSecret, PostErr, TopologyErr};
diff --git a/substrate/client/mixnet/src/maybe_inf_delay.rs b/substrate/client/mixnet/src/maybe_inf_delay.rs
new file mode 100644
index 0000000000000..feb0d038560a3
--- /dev/null
+++ b/substrate/client/mixnet/src/maybe_inf_delay.rs
@@ -0,0 +1,111 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+use futures::{future::FusedFuture, FutureExt};
+use futures_timer::Delay;
+use std::{
+	future::Future,
+	pin::Pin,
+	task::{Context, Poll, Waker},
+	time::Duration,
+};
+
+enum Inner {
+	Infinite {
+		/// Waker from the most recent `poll` call. If `None`, either `poll` has not been called
+		/// yet, we returned `Poll::Ready` from the last call, or the waker is attached to `delay`.
+		waker: Option<Waker>,
+		delay: Option<Delay>,
+	},
+	Finite(Delay),
+}
+
+/// Like [`Delay`] but the duration can be infinite (in which case the future will never fire).
+/// Unlike [`Delay`], implements [`FusedFuture`], with [`is_terminated`](Self::is_terminated)
+/// returning `true` when the delay is infinite. As with [`Delay`], once [`poll`](Self::poll)
+/// returns [`Poll::Ready`], it will continue to do so until [`reset`](Self::reset) is called.
+pub struct MaybeInfDelay(Inner);
+
+impl MaybeInfDelay {
+	/// Create a new `MaybeInfDelay` future. If `duration` is [`Some`], the future will fire after
+	/// the given duration has elapsed. If `duration` is [`None`], the future will "never" fire
+	/// (although see [`reset`](Self::reset)).
+	pub fn new(duration: Option<Duration>) -> Self {
+		match duration {
+			Some(duration) => Self(Inner::Finite(Delay::new(duration))),
+			None => Self(Inner::Infinite { waker: None, delay: None }),
+		}
+	}
+
+	/// Reset the timer. `duration` is handled just like in [`new`](Self::new). Note that while
+	/// this is similar to `std::mem::replace(&mut self, MaybeInfDelay::new(duration))`, with
+	/// `replace` you would have to manually ensure [`poll`](Self::poll) was called again; with
+	/// `reset` this is not necessary.
+	pub fn reset(&mut self, duration: Option<Duration>) {
+		match duration {
+			Some(duration) => match &mut self.0 {
+				Inner::Infinite { waker, delay } => {
+					let mut delay = match delay.take() {
+						Some(mut delay) => {
+							delay.reset(duration);
+							delay
+						},
+						None => Delay::new(duration),
+					};
+					if let Some(waker) = waker.take() {
+						let mut cx = Context::from_waker(&waker);
+						match delay.poll_unpin(&mut cx) {
+							Poll::Pending => (), // Waker attached to delay
+							Poll::Ready(_) => waker.wake(),
+						}
+					}
+					self.0 = Inner::Finite(delay);
+				},
+				Inner::Finite(delay) => delay.reset(duration),
+			},
+			None =>
+				self.0 = match std::mem::replace(
+					&mut self.0,
+					Inner::Infinite { waker: None, delay: None },
+				) {
+					Inner::Finite(delay) => Inner::Infinite { waker: None, delay: Some(delay) },
+					infinite => infinite,
+				},
+		}
+	}
+}
+
+impl Future for MaybeInfDelay {
+	type Output = ();
+
+	fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+		match &mut self.0 {
+			Inner::Infinite { waker, .. } => {
+				*waker = Some(cx.waker().clone());
+				Poll::Pending
+			},
+			Inner::Finite(delay) => delay.poll_unpin(cx),
+		}
+	}
+}
+
+impl FusedFuture for MaybeInfDelay {
+	fn is_terminated(&self) -> bool {
+		matches!(self.0, Inner::Infinite { .. })
+	}
+}
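+
+// A minimal sketch of the semantics described above.
+#[cfg(test)]
+mod tests {
+	use super::*;
+	use futures::executor::block_on;
+
+	#[test]
+	fn finite_delay_fires_and_infinite_delay_is_terminated() {
+		// A zero-duration delay completes immediately...
+		block_on(MaybeInfDelay::new(Some(Duration::ZERO)));
+		// ...while an infinite delay reports itself as terminated, so
+		// combinators like `futures::select!` will not poll it.
+		assert!(MaybeInfDelay::new(None).is_terminated());
+	}
+}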
diff --git a/substrate/client/mixnet/src/packet_dispatcher.rs b/substrate/client/mixnet/src/packet_dispatcher.rs
new file mode 100644
index 0000000000000..856208ecb3426
--- /dev/null
+++ b/substrate/client/mixnet/src/packet_dispatcher.rs
@@ -0,0 +1,198 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+//! [`AddressedPacket`] dispatching.
+
+use super::peer_id::{from_core_peer_id, to_core_peer_id};
+use arrayvec::ArrayVec;
+use libp2p_identity::PeerId;
+use log::{debug, warn};
+use mixnet::core::{AddressedPacket, NetworkStatus, Packet, PeerId as CorePeerId};
+use parking_lot::Mutex;
+use sc_network::{NetworkNotification, ProtocolName};
+use std::{collections::HashMap, future::Future, sync::Arc};
+
+const LOG_TARGET: &str = "mixnet";
+
+/// Packet queue for a peer.
+///
+/// Ideally we would use `Rc<RefCell<_>>`, but that would prevent the top-level future from being
+/// automatically marked `Send`. I believe it would be safe to manually mark it `Send`, but using
+/// `Arc<Mutex<_>>` here is not really a big deal.
+struct PeerQueue(Mutex<ArrayVec<Box<Packet>, 2>>);
+
+impl PeerQueue {
+	fn new() -> Self {
+		Self(Mutex::new(ArrayVec::new()))
+	}
+
+	/// Push `packet` onto the queue. Returns `true` if the queue was previously empty. Fails if
+	/// the queue is full.
+	fn push(&self, packet: Box<Packet>) -> Result<bool, ()> {
+		let mut queue = self.0.lock();
+		if queue.is_full() {
+			Err(())
+		} else {
+			let was_empty = queue.is_empty();
+			queue.push(packet);
+			Ok(was_empty)
+		}
+	}
+
+	/// Drop all packets from the queue.
+	fn clear(&self) {
+		let mut queue = self.0.lock();
+		queue.clear();
+	}
+
+	/// Pop the packet at the head of the queue and return it, or, if the queue is empty, return
+	/// `None`. Also returns `true` if there are more packets in the queue.
+	fn pop(&self) -> (Option<Box<Packet>>, bool) {
+		let mut queue = self.0.lock();
+		let packet = queue.pop();
+		(packet, !queue.is_empty())
+	}
+}
+
+/// A peer which has packets ready to send but is not currently being serviced.
+pub struct ReadyPeer {
+	id: PeerId,
+	/// The peer's packet queue. Not empty.
+	queue: Arc<PeerQueue>,
+}
+
+impl ReadyPeer {
+	/// If a future is returned, and if that future returns `Some`, this function should be called
+	/// again to send the next packet queued for the peer; `self` is placed in the `Some` to make
+	/// this straightforward. Otherwise, we have either sent or dropped all packets queued for the
+	/// peer, and it can be forgotten about for the time being.
+	pub fn send_packet(
+		self,
+		network: &impl NetworkNotification,
+		protocol_name: ProtocolName,
+	) -> Option<impl Future<Output = Option<Self>>> {
+		match network.notification_sender(self.id, protocol_name) {
+			Err(err) => {
+				debug!(
+					target: LOG_TARGET,
+					"Failed to get notification sender for peer ID {}: {err}", self.id
+				);
+				self.queue.clear();
+				None
+			},
+			Ok(sender) => Some(async move {
+				match sender.ready().await.and_then(|mut ready| {
+					let (packet, more_packets) = self.queue.pop();
+					let packet =
+						packet.expect("Should only be called if there is a packet to send");
+					ready.send((packet as Box<[_]>).into())?;
+					Ok(more_packets)
+				}) {
+					Err(err) => {
+						debug!(
+							target: LOG_TARGET,
+							"Notification sender for peer ID {} failed: {err}", self.id
+						);
+						self.queue.clear();
+						None
+					},
+					Ok(more_packets) => more_packets.then(|| self),
+				}
+			}),
+		}
+	}
+}
+
+pub struct PacketDispatcher {
+	/// Peer ID of the local node. Only used to implement [`NetworkStatus`].
+	local_peer_id: CorePeerId,
+	/// Packet queue for each connected peer. These queues are very short and only exist to give
+	/// packets somewhere to sit while waiting for notification senders to be ready.
+	peer_queues: HashMap<CorePeerId, Arc<PeerQueue>>,
+}
+
+impl PacketDispatcher {
+	pub fn new(local_peer_id: &CorePeerId) -> Self {
+		Self { local_peer_id: *local_peer_id, peer_queues: HashMap::new() }
+	}
+
+	pub fn add_peer(&mut self, id: &PeerId) {
+		let Some(core_id) = to_core_peer_id(id) else {
+			debug!(target: LOG_TARGET,
+				"Cannot add peer; failed to convert libp2p peer ID {id} to mixnet peer ID");
+			return
+		};
+		if self.peer_queues.insert(core_id, Arc::new(PeerQueue::new())).is_some() {
+			warn!(target: LOG_TARGET, "Two stream opened notifications for peer ID {id}");
+		}
+	}
+
+	pub fn remove_peer(&mut self, id: &PeerId) {
+		let Some(core_id) = to_core_peer_id(id) else {
+			debug!(target: LOG_TARGET,
+				"Cannot remove peer; failed to convert libp2p peer ID {id} to mixnet peer ID");
+			return
+		};
+		if self.peer_queues.remove(&core_id).is_none() {
+			warn!(target: LOG_TARGET, "Stream closed notification for unknown peer ID {id}");
+		}
+	}
+
+	/// If the peer is not connected or the peer's packet queue is full, the packet is dropped.
+	/// Otherwise the packet is pushed onto the peer's queue, and if the queue was previously empty
+	/// a [`ReadyPeer`] is returned.
+	pub fn dispatch(&mut self, packet: AddressedPacket) -> Option<ReadyPeer> {
+		let Some(queue) = self.peer_queues.get_mut(&packet.peer_id) else {
+			debug!(target: LOG_TARGET, "Dropped packet to mixnet peer ID {:x?}; not connected",
+				packet.peer_id);
+			return None
+		};
+
+		match queue.push(packet.packet) {
+			Err(_) => {
+				debug!(
+					target: LOG_TARGET,
+					"Dropped packet to mixnet peer ID {:x?}; peer queue full", packet.peer_id
+				);
+				None
+			},
+			Ok(true) => {
+				// Queue was empty. Construct and return a ReadyPeer.
+				let Some(id) = from_core_peer_id(&packet.peer_id) else {
+					debug!(target: LOG_TARGET, "Cannot send packet; \
+						failed to convert mixnet peer ID {:x?} to libp2p peer ID",
+						packet.peer_id);
+					queue.clear();
+					return None
+				};
+				Some(ReadyPeer { id, queue: queue.clone() })
+			},
+			Ok(false) => None, // Queue was not empty
+		}
+	}
+}
+
+impl NetworkStatus for PacketDispatcher {
+	fn local_peer_id(&self) -> CorePeerId {
+		self.local_peer_id
+	}
+
+	fn is_connected(&self, peer_id: &CorePeerId) -> bool {
+		self.peer_queues.contains_key(peer_id)
+	}
+}
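+
+// Illustrative driving-loop sketch, assuming `dispatcher`, `network`,
+// `protocol_name` and a task `spawn` function are in scope: `send_packet` is
+// re-invoked for as long as it reports more packets queued for the peer.
+//
+// if let Some(ready) = dispatcher.dispatch(packet) {
+//     if let Some(fut) = ready.send_packet(&network, protocol_name.clone()) {
+//         spawn(async move {
+//             let mut next = fut.await;
+//             while let Some(ready) = next {
+//                 next = match ready.send_packet(&network, protocol_name.clone()) {
+//                     Some(fut) => fut.await,
+//                     None => None,
+//                 };
+//             }
+//         });
+//     }
+// }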
diff --git a/substrate/client/mixnet/src/peer_id.rs b/substrate/client/mixnet/src/peer_id.rs
new file mode 100644
index 0000000000000..7984da8c75be7
--- /dev/null
+++ b/substrate/client/mixnet/src/peer_id.rs
@@ -0,0 +1,44 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+use libp2p_identity::PeerId;
+use mixnet::core::PeerId as CorePeerId;
+
+/// Convert a libp2p [`PeerId`] into a mixnet core [`PeerId`](CorePeerId).
+///
+/// This will succeed only if `peer_id` is an Ed25519 public key ("hashed" using the identity
+/// hasher). Returns `None` on failure.
+pub fn to_core_peer_id(peer_id: &PeerId) -> Option<CorePeerId> {
+	let hash = peer_id.as_ref();
+	if hash.code() != 0 {
+		// Hash is not identity
+		return None
+	}
+	let public = libp2p_identity::PublicKey::try_decode_protobuf(hash.digest()).ok()?;
+	public.try_into_ed25519().ok().map(|public| public.to_bytes())
+}
+
+/// Convert a mixnet core [`PeerId`](CorePeerId) into a libp2p [`PeerId`].
+///
+/// This will succeed only if `peer_id` represents a point on the Ed25519 curve. Returns `None` on
+/// failure.
+pub fn from_core_peer_id(core_peer_id: &CorePeerId) -> Option<PeerId> {
+	let public = libp2p_identity::ed25519::PublicKey::try_from_bytes(core_peer_id).ok()?;
+	let public: libp2p_identity::PublicKey = public.into();
+	Some(public.into())
+}
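+
+// Illustrative property sketch, assuming an Ed25519, identity-hashed libp2p
+// peer ID (the kind Substrate nodes use by default): conversion round-trips.
+//
+// let core = to_core_peer_id(&peer_id).expect("Ed25519 identity-hashed peer ID");
+// assert_eq!(from_core_peer_id(&core), Some(peer_id));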
diff --git a/substrate/client/mixnet/src/protocol.rs b/substrate/client/mixnet/src/protocol.rs
new file mode 100644
index 0000000000000..555c267b86e0c
--- /dev/null
+++ b/substrate/client/mixnet/src/protocol.rs
@@ -0,0 +1,42 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+use super::config::Config;
+use mixnet::core::PACKET_SIZE;
+use sc_network::{config::NonDefaultSetConfig, ProtocolName};
+
+/// Returns the protocol name to use for the mixnet controlled by the given chain.
+pub fn protocol_name(genesis_hash: &[u8], fork_id: Option<&str>) -> ProtocolName {
+	let name = if let Some(fork_id) = fork_id {
+		format!("/{}/{}/mixnet/1", array_bytes::bytes2hex("", genesis_hash), fork_id)
+	} else {
+		format!("/{}/mixnet/1", array_bytes::bytes2hex("", genesis_hash))
+	};
+	name.into()
+}
+
+/// Returns the peers set configuration for the mixnet protocol.
+pub fn peers_set_config(name: ProtocolName, config: &Config) -> NonDefaultSetConfig {
+	let mut set_config = NonDefaultSetConfig::new(name, PACKET_SIZE as u64);
+	if config.substrate.num_gateway_slots != 0 {
+		// out_peers is always 0; we are only interested in connecting to mixnodes, which we do by
+		// setting them as reserved nodes
+		set_config.allow_non_reserved(config.substrate.num_gateway_slots, 0);
+	}
+	set_config
+}
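+
+// A minimal sketch of the resulting name format, using a made-up two-byte
+// "genesis hash" for brevity.
+#[cfg(test)]
+mod tests {
+	use super::*;
+
+	#[test]
+	fn protocol_name_format() {
+		assert_eq!(protocol_name(&[0xab, 0xcd], None).to_string(), "/abcd/mixnet/1");
+		assert_eq!(
+			protocol_name(&[0xab, 0xcd], Some("fork")).to_string(),
+			"/abcd/fork/mixnet/1"
+		);
+	}
+}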
diff --git a/substrate/client/mixnet/src/request.rs b/substrate/client/mixnet/src/request.rs
new file mode 100644
index 0000000000000..18a74c7ea5cf1
--- /dev/null
+++ b/substrate/client/mixnet/src/request.rs
@@ -0,0 +1,119 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+//! Sender-side request logic. Some things from this module are also used on the receiver side, eg
+//! [`extrinsic_delay`], but most of the receiver-side request logic lives elsewhere.
+
+use super::{config::SubstrateConfig, error::Error};
+use blake2::{
+	digest::{consts::U16, Mac},
+	Blake2bMac,
+};
+use codec::{Decode, DecodeAll};
+use futures::channel::oneshot;
+use log::debug;
+use mixnet::core::{Delay, MessageId, PostErr, Scattered};
+use sp_core::Bytes;
+use std::time::Duration;
+
+const LOG_TARGET: &str = "mixnet";
+
+fn send_err<T>(reply_sender: oneshot::Sender<Result<T, Error>>, err: Error) {
+	if let Err(Err(err)) = reply_sender.send(Err(err)) {
+		debug!(target: LOG_TARGET, "Failed to inform requester of error: {err}");
+	}
+}
+
+fn send_reply<T: Decode>(reply_sender: oneshot::Sender<Result<T, Error>>, mut data: &[u8]) {
+	let res = match Result::decode_all(&mut data) {
+		Ok(res) => res.map_err(Error::Remote),
+		Err(_) => Err(Error::BadReply),
+	};
+	match reply_sender.send(res) {
+		Ok(_) => (),
+		Err(Ok(_)) => debug!(target: LOG_TARGET, "Failed to send reply to requester"),
+		Err(Err(err)) => debug!(target: LOG_TARGET, "Failed to inform requester of error: {err}"),
+	}
+}
+
+/// First byte of a submit extrinsic request, identifying it as such.
+pub const SUBMIT_EXTRINSIC: u8 = 1;
+
+const EXTRINSIC_DELAY_PERSONA: &[u8; 16] = b"submit-extrn-dly";
+
+/// Returns the artificial delay that should be inserted between receipt of a submit extrinsic
+/// request with the given message ID and import of the extrinsic into the local transaction pool.
+pub fn extrinsic_delay(message_id: &MessageId, config: &SubstrateConfig) -> Duration {
+	let h = Blake2bMac::<U16>::new_with_salt_and_personal(message_id, b"", EXTRINSIC_DELAY_PERSONA)
+		.expect("Key, salt, and persona sizes are fixed and small enough");
+	let delay = Delay::exp(h.finalize().into_bytes().as_ref());
+	delay.to_duration(config.mean_extrinsic_delay)
+}
+
+/// Request parameters and local reply channel. Stored by the
+/// [`RequestManager`](mixnet::request_manager::RequestManager).
+pub enum Request {
+	SubmitExtrinsic { extrinsic: Bytes, reply_sender: oneshot::Sender<Result<(), Error>> },
+}
+
+impl Request {
+	/// Forward an error to the user of the mixnet service.
+	fn send_err(self, err: Error) {
+		match self {
+			Request::SubmitExtrinsic { reply_sender, .. } => send_err(reply_sender, err),
+		}
+	}
+
+	/// Forward a reply to the user of the mixnet service.
+	pub fn send_reply(self, data: &[u8]) {
+		match self {
+			Request::SubmitExtrinsic { reply_sender, .. } => send_reply(reply_sender, data),
+		}
+	}
+}
+
+impl mixnet::request_manager::Request for Request {
+	type Context = SubstrateConfig;
+
+	fn with_data<T>(&self, f: impl FnOnce(Scattered<u8>) -> T, _context: &Self::Context) -> T {
+		match self {
+			Request::SubmitExtrinsic { extrinsic, .. } =>
+				f([&[SUBMIT_EXTRINSIC], extrinsic.as_ref()].as_slice().into()),
+		}
+	}
+
+	fn num_surbs(&self, context: &Self::Context) -> usize {
+		match self {
+			Request::SubmitExtrinsic { .. } => context.surb_factor,
+		}
+	}
+
+	fn handling_delay(&self, message_id: &MessageId, context: &Self::Context) -> Duration {
+		match self {
+			Request::SubmitExtrinsic { .. } => extrinsic_delay(message_id, context),
+		}
+	}
+
+	fn handle_post_err(self, err: PostErr, _context: &Self::Context) {
+		self.send_err(err.into());
+	}
+
+	fn handle_retry_limit_reached(self, _context: &Self::Context) {
+		self.send_err(Error::NoReply);
+	}
+}
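+
+// Illustrative note: the delay is a pure (keyed Blake2b) function of the
+// message ID, so the sender (via `handling_delay` above) and the receiving
+// mixnode (in `run.rs`) independently compute the same delay for a request:
+//
+// let delay = extrinsic_delay(reply_context.message_id(), &config);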
diff --git a/substrate/client/mixnet/src/run.rs b/substrate/client/mixnet/src/run.rs
new file mode 100644
index 0000000000000..09020469d5eee
--- /dev/null
+++ b/substrate/client/mixnet/src/run.rs
@@ -0,0 +1,388 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+//! Top-level mixnet service function.
+
+use super::{
+	api::ApiBackend,
+	config::{Config, SubstrateConfig},
+	error::RemoteErr,
+	extrinsic_queue::ExtrinsicQueue,
+	maybe_inf_delay::MaybeInfDelay,
+	packet_dispatcher::PacketDispatcher,
+	peer_id::to_core_peer_id,
+	request::{extrinsic_delay, Request, SUBMIT_EXTRINSIC},
+	sync_with_runtime::sync_with_runtime,
+};
+use codec::{Decode, DecodeAll, Encode};
+use futures::{
+	future::{pending, Either},
+	stream::FuturesUnordered,
+	StreamExt,
+};
+use log::{debug, error, trace, warn};
+use mixnet::{
+	core::{Events, Message, Mixnet, Packet},
+	reply_manager::{ReplyContext, ReplyManager},
+	request_manager::RequestManager,
+};
+use sc_client_api::{BlockchainEvents, HeaderBackend};
+use sc_network::{
+	Event::{NotificationStreamClosed, NotificationStreamOpened, NotificationsReceived},
+	NetworkEventStream, NetworkNotification, NetworkPeers, NetworkStateInfo, ProtocolName,
+};
+use sc_transaction_pool_api::{
+	LocalTransactionPool, OffchainTransactionPoolFactory, TransactionPool,
+};
+use sp_api::{ApiExt, ProvideRuntimeApi};
+use sp_consensus::SyncOracle;
+use sp_keystore::{KeystoreExt, KeystorePtr};
+use sp_mixnet::{runtime_api::MixnetApi, types::Mixnode};
+use sp_runtime::{
+	traits::{Block, Header},
+	transaction_validity::TransactionSource,
+	Saturating,
+};
+use std::{
+	sync::Arc,
+	time::{Duration, Instant},
+};
+
+const LOG_TARGET: &str = "mixnet";
+
+const MIN_BLOCKS_BETWEEN_REGISTRATION_ATTEMPTS: u32 = 3;
+
+fn complete_submit_extrinsic<X>(
+	reply_manager: &mut ReplyManager,
+	reply_context: ReplyContext,
+	data: Result<(), RemoteErr>,
+	mixnet: &mut Mixnet<X>,
+) {
+	reply_manager.complete(reply_context, data.encode(), mixnet);
+}
+
+fn handle_packet<X, E: Decode>(
+	packet: &Packet,
+	mixnet: &mut Mixnet<X>,
+	request_manager: &mut RequestManager<Request>,
+	reply_manager: &mut ReplyManager,
+	extrinsic_queue: &mut ExtrinsicQueue<E>,
+	config: &SubstrateConfig,
+) {
+	match mixnet.handle_packet(packet) {
+		Some(Message::Request(message)) => {
+			let Some((reply_context, data)) = reply_manager.insert(message, mixnet) else { return };
+
+			match data.as_slice() {
+				[SUBMIT_EXTRINSIC, encoded_extrinsic @ ..] => {
+					if !extrinsic_queue.has_space() {
+						debug!(target: LOG_TARGET, "No space in extrinsic queue; dropping request");
+						// We don't send a reply in this case; we want the requester to retry
+						reply_manager.abandon(reply_context);
+						return
+					}
+
+					// Decode the extrinsic
+					let mut encoded_extrinsic = encoded_extrinsic;
+					let extrinsic = match E::decode_all(&mut encoded_extrinsic) {
+						Ok(extrinsic) => extrinsic,
+						Err(err) => {
+							complete_submit_extrinsic(
+								reply_manager,
+								reply_context,
+								Err(RemoteErr::Decode(format!("Bad extrinsic: {}", err))),
+								mixnet,
+							);
+							return
+						},
+					};
+
+					let deadline =
+						Instant::now() + extrinsic_delay(reply_context.message_id(), config);
+					extrinsic_queue.insert(deadline, extrinsic, reply_context);
+				},
+				_ => {
+					debug!(target: LOG_TARGET, "Unrecognised request; discarding");
+					// To keep things simple we don't bother sending a reply in this case. The
+					// requester will give up and try another mixnode eventually.
+					reply_manager.abandon(reply_context);
+				},
+			}
+		},
+		Some(Message::Reply(message)) => {
+			let Some(request) = request_manager.remove(&message.request_id) else {
+				trace!(
+					target: LOG_TARGET,
+					"Received reply to already-completed request with message ID {:x?}",
+					message.request_id
+				);
+				return
+			};
+			request.send_reply(&message.data);
+		},
+		None => (),
+	}
+}
+
+fn time_until(instant: Instant) -> Duration {
+	instant.saturating_duration_since(Instant::now())
+}
+
+/// Run the mixnet service. If `keystore` is `None`, the service will not attempt to register the
+/// local node as a mixnode, even if `config.register` is `true`.
+pub async fn run<B, C, S, N, P>(
+	config: Config,
+	mut api_backend: ApiBackend,
+	client: Arc<C>,
+	sync: Arc<S>,
+	network: Arc<N>,
+	protocol_name: ProtocolName,
+	transaction_pool: Arc<P>,
+	keystore: Option<KeystorePtr>,
+) where
+	B: Block,
+	C: BlockchainEvents<B> + ProvideRuntimeApi<B> + HeaderBackend<B>,
+	C::Api: MixnetApi<B>,
+	S: SyncOracle,
+	N: NetworkStateInfo + NetworkEventStream + NetworkNotification + NetworkPeers,
+	P: TransactionPool<Block = B> + LocalTransactionPool<Block = B> + 'static,
+{
+	let local_peer_id = network.local_peer_id();
+	let Some(local_peer_id) = to_core_peer_id(&local_peer_id) else {
+		error!(target: LOG_TARGET,
+			"Failed to convert libp2p local peer ID {local_peer_id} to mixnet peer ID; \
+			mixnet not running");
+		return
+	};
+
+	let offchain_transaction_pool_factory =
+		OffchainTransactionPoolFactory::new(transaction_pool.clone());
+
+	let mut mixnet = Mixnet::new(config.core);
+	// It would make sense to reset this to 0 when the session changes, but registrations aren't
+	// allowed at the start of a session anyway, so it doesn't really matter
+	let mut min_register_block = 0u32.into();
+	let mut packet_dispatcher = PacketDispatcher::new(&local_peer_id);
+	let mut request_manager = RequestManager::new(config.request_manager);
+	let mut reply_manager = ReplyManager::new(config.reply_manager);
+	let mut extrinsic_queue = ExtrinsicQueue::new(config.substrate.extrinsic_queue_capacity);
+
+	let mut finality_notifications = client.finality_notification_stream();
+	// Import notifications only used for triggering registration attempts
+	let mut import_notifications = if config.substrate.register && keystore.is_some() {
+		Some(client.import_notification_stream())
+	} else {
+		None
+	};
+	let mut network_events = network.event_stream("mixnet").fuse();
+	let mut next_forward_packet_delay = MaybeInfDelay::new(None);
+	let mut next_authored_packet_delay = MaybeInfDelay::new(None);
+	let mut ready_peers = FuturesUnordered::new();
+	let mut next_retry_delay = MaybeInfDelay::new(None);
+	let mut next_extrinsic_delay = MaybeInfDelay::new(None);
+	let mut submit_extrinsic_results = FuturesUnordered::new();
+
+	loop {
+		let mut next_request = if request_manager.has_space() {
+			Either::Left(api_backend.request_receiver.select_next_some())
+		} else {
+			Either::Right(pending())
+		};
+
+		let mut next_import_notification = import_notifications.as_mut().map_or_else(
+			|| Either::Right(pending()),
+			|notifications| Either::Left(notifications.select_next_some()),
+		);
+
+		futures::select! {
+			request = next_request =>
+				request_manager.insert(request, &mut mixnet, &packet_dispatcher, &config.substrate),
+
+			notification = finality_notifications.select_next_some() => {
+				// To avoid trying to connect to old mixnodes, ignore finality notifications while
+				// offline or major syncing. This is a bit racy but should be good enough.
+				if !sync.is_offline() && !sync.is_major_syncing() {
+					let api = client.runtime_api();
+					sync_with_runtime(&mut mixnet, api, notification.hash);
+					request_manager.update_session_status(
+						&mut mixnet, &packet_dispatcher, &config.substrate);
+				}
+			}
+
+			notification = next_import_notification => {
+				if notification.is_new_best && (*notification.header.number() >= min_register_block) {
+					let mut api = client.runtime_api();
+					api.register_extension(KeystoreExt(keystore.clone().expect(
+						"Import notification stream only setup if we have a keystore")));
+					api.register_extension(offchain_transaction_pool_factory
+						.offchain_transaction_pool(notification.hash));
+					let session_index = mixnet.session_status().current_index;
+					let mixnode = Mixnode {
+						kx_public: *mixnet.next_kx_public(),
+						peer_id: local_peer_id,
+						external_addresses: network.external_addresses().into_iter()
+							.map(|addr| addr.to_string().into_bytes()).collect(),
+					};
+					match api.maybe_register(notification.hash, session_index, mixnode) {
+						Ok(true) => min_register_block = notification.header.number().saturating_add(
+							MIN_BLOCKS_BETWEEN_REGISTRATION_ATTEMPTS.into()),
+						Ok(false) => (),
+						Err(err) => debug!(target: LOG_TARGET,
+							"Error trying to register for the next session: {err}"),
+					}
+				}
+			}
+
+			event = network_events.select_next_some() => match event {
+				NotificationStreamOpened { remote, protocol, .. }
+					if protocol == protocol_name => packet_dispatcher.add_peer(&remote),
+				NotificationStreamClosed { remote, protocol }
+					if protocol == protocol_name => packet_dispatcher.remove_peer(&remote),
+				NotificationsReceived { remote, messages } => {
+					for message in messages {
+						if message.0 == protocol_name {
+							match message.1.as_ref().try_into() {
+								Ok(packet) => handle_packet(packet,
+									&mut mixnet, &mut request_manager, &mut reply_manager,
+									&mut extrinsic_queue, &config.substrate),
+								Err(_) => debug!(target: LOG_TARGET,
+									"Dropped incorrectly sized packet ({} bytes) from {remote}",
+									message.1.len(),
+								),
+							}
+						}
+					}
+				}
+				_ => ()
+			},
+
+			_ = next_forward_packet_delay => {
+				if let Some(packet) = mixnet.pop_next_forward_packet() {
+					if let Some(ready_peer) = packet_dispatcher.dispatch(packet) {
+						if let Some(fut) = ready_peer.send_packet(&*network, protocol_name.clone()) {
+							ready_peers.push(fut);
+						}
+					}
+				} else {
+					warn!(target: LOG_TARGET,
+						"Next forward packet deadline reached, but no packet in queue; \
+						this is a bug");
+				}
+			}
+
+			_ = next_authored_packet_delay => {
+				if let Some(packet) = mixnet.pop_next_authored_packet(&packet_dispatcher) {
+					if let Some(ready_peer) = packet_dispatcher.dispatch(packet) {
+						if let Some(fut) = ready_peer.send_packet(&*network, protocol_name.clone()) {
+							ready_peers.push(fut);
+						}
+					}
+				}
+			}
+
+			ready_peer = ready_peers.select_next_some() => {
+				if let Some(ready_peer) = ready_peer {
+					if let Some(fut) = ready_peer.send_packet(&*network, protocol_name.clone()) {
+						ready_peers.push(fut);
+					}
+				}
+			}
+
+			_ = next_retry_delay => {
+				if !request_manager.pop_next_retry(&mut mixnet, &packet_dispatcher, &config.substrate) {
+					warn!(target: LOG_TARGET,
+						"Next retry deadline reached, but no request in retry queue; \
+						this is a bug");
+				}
+			}
+
+			_ = next_extrinsic_delay => {
+				if let Some((extrinsic, reply_context)) = extrinsic_queue.pop() {
+					if submit_extrinsic_results.len() < config.substrate.max_pending_extrinsics {
+						let fut = transaction_pool.submit_one(
+							client.info().best_hash,
+							TransactionSource::External,
+							extrinsic);
+						submit_extrinsic_results.push(async move {
+							(fut.await, reply_context)
+						});
+					} else {
+						// There are already too many pending extrinsics; just drop this one. We
+						// don't send a reply; we want the requester to retry.
+						debug!(target: LOG_TARGET,
+							"Too many pending extrinsics; dropped submit extrinsic request");
+						reply_manager.abandon(reply_context);
+					}
+				} else {
+					warn!(target: LOG_TARGET,
+						"Next extrinsic deadline reached, but no extrinsic in queue; \
+						this is a bug");
+				}
+			}
+
+			res_reply_context = submit_extrinsic_results.select_next_some() => {
+				let (res, reply_context) = res_reply_context;
+				let res = match res {
+					Ok(_) => Ok(()),
+					Err(err) => Err(RemoteErr::Other(err.to_string())),
+				};
+				complete_submit_extrinsic(&mut reply_manager, reply_context, res, &mut mixnet);
+			}
+		}
+
+		let events = mixnet.take_events();
+		if !events.is_empty() {
+			if events.contains(Events::RESERVED_PEERS_CHANGED) {
+				let reserved_peer_addrs = mixnet
+					.reserved_peers()
+					.flat_map(|mixnode| mixnode.extra.iter()) // External addresses
+					.cloned()
+					.collect();
+				if let Err(err) =
+					network.set_reserved_peers(protocol_name.clone(), reserved_peer_addrs)
+				{
+					debug!(target: LOG_TARGET, "Setting reserved peers failed: {err}");
+				}
+			}
+			if events.contains(Events::NEXT_FORWARD_PACKET_DEADLINE_CHANGED) {
+				next_forward_packet_delay
+					.reset(mixnet.next_forward_packet_deadline().map(time_until));
+			}
+			if events.contains(Events::NEXT_AUTHORED_PACKET_DEADLINE_CHANGED) {
+				next_authored_packet_delay.reset(mixnet.next_authored_packet_delay());
+			}
+			if events.contains(Events::SPACE_IN_AUTHORED_PACKET_QUEUE) {
+				// Note this may cause the next retry deadline to change, but should not trigger
+				// any mixnet events
+				request_manager.process_post_queues(
+					&mut mixnet,
+					&packet_dispatcher,
+					&config.substrate,
+				);
+			}
+		}
+
+		if request_manager.next_retry_deadline_changed() {
+			next_retry_delay.reset(request_manager.next_retry_deadline().map(time_until));
+		}
+
+		if extrinsic_queue.next_deadline_changed() {
+			next_extrinsic_delay.reset(extrinsic_queue.next_deadline().map(time_until));
+		}
+	}
+}
diff --git a/substrate/client/mixnet/src/sync_with_runtime.rs b/substrate/client/mixnet/src/sync_with_runtime.rs
new file mode 100644
index 0000000000000..4a80b3c75f43d
--- /dev/null
+++ b/substrate/client/mixnet/src/sync_with_runtime.rs
@@ -0,0 +1,228 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+//! [`sync_with_runtime`] synchronises the session status and mixnode sets from the blockchain
+//! runtime to the core mixnet state. It is called every time a block is finalised.
+
+use super::peer_id::from_core_peer_id;
+use libp2p_identity::PeerId;
+use log::{debug, info};
+use mixnet::core::{
+	Mixnet, Mixnode as CoreMixnode, MixnodesErr as CoreMixnodesErr, RelSessionIndex,
+	SessionPhase as CoreSessionPhase, SessionStatus as CoreSessionStatus,
+};
+use multiaddr::{multiaddr, Multiaddr, Protocol};
+use sp_api::{ApiError, ApiRef};
+use sp_mixnet::{
+	runtime_api::MixnetApi,
+	types::{
+		Mixnode as RuntimeMixnode, MixnodesErr as RuntimeMixnodesErr,
+		SessionPhase as RuntimeSessionPhase, SessionStatus as RuntimeSessionStatus,
+	},
+};
+use sp_runtime::traits::Block;
+
+const LOG_TARGET: &str = "mixnet";
+
+/// Convert a [`RuntimeSessionStatus`] to a [`CoreSessionStatus`].
+///
+/// The [`RuntimeSessionStatus`] and [`CoreSessionStatus`] types are effectively the same.
+/// [`RuntimeSessionStatus`] is used in the runtime to avoid depending on the [`mixnet`] crate
+/// there.
+fn to_core_session_status(status: RuntimeSessionStatus) -> CoreSessionStatus {
+	CoreSessionStatus {
+		current_index: status.current_index,
+		phase: match status.phase {
+			RuntimeSessionPhase::CoverToCurrent => CoreSessionPhase::CoverToCurrent,
+			RuntimeSessionPhase::RequestsToCurrent => CoreSessionPhase::RequestsToCurrent,
+			RuntimeSessionPhase::CoverToPrev => CoreSessionPhase::CoverToPrev,
+			RuntimeSessionPhase::DisconnectFromPrev => CoreSessionPhase::DisconnectFromPrev,
+		},
+	}
+}
+
+fn parse_external_addresses(external_addresses: Vec<Vec<u8>>) -> Vec<Multiaddr> {
+	external_addresses
+		.into_iter()
+		.flat_map(|addr| {
+			let addr = match String::from_utf8(addr) {
+				Ok(addr) => addr,
+				Err(addr) => {
+					debug!(
+						target: LOG_TARGET,
+						"Mixnode external address {:x?} is not valid UTF-8",
+						addr.into_bytes(),
+					);
+					return None
+				},
+			};
+			match addr.parse() {
+				Ok(addr) => Some(addr),
+				Err(err) => {
+					debug!(
+						target: LOG_TARGET,
+						"Could not parse mixnode address {addr}: {err}",
+					);
+					None
+				},
+			}
+		})
+		.collect()
+}
+
+/// Modify `external_addresses` such that there is at least one address and the final component of
+/// each address matches `peer_id`.
+fn fixup_external_addresses(external_addresses: &mut Vec<Multiaddr>, peer_id: &PeerId) {
+	// Ensure the final component of each address matches peer_id
+	external_addresses.retain_mut(|addr| match PeerId::try_from_multiaddr(addr) {
+		Some(addr_peer_id) if addr_peer_id == *peer_id => true,
+		Some(_) => {
+			debug!(
+				target: LOG_TARGET,
+				"Mixnode address {} does not match mixnode peer ID {}, ignoring",
+				addr,
+				peer_id
+			);
+			false
+		},
+		None if matches!(addr.iter().last(), Some(Protocol::P2p(_))) => {
+			debug!(
+				target: LOG_TARGET,
+				"Mixnode address {} has unrecognised P2P protocol, ignoring",
+				addr
+			);
+			false
+		},
+		None => {
+			addr.push(Protocol::P2p(*peer_id.as_ref()));
+			true
+		},
+	});
+
+	// If there are no addresses, insert one consisting of just the peer ID
+	if external_addresses.is_empty() {
+		external_addresses.push(multiaddr!(P2p(*peer_id.as_ref())));
+	}
+}
+
+/// Convert a [`RuntimeMixnode`] to a [`CoreMixnode`]. If the conversion fails, an error message is
+/// logged, but a [`CoreMixnode`] is still returned.
+///
+/// It would be possible to handle conversion failure in a better way, but this would complicate
+/// things for what should be a rare case. Note that even if the conversion here succeeds, there is
+/// no guarantee that we will be able to connect to the mixnode or send packets to it. The most
+/// common failure case is expected to be that a mixnode is simply unreachable over the network.
+fn into_core_mixnode(mixnode: RuntimeMixnode) -> CoreMixnode<Vec<Multiaddr>> {
+	let external_addresses = if let Some(peer_id) = from_core_peer_id(&mixnode.peer_id) {
+		let mut external_addresses = parse_external_addresses(mixnode.external_addresses);
+		fixup_external_addresses(&mut external_addresses, &peer_id);
+		external_addresses
+	} else {
+		debug!(
+			target: LOG_TARGET,
+			"Failed to convert mixnet peer ID {:x?} to libp2p peer ID",
+			mixnode.peer_id,
+		);
+		Vec::new()
+	};
+
+	CoreMixnode {
+		kx_public: mixnode.kx_public,
+		peer_id: mixnode.peer_id,
+		extra: external_addresses,
+	}
+}
+
+fn maybe_set_mixnodes(
+	mixnet: &mut Mixnet<Vec<Multiaddr>>,
+	rel_session_index: RelSessionIndex,
+	mixnodes: &dyn Fn() -> Result<Result<Vec<RuntimeMixnode>, RuntimeMixnodesErr>, ApiError>,
+) {
+	let current_session_index = mixnet.session_status().current_index;
+	mixnet.maybe_set_mixnodes(rel_session_index, &mut || {
+		// Note that RelSessionIndex::Prev + 0 would panic, but this closure will not get called in
+		// that case, so we are fine. Do not move this out of the closure!
+		let session_index = rel_session_index + current_session_index;
+		match mixnodes() {
+			Ok(Ok(mixnodes)) => Ok(mixnodes.into_iter().map(into_core_mixnode).collect()),
+			Ok(Err(err)) => {
+				info!(target: LOG_TARGET, "Session {session_index}: Mixnet disabled: {err}");
+				Err(CoreMixnodesErr::Permanent) // Disable the session slot
+			},
+			Err(err) => {
+				debug!(
+					target: LOG_TARGET,
+					"Session {session_index}: Error getting mixnodes from runtime: {err}"
+				);
+				Err(CoreMixnodesErr::Transient) // Just leave the session slot empty; try again next block
+			},
+		}
+	});
+}
+
+pub fn sync_with_runtime<B, A>(mixnet: &mut Mixnet<Vec<Multiaddr>>, api: ApiRef<A>, hash: B::Hash)
+where
+	B: Block,
+	A: MixnetApi<B>,
+{
+	let session_status = match api.session_status(hash) {
+		Ok(session_status) => session_status,
+		Err(err) => {
+			debug!(target: LOG_TARGET, "Error getting session status from runtime: {err}");
+			return
+		},
+	};
+	mixnet.set_session_status(to_core_session_status(session_status));
+
+	maybe_set_mixnodes(mixnet, RelSessionIndex::Prev, &|| api.prev_mixnodes(hash));
+	maybe_set_mixnodes(mixnet, RelSessionIndex::Current, &|| api.current_mixnodes(hash));
+}
+
+#[cfg(test)]
+mod tests {
+	use super::*;
+
+	#[test]
+	fn fixup_empty_external_addresses() {
+		let peer_id = PeerId::random();
+		let mut external_addresses = Vec::new();
+		fixup_external_addresses(&mut external_addresses, &peer_id);
+		assert_eq!(external_addresses, vec![multiaddr!(P2p(peer_id))]);
+	}
+
+	#[test]
+	fn fixup_misc_external_addresses() {
+		let peer_id = PeerId::random();
+		let other_peer_id = PeerId::random();
+		let mut external_addresses = vec![
+			multiaddr!(Tcp(0u16), P2p(peer_id)),
+			multiaddr!(Tcp(1u16), P2p(other_peer_id)),
+			multiaddr!(Tcp(2u16)),
+			Multiaddr::empty(),
+		];
+		fixup_external_addresses(&mut external_addresses, &peer_id);
+		assert_eq!(
+			external_addresses,
+			vec![
+				multiaddr!(Tcp(0u16), P2p(peer_id)),
+				multiaddr!(Tcp(2u16), P2p(peer_id)),
+				multiaddr!(P2p(peer_id)),
+			]
+		);
+	}
+}
diff --git a/substrate/client/network/common/src/role.rs b/substrate/client/network/common/src/role.rs
index cd43f6655b72c..fd02c00e2324a 100644
--- a/substrate/client/network/common/src/role.rs
+++ b/substrate/client/network/common/src/role.rs
@@ -16,6 +16,10 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <https://www.gnu.org/licenses/>.
 
+// File-level lint whitelist to avoid a problem with the bitflags macro below.
+// TODO: can be dropped after updating to bitflags 2.4.
+#![allow(clippy::bad_bit_mask)]
+
 use codec::{self, Encode, EncodeLike, Input, Output};
 
 /// Role that the peer sent to us during the handshake, with the addition of what our local node
diff --git a/substrate/client/network/src/protocol/notifications/behaviour.rs b/substrate/client/network/src/protocol/notifications/behaviour.rs
index 89513e004c6df..b78f15f8529c6 100644
--- a/substrate/client/network/src/protocol/notifications/behaviour.rs
+++ b/substrate/client/network/src/protocol/notifications/behaviour.rs
@@ -1423,7 +1423,6 @@ impl NetworkBehaviour for Notifications {
 									let delay_id = self.next_delay_id;
 									self.next_delay_id.0 += 1;
 									let delay = futures_timer::Delay::new(ban_duration);
-									let peer_id = peer_id;
 									self.delays.push(
 										async move {
 											delay.await;
diff --git a/substrate/client/network/src/protocol_controller.rs b/substrate/client/network/src/protocol_controller.rs
index c9baa0a77d4ba..3a305011ded02 100644
--- a/substrate/client/network/src/protocol_controller.rs
+++ b/substrate/client/network/src/protocol_controller.rs
@@ -493,8 +493,8 @@ impl ProtocolController {
 		}
 	}
 
-	/// Remove the peer from the set of reserved peers. The peer is moved to the set of regular
-	/// nodes.
+	/// Remove the peer from the set of reserved peers. The peer is either moved to the set of
+	/// regular nodes or disconnected.
 	fn on_remove_reserved_peer(&mut self, peer_id: PeerId) {
 		let state = match self.reserved_nodes.remove(&peer_id) {
 			Some(state) => state,
@@ -508,7 +508,14 @@ impl ProtocolController {
 		};
 
 		if let PeerState::Connected(direction) = state {
-			if self.reserved_only {
+			// Disconnect if we're at (or over) the regular node limit
+			let disconnect = self.reserved_only ||
+				match direction {
+					Direction::Inbound => self.num_in >= self.max_in,
+					Direction::Outbound => self.num_out >= self.max_out,
+				};
+
+			if disconnect {
 				// Disconnect the node.
 				trace!(
 					target: LOG_TARGET,
diff --git a/substrate/client/network/sync/src/blocks.rs b/substrate/client/network/sync/src/blocks.rs
index 240c1ca1f8b26..cad50fef3e321 100644
--- a/substrate/client/network/sync/src/blocks.rs
+++ b/substrate/client/network/sync/src/blocks.rs
@@ -212,6 +212,31 @@ impl<B: BlockT> BlockCollection<B> {
 		ready
 	}
 
+	/// Returns the block header of the first block that is ready for importing.
+	/// `from` is the maximum block number for the start of the range that we are interested in.
+	/// Returns `None` if the first ready block starts at a number higher than `from`.
+	/// The logic is structured to be consistent with `ready_blocks()`.
+	pub fn first_ready_block_header(&self, from: NumberFor<B>) -> Option<B::Header> {
+		let mut prev = from;
+		for (&start, range_data) in &self.blocks {
+			if start > prev {
+				break
+			}
+
+			match range_data {
+				BlockRangeState::Complete(blocks) => {
+					let len = (blocks.len() as u32).into();
+					prev = start + len;
+					if let Some(BlockData { block, .. }) = blocks.first() {
+						return block.header.clone()
+					}
+				},
+				_ => continue,
+			}
+		}
+		None
+	}
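+
+	// Illustrative behaviour (assuming complete ranges starting at blocks 1 and
+	// 10): `first_ready_block_header(1)` returns the header of block 1, while
+	// `first_ready_block_header(0)` returns `None`, because the first ready
+	// range starts above `from`.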
+
 	pub fn clear_queued(&mut self, hash: &B::Hash) {
 		if let Some((from, to)) = self.queued_blocks.remove(hash) {
 			let mut block_num = from;
diff --git a/substrate/client/network/sync/src/lib.rs b/substrate/client/network/sync/src/lib.rs
index 10eaa2450518e..a291da4a90d58 100644
--- a/substrate/client/network/sync/src/lib.rs
+++ b/substrate/client/network/sync/src/lib.rs
@@ -1405,8 +1405,27 @@ where
 
 	/// Get the set of downloaded blocks that are ready to be queued for import.
 	fn ready_blocks(&mut self) -> Vec<IncomingBlock<B>> {
+		let start_block = self.best_queued_number + One::one();
+
+		// Verify that the parent of the first available block is in the chain.
+		// If not, we are downloading from a fork. In this case, wait until
+		// the start block has a parent on chain.
+		let parent_on_chain =
+			self.blocks.first_ready_block_header(start_block).map_or(false, |hdr| {
+				std::matches!(
+					self.block_status(hdr.parent_hash()).unwrap_or(BlockStatus::Unknown),
+					BlockStatus::InChainWithState |
+						BlockStatus::InChainPruned |
+						BlockStatus::Queued
+				)
+			});
+
+		if !parent_on_chain {
+			return vec![]
+		}
+
 		self.blocks
-			.ready_blocks(self.best_queued_number + One::one())
+			.ready_blocks(start_block)
 			.into_iter()
 			.map(|block_data| {
 				let justifications = block_data
@@ -3364,4 +3383,380 @@ mod test {
 		pending_responses.remove(&peers[1]);
 		assert_eq!(pending_responses.len(), 0);
 	}
+
+	#[test]
+	fn syncs_fork_with_partial_response_extends_tip() {
+		sp_tracing::try_init_simple();
+
+		// Set up: the two chains share the first 15 blocks before
+		// diverging. The other (canonical) fork is longer.
+		let max_blocks_per_request = 64;
+		let common_ancestor = 15;
+		let non_canonical_chain_length = common_ancestor + 3;
+		let canonical_chain_length = common_ancestor + max_blocks_per_request + 10;
+
+		let (_chain_sync_network_provider, chain_sync_network_handle) =
+			NetworkServiceProvider::new();
+		let mut client = Arc::new(TestClientBuilder::new().build());
+
+		// Blocks on the non-canonical chain.
+		let non_canonical_blocks = (0..non_canonical_chain_length)
+			.map(|_| build_block(&mut client, None, false))
+			.collect::<Vec<_>>();
+
+		// Blocks on the canonical chain.
+		let canonical_blocks = {
+			let mut client = Arc::new(TestClientBuilder::new().build());
+			let common_blocks = non_canonical_blocks[..common_ancestor as usize]
+				.into_iter()
+				.inspect(|b| block_on(client.import(BlockOrigin::Own, (*b).clone())).unwrap())
+				.cloned()
+				.collect::<Vec<_>>();
+
+			common_blocks
+				.into_iter()
+				.chain(
+					(0..(canonical_chain_length - common_ancestor as u32))
+						.map(|_| build_block(&mut client, None, true)),
+				)
+				.collect::<Vec<_>>()
+		};
+
+		let mut sync = ChainSync::new(
+			SyncMode::Full,
+			client.clone(),
+			ProtocolName::from("test-block-announce-protocol"),
+			1,
+			max_blocks_per_request,
+			None,
+			chain_sync_network_handle,
+		)
+		.unwrap();
+
+		// Connect the node we will sync from
+		let peer_id = PeerId::random();
+		let canonical_tip = canonical_blocks.last().unwrap().clone();
+		let mut request = sync
+			.new_peer(peer_id, canonical_tip.hash(), *canonical_tip.header().number())
+			.unwrap()
+			.unwrap();
+		assert_eq!(FromBlock::Number(client.info().best_number), request.from);
+		assert_eq!(Some(1), request.max);
+
+		// Do the ancestor search
+		loop {
+			let block =
+				&canonical_blocks[unwrap_from_block_number(request.from.clone()) as usize - 1];
+			let response = create_block_response(vec![block.clone()]);
+
+			let on_block_data = sync.on_block_data(&peer_id, Some(request), response).unwrap();
+			request = if let OnBlockData::Request(_peer, request) = on_block_data {
+				request
+			} else {
+				// We found the ancestor
+				break
+			};
+
+			log::trace!(target: LOG_TARGET, "Request: {request:?}");
+		}
+
+		// The response for the 64 blocks is returned in two parts:
+		// part 1: last 61 blocks [19..79], part 2: first 3 blocks [16..18].
+		// Even though the first part numerically extends the current chain ending
+		// at 18, it is on a different fork and should not result in an import yet.
+		let resp_1_from = common_ancestor as u64 + max_blocks_per_request as u64;
+		let resp_2_from = common_ancestor as u64 + 3;
+
+		// No import expected.
+		let request = get_block_request(
+			&mut sync,
+			FromBlock::Number(resp_1_from),
+			max_blocks_per_request as u32,
+			&peer_id,
+		);
+
+		let from = unwrap_from_block_number(request.from.clone());
+		let mut resp_blocks = canonical_blocks[18..from as usize].to_vec();
+		resp_blocks.reverse();
+		let response = create_block_response(resp_blocks.clone());
+		let res = sync.on_block_data(&peer_id, Some(request), response).unwrap();
+		assert!(matches!(
+			res,
+			OnBlockData::Import(ImportBlocksAction{ origin: _, blocks }) if blocks.is_empty()
+		),);
+
+		// Gap filled; expect max_blocks_per_request blocks to be imported now.
+		let request = get_block_request(&mut sync, FromBlock::Number(resp_2_from), 3, &peer_id);
+		let mut resp_blocks = canonical_blocks[common_ancestor as usize..18].to_vec();
+		resp_blocks.reverse();
+		let response = create_block_response(resp_blocks.clone());
+		let res = sync.on_block_data(&peer_id, Some(request), response).unwrap();
+		let to_import: Vec<_> = match &res {
+			OnBlockData::Import(ImportBlocksAction { origin: _, blocks }) => {
+				assert_eq!(blocks.len(), sync.max_blocks_per_request as usize);
+				blocks
+					.iter()
+					.map(|b| {
+						let num = *b.header.as_ref().unwrap().number() as usize;
+						canonical_blocks[num - 1].clone()
+					})
+					.collect()
+			},
+			_ => {
+				panic!("Unexpected response: {res:?}");
+			},
+		};
+
+		let _ = sync.on_blocks_processed(
+			max_blocks_per_request as usize,
+			resp_blocks.len(),
+			resp_blocks
+				.iter()
+				.rev()
+				.map(|b| {
+					(
+						Ok(BlockImportStatus::ImportedUnknown(
+							*b.header().number(),
+							Default::default(),
+							Some(peer_id),
+						)),
+						b.hash(),
+					)
+				})
+				.collect(),
+		);
+		to_import.into_iter().for_each(|b| {
+			assert!(matches!(client.block(*b.header.parent_hash()), Ok(Some(_))));
+			block_on(client.import(BlockOrigin::Own, b)).unwrap();
+		});
+		let expected_number = common_ancestor as u32 + max_blocks_per_request as u32;
+		assert_eq!(sync.best_queued_number as u32, expected_number);
+		assert_eq!(sync.best_queued_hash, canonical_blocks[expected_number as usize - 1].hash());
+		// Sync rest of the chain.
+		let request =
+			get_block_request(&mut sync, FromBlock::Hash(canonical_tip.hash()), 10_u32, &peer_id);
+		let mut resp_blocks = canonical_blocks
+			[(canonical_chain_length - 10) as usize..canonical_chain_length as usize]
+			.to_vec();
+		resp_blocks.reverse();
+		let response = create_block_response(resp_blocks.clone());
+		let res = sync.on_block_data(&peer_id, Some(request), response).unwrap();
+		assert!(matches!(
+			res,
+			OnBlockData::Import(ImportBlocksAction{ origin: _, blocks }) if blocks.len() == 10 as usize
+		),);
+		let _ = sync.on_blocks_processed(
+			max_blocks_per_request as usize,
+			resp_blocks.len(),
+			resp_blocks
+				.iter()
+				.rev()
+				.map(|b| {
+					(
+						Ok(BlockImportStatus::ImportedUnknown(
+							*b.header().number(),
+							Default::default(),
+							Some(peer_id),
+						)),
+						b.hash(),
+					)
+				})
+				.collect(),
+		);
+		resp_blocks.into_iter().rev().for_each(|b| {
+			assert!(matches!(client.block(*b.header.parent_hash()), Ok(Some(_))));
+			block_on(client.import(BlockOrigin::Own, b)).unwrap();
+		});
+		let expected_number = canonical_chain_length as u32;
+		assert_eq!(sync.best_queued_number as u32, expected_number);
+		assert_eq!(sync.best_queued_hash, canonical_blocks[expected_number as usize - 1].hash());
+	}
+
+	#[test]
+	fn syncs_fork_with_partial_response_does_not_extend_tip() {
+		sp_tracing::try_init_simple();
+
+		// Set up: the two chains share the first 15 blocks before
+		// diverging. The other (canonical) fork is longer.
+		let max_blocks_per_request = 64;
+		let common_ancestor = 15;
+		let non_canonical_chain_length = common_ancestor + 3;
+		let canonical_chain_length = common_ancestor + max_blocks_per_request + 10;
+
+		let (_chain_sync_network_provider, chain_sync_network_handle) =
+			NetworkServiceProvider::new();
+		let mut client = Arc::new(TestClientBuilder::new().build());
+
+		// Blocks on the non-canonical chain.
+		let non_canonical_blocks = (0..non_canonical_chain_length)
+			.map(|_| build_block(&mut client, None, false))
+			.collect::<Vec<_>>();
+
+		// Blocks on the canonical chain.
+		let canonical_blocks = {
+			let mut client = Arc::new(TestClientBuilder::new().build());
+			let common_blocks = non_canonical_blocks[..common_ancestor as usize]
+				.into_iter()
+				.inspect(|b| block_on(client.import(BlockOrigin::Own, (*b).clone())).unwrap())
+				.cloned()
+				.collect::<Vec<_>>();
+
+			common_blocks
+				.into_iter()
+				.chain(
+					(0..(canonical_chain_length - common_ancestor as u32))
+						.map(|_| build_block(&mut client, None, true)),
+				)
+				.collect::<Vec<_>>()
+		};
+
+		let mut sync = ChainSync::new(
+			SyncMode::Full,
+			client.clone(),
+			ProtocolName::from("test-block-announce-protocol"),
+			1,
+			max_blocks_per_request,
+			None,
+			chain_sync_network_handle,
+		)
+		.unwrap();
+
+		// Connect the node we will sync from
+		let peer_id = PeerId::random();
+		let canonical_tip = canonical_blocks.last().unwrap().clone();
+		let mut request = sync
+			.new_peer(peer_id, canonical_tip.hash(), *canonical_tip.header().number())
+			.unwrap()
+			.unwrap();
+		assert_eq!(FromBlock::Number(client.info().best_number), request.from);
+		assert_eq!(Some(1), request.max);
+
+		// Do the ancestor search
+		loop {
+			let block =
+				&canonical_blocks[unwrap_from_block_number(request.from.clone()) as usize - 1];
+			let response = create_block_response(vec![block.clone()]);
+
+			let on_block_data = sync.on_block_data(&peer_id, Some(request), response).unwrap();
+			request = if let OnBlockData::Request(_peer, request) = on_block_data {
+				request
+			} else {
+				// We found the ancestor
+				break
+			};
+
+			log::trace!(target: LOG_TARGET, "Request: {request:?}");
+		}
+
+		// The response for the 64 blocks is returned in two parts:
+		// part 1: last 62 blocks [18..79], part 2: first 2 blocks [16..17].
+		// The first part starts at the same height as the current chain tip (18)
+		// but on a different fork, so it should not result in an import yet.
+		let resp_1_from = common_ancestor as u64 + max_blocks_per_request as u64;
+		let resp_2_from = common_ancestor as u64 + 2;
+
+		// No import expected.
+		let request = get_block_request(
+			&mut sync,
+			FromBlock::Number(resp_1_from),
+			max_blocks_per_request as u32,
+			&peer_id,
+		);
+
+		let from = unwrap_from_block_number(request.from.clone());
+		let mut resp_blocks = canonical_blocks[17..from as usize].to_vec();
+		resp_blocks.reverse();
+		let response = create_block_response(resp_blocks.clone());
+		let res = sync.on_block_data(&peer_id, Some(request), response).unwrap();
+		assert!(matches!(
+			res,
+			OnBlockData::Import(ImportBlocksAction{ origin: _, blocks }) if blocks.is_empty()
+		),);
+
+		// Gap filled; expect max_blocks_per_request blocks to be imported now.
+		let request = get_block_request(&mut sync, FromBlock::Number(resp_2_from), 2, &peer_id);
+		let mut resp_blocks = canonical_blocks[common_ancestor as usize..17].to_vec();
+		resp_blocks.reverse();
+		let response = create_block_response(resp_blocks.clone());
+		let res = sync.on_block_data(&peer_id, Some(request), response).unwrap();
+		let to_import: Vec<_> = match &res {
+			OnBlockData::Import(ImportBlocksAction { origin: _, blocks }) => {
+				assert_eq!(blocks.len(), sync.max_blocks_per_request as usize);
+				blocks
+					.iter()
+					.map(|b| {
+						let num = *b.header.as_ref().unwrap().number() as usize;
+						canonical_blocks[num - 1].clone()
+					})
+					.collect()
+			},
+			_ => {
+				panic!("Unexpected response: {res:?}");
+			},
+		};
+
+		let _ = sync.on_blocks_processed(
+			max_blocks_per_request as usize,
+			resp_blocks.len(),
+			resp_blocks
+				.iter()
+				.rev()
+				.map(|b| {
+					(
+						Ok(BlockImportStatus::ImportedUnknown(
+							*b.header().number(),
+							Default::default(),
+							Some(peer_id),
+						)),
+						b.hash(),
+					)
+				})
+				.collect(),
+		);
+		to_import.into_iter().for_each(|b| {
+			assert!(matches!(client.block(*b.header.parent_hash()), Ok(Some(_))));
+			block_on(client.import(BlockOrigin::Own, b)).unwrap();
+		});
+		let expected_number = common_ancestor as u32 + max_blocks_per_request as u32;
+		assert_eq!(sync.best_queued_number as u32, expected_number);
+		assert_eq!(sync.best_queued_hash, canonical_blocks[expected_number as usize - 1].hash());
+		// Sync rest of the chain.
+		let request =
+			get_block_request(&mut sync, FromBlock::Hash(canonical_tip.hash()), 10_u32, &peer_id);
+		let mut resp_blocks = canonical_blocks
+			[(canonical_chain_length - 10) as usize..canonical_chain_length as usize]
+			.to_vec();
+		resp_blocks.reverse();
+		let response = create_block_response(resp_blocks.clone());
+		let res = sync.on_block_data(&peer_id, Some(request), response).unwrap();
+		assert!(matches!(
+			res,
+			OnBlockData::Import(ImportBlocksAction{ origin: _, blocks }) if blocks.len() == 10 as usize
+		),);
+		let _ = sync.on_blocks_processed(
+			max_blocks_per_request as usize,
+			resp_blocks.len(),
+			resp_blocks
+				.iter()
+				.rev()
+				.map(|b| {
+					(
+						Ok(BlockImportStatus::ImportedUnknown(
+							*b.header().number(),
+							Default::default(),
+							Some(peer_id),
+						)),
+						b.hash(),
+					)
+				})
+				.collect(),
+		);
+		resp_blocks.into_iter().rev().for_each(|b| {
+			assert!(matches!(client.block(*b.header.parent_hash()), Ok(Some(_))));
+			block_on(client.import(BlockOrigin::Own, b)).unwrap();
+		});
+		let expected_number = canonical_chain_length as u32;
+		assert_eq!(sync.best_queued_number as u32, expected_number);
+		assert_eq!(sync.best_queued_hash, canonical_blocks[expected_number as usize - 1].hash());
+	}
 }
diff --git a/substrate/client/rpc-api/Cargo.toml b/substrate/client/rpc-api/Cargo.toml
index a2ee090b1c239..9dca2e72fcdd5 100644
--- a/substrate/client/rpc-api/Cargo.toml
+++ b/substrate/client/rpc-api/Cargo.toml
@@ -19,6 +19,7 @@ serde = { version = "1.0.188", features = ["derive"] }
 serde_json = "1.0.107"
 thiserror = "1.0"
 sc-chain-spec = { path = "../chain-spec" }
+sc-mixnet = { path = "../mixnet" }
 sc-transaction-pool-api = { path = "../transaction-pool/api" }
 sp-core = { path = "../../primitives/core" }
 sp-rpc = { path = "../../primitives/rpc" }
diff --git a/substrate/client/rpc-api/src/error.rs b/substrate/client/rpc-api/src/error.rs
index 72941e3145b94..58b75b8fb1691 100644
--- a/substrate/client/rpc-api/src/error.rs
+++ b/substrate/client/rpc-api/src/error.rs
@@ -25,4 +25,5 @@ pub mod base {
 	pub const OFFCHAIN: i32 = 5000;
 	pub const DEV: i32 = 6000;
 	pub const STATEMENT: i32 = 7000;
+	pub const MIXNET: i32 = 8000;
 }
diff --git a/substrate/client/rpc-api/src/lib.rs b/substrate/client/rpc-api/src/lib.rs
index b99c237dc859b..32120d37902dd 100644
--- a/substrate/client/rpc-api/src/lib.rs
+++ b/substrate/client/rpc-api/src/lib.rs
@@ -31,6 +31,7 @@ pub mod author;
 pub mod chain;
 pub mod child_state;
 pub mod dev;
+pub mod mixnet;
 pub mod offchain;
 pub mod state;
 pub mod statement;
diff --git a/substrate/client/rpc-api/src/mixnet/error.rs b/substrate/client/rpc-api/src/mixnet/error.rs
new file mode 100644
index 0000000000000..0dde5f32e6139
--- /dev/null
+++ b/substrate/client/rpc-api/src/mixnet/error.rs
@@ -0,0 +1,48 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+//! Mixnet RPC module errors.
+
+use jsonrpsee::types::error::{CallError, ErrorObject};
+use sc_mixnet::{PostErr, RemoteErr, TopologyErr};
+
+/// Mixnet RPC error type.
+pub struct Error(pub sc_mixnet::Error);
+
+/// Base code for all mixnet errors.
+const BASE_ERROR: i32 = crate::error::base::MIXNET;
+
+impl From<Error> for jsonrpsee::core::Error {
+	fn from(err: Error) -> Self {
+		let code = match err.0 {
+			sc_mixnet::Error::ServiceUnavailable => BASE_ERROR + 1,
+			sc_mixnet::Error::NoReply => BASE_ERROR + 2,
+			sc_mixnet::Error::BadReply => BASE_ERROR + 3,
+			sc_mixnet::Error::Post(PostErr::TooManyFragments) => BASE_ERROR + 101,
+			sc_mixnet::Error::Post(PostErr::SessionMixnodesNotKnown(_)) => BASE_ERROR + 102,
+			sc_mixnet::Error::Post(PostErr::SessionDisabled(_)) => BASE_ERROR + 103,
+			sc_mixnet::Error::Post(PostErr::Topology(TopologyErr::NoConnectedGatewayMixnodes)) =>
+				BASE_ERROR + 151,
+			sc_mixnet::Error::Post(PostErr::Topology(_)) => BASE_ERROR + 150,
+			sc_mixnet::Error::Post(_) => BASE_ERROR + 100,
+			sc_mixnet::Error::Remote(RemoteErr::Other(_)) => BASE_ERROR + 200,
+			sc_mixnet::Error::Remote(RemoteErr::Decode(_)) => BASE_ERROR + 201,
+		};
+		CallError::Custom(ErrorObject::owned(code, err.0.to_string(), None::<()>)).into()
+	}
+}
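+
+// Worked examples of the scheme above (derived from the constants in this
+// file, not an exhaustive list): with `BASE_ERROR` = 8000, `Error::NoReply`
+// maps to code 8002, a generic post error to 8100, and
+// `TopologyErr::NoConnectedGatewayMixnodes` to 8151.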
diff --git a/substrate/client/rpc-api/src/mixnet/mod.rs b/substrate/client/rpc-api/src/mixnet/mod.rs
new file mode 100644
index 0000000000000..bc478cf3bf334
--- /dev/null
+++ b/substrate/client/rpc-api/src/mixnet/mod.rs
@@ -0,0 +1,31 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+//! Substrate mixnet API.
+
+pub mod error;
+
+use jsonrpsee::{core::RpcResult, proc_macros::rpc};
+use sp_core::Bytes;
+
+#[rpc(client, server)]
+pub trait MixnetApi {
+	/// Submit an encoded extrinsic over the mixnet for inclusion in a block.
+	#[method(name = "mixnet_submitExtrinsic")]
+	async fn submit_extrinsic(&self, extrinsic: Bytes) -> RpcResult<()>;
+}
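+
+// Example request as it might appear on the wire (illustrative; the `0x...`
+// placeholder stands for a hex-encoded SCALE extrinsic, which is how
+// `sp_core::Bytes` serialises):
+//
+//     {"jsonrpc":"2.0","id":1,"method":"mixnet_submitExtrinsic","params":["0x..."]}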
diff --git a/substrate/client/rpc/Cargo.toml b/substrate/client/rpc/Cargo.toml
index 64aaa7c94aacb..dd1120e5b0f86 100644
--- a/substrate/client/rpc/Cargo.toml
+++ b/substrate/client/rpc/Cargo.toml
@@ -22,6 +22,7 @@ serde_json = "1.0.107"
 sc-block-builder = { path = "../block-builder" }
 sc-chain-spec = { path = "../chain-spec" }
 sc-client-api = { path = "../api" }
+sc-mixnet = { path = "../mixnet" }
 sc-rpc-api = { path = "../rpc-api" }
 sc-tracing = { path = "../tracing" }
 sc-transaction-pool-api = { path = "../transaction-pool/api" }
diff --git a/substrate/client/rpc/src/lib.rs b/substrate/client/rpc/src/lib.rs
index 475fc77a9b5bd..94fdb2d734ff0 100644
--- a/substrate/client/rpc/src/lib.rs
+++ b/substrate/client/rpc/src/lib.rs
@@ -34,6 +34,7 @@ pub use sc_rpc_api::DenyUnsafe;
 pub mod author;
 pub mod chain;
 pub mod dev;
+pub mod mixnet;
 pub mod offchain;
 pub mod state;
 pub mod statement;
diff --git a/substrate/client/rpc/src/mixnet/mod.rs b/substrate/client/rpc/src/mixnet/mod.rs
new file mode 100644
index 0000000000000..3f3d9c5aa4526
--- /dev/null
+++ b/substrate/client/rpc/src/mixnet/mod.rs
@@ -0,0 +1,47 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+//! Substrate mixnet API.
+
+use jsonrpsee::core::{async_trait, RpcResult};
+use sc_mixnet::Api;
+use sc_rpc_api::mixnet::error::Error;
+pub use sc_rpc_api::mixnet::MixnetApiServer;
+use sp_core::Bytes;
+
+/// Mixnet API.
+pub struct Mixnet(futures::lock::Mutex<Api>);
+
+impl Mixnet {
+	/// Create a new mixnet API instance.
+	pub fn new(api: Api) -> Self {
+		Self(futures::lock::Mutex::new(api))
+	}
+}
+
+#[async_trait]
+impl MixnetApiServer for Mixnet {
+	async fn submit_extrinsic(&self, extrinsic: Bytes) -> RpcResult<()> {
+		// We only hold the lock while pushing the request into the requests channel
+		let fut = {
+			let mut api = self.0.lock().await;
+			api.submit_extrinsic(extrinsic).await
+		};
+		Ok(fut.await.map_err(Error)?)
+	}
+}
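+
+// Hypothetical wiring sketch (the actual wiring lives in the node's RPC
+// builder, not here): `into_rpc()` is generated by the `#[rpc(server)]`
+// macro on `MixnetApiServer`.
+//
+//     let mut module = jsonrpsee::RpcModule::new(());
+//     module.merge(Mixnet::new(mixnet_api).into_rpc())?;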
diff --git a/substrate/client/service/test/src/client/mod.rs b/substrate/client/service/test/src/client/mod.rs
index c40ac33da4bb9..f82008755874b 100644
--- a/substrate/client/service/test/src/client/mod.rs
+++ b/substrate/client/service/test/src/client/mod.rs
@@ -39,7 +39,6 @@ use sp_runtime::{
 };
 use sp_state_machine::{backend::Backend as _, InMemoryBackend, OverlayedChanges, StateMachine};
 use sp_storage::{ChildInfo, StorageKey};
-use sp_trie::{LayoutV0, TrieConfiguration};
 use std::{collections::HashSet, sync::Arc};
 use substrate_test_runtime::TestAPI;
 use substrate_test_runtime_client::{
@@ -62,22 +61,17 @@ fn construct_block(
 	backend: &InMemoryBackend<BlakeTwo256>,
 	number: BlockNumber,
 	parent_hash: Hash,
-	state_root: Hash,
 	txs: Vec<Transfer>,
-) -> (Vec<u8>, Hash) {
+) -> Vec<u8> {
 	let transactions = txs.into_iter().map(|tx| tx.into_unchecked_extrinsic()).collect::<Vec<_>>();
 
-	let iter = transactions.iter().map(Encode::encode);
-	let extrinsics_root = LayoutV0::<BlakeTwo256>::ordered_trie_root(iter).into();
-
 	let mut header = Header {
 		parent_hash,
 		number,
-		state_root,
-		extrinsics_root,
+		state_root: Default::default(),
+		extrinsics_root: Default::default(),
 		digest: Digest { logs: vec![] },
 	};
-	let hash = header.hash();
 	let mut overlay = OverlayedChanges::default();
 	let backend_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(backend);
 	let runtime_code = backend_runtime_code.runtime_code().expect("Code is part of the backend");
@@ -124,19 +118,16 @@ fn construct_block(
 	.unwrap();
 	header = Header::decode(&mut &ret_data[..]).unwrap();
 
-	(vec![].and(&Block { header, extrinsics: transactions }), hash)
+	vec![].and(&Block { header, extrinsics: transactions })
 }
 
-fn block1(genesis_hash: Hash, backend: &InMemoryBackend<BlakeTwo256>) -> (Vec<u8>, Hash) {
+fn block1(genesis_hash: Hash, backend: &InMemoryBackend<BlakeTwo256>) -> Vec<u8> {
 	construct_block(
 		backend,
 		1,
 		genesis_hash,
-		array_bytes::hex_n_into_unchecked(
-			"25e5b37074063ab75c889326246640729b40d0c86932edc527bc80db0e04fe5c",
-		),
 		vec![Transfer {
-			from: AccountKeyring::Alice.into(),
+			from: AccountKeyring::One.into(),
 			to: AccountKeyring::Two.into(),
 			amount: 69 * DOLLARS,
 			nonce: 0,
@@ -175,7 +166,7 @@ fn construct_genesis_should_work_with_native() {
 	let genesis_hash = insert_genesis_block(&mut storage);
 
 	let backend = InMemoryBackend::from((storage, StateVersion::default()));
-	let (b1data, _b1hash) = block1(genesis_hash, &backend);
+	let b1data = block1(genesis_hash, &backend);
 	let backend_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&backend);
 	let runtime_code = backend_runtime_code.runtime_code().expect("Code is part of the backend");
 
@@ -206,7 +197,7 @@ fn construct_genesis_should_work_with_wasm() {
 	let genesis_hash = insert_genesis_block(&mut storage);
 
 	let backend = InMemoryBackend::from((storage, StateVersion::default()));
-	let (b1data, _b1hash) = block1(genesis_hash, &backend);
+	let b1data = block1(genesis_hash, &backend);
 	let backend_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&backend);
 	let runtime_code = backend_runtime_code.runtime_code().expect("Code is part of the backend");
 
diff --git a/substrate/client/storage-monitor/Cargo.toml b/substrate/client/storage-monitor/Cargo.toml
index 7538f5ba602ce..021ee76240b98 100644
--- a/substrate/client/storage-monitor/Cargo.toml
+++ b/substrate/client/storage-monitor/Cargo.toml
@@ -9,7 +9,7 @@ description = "Storage monitor service for substrate"
 homepage = "https://substrate.io"
 
 [dependencies]
-clap = { version = "4.4.4", features = ["derive", "string"] }
+clap = { version = "4.4.6", features = ["derive", "string"] }
 log = "0.4.17"
 fs4 = "0.6.3"
 sc-client-db = { path = "../db", default-features = false}
diff --git a/substrate/client/tracing/proc-macro/Cargo.toml b/substrate/client/tracing/proc-macro/Cargo.toml
index f18e0aacd3776..b134cbce3ccf4 100644
--- a/substrate/client/tracing/proc-macro/Cargo.toml
+++ b/substrate/client/tracing/proc-macro/Cargo.toml
@@ -18,4 +18,4 @@ proc-macro = true
 proc-macro-crate = "1.1.3"
 proc-macro2 = "1.0.56"
 quote = { version = "1.0.28", features = ["proc-macro"] }
-syn = { version = "2.0.37", features = ["proc-macro", "full", "extra-traits", "parsing"] }
+syn = { version = "2.0.38", features = ["proc-macro", "full", "extra-traits", "parsing"] }
diff --git a/substrate/frame/asset-rate/src/lib.rs b/substrate/frame/asset-rate/src/lib.rs
index c3dc551f876d0..d4afca8b73c4b 100644
--- a/substrate/frame/asset-rate/src/lib.rs
+++ b/substrate/frame/asset-rate/src/lib.rs
@@ -240,4 +240,9 @@ where
 			.ok_or(pallet::Error::<T>::UnknownAssetKind.into())?;
 		Ok(rate.saturating_mul_int(balance))
 	}
+	/// Set a conversion rate to `1` for the `asset_id`.
+	#[cfg(feature = "runtime-benchmarks")]
+	fn ensure_successful(asset_id: AssetKindOf<T>) {
+		pallet::ConversionRateToNative::<T>::set(asset_id.clone(), Some(1.into()));
+	}
 }
diff --git a/substrate/frame/authorship/src/lib.rs b/substrate/frame/authorship/src/lib.rs
index a9bd0c38cb67c..56a516894dec2 100644
--- a/substrate/frame/authorship/src/lib.rs
+++ b/substrate/frame/authorship/src/lib.rs
@@ -97,16 +97,10 @@ mod tests {
 	use super::*;
 	use crate as pallet_authorship;
 	use codec::{Decode, Encode};
-	use frame_support::{
-		traits::{ConstU32, ConstU64},
-		ConsensusEngineId,
-	};
+	use frame_support::{derive_impl, ConsensusEngineId};
 	use sp_core::H256;
 	use sp_runtime::{
-		generic::DigestItem,
-		testing::Header,
-		traits::{BlakeTwo256, Header as HeaderT, IdentityLookup},
-		BuildStorage,
+		generic::DigestItem, testing::Header, traits::Header as HeaderT, BuildStorage,
 	};
 
 	type Block = frame_system::mocking::MockBlock<Test>;
@@ -119,30 +113,9 @@ mod tests {
 		}
 	);
 
+	#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)]
 	impl frame_system::Config for Test {
-		type BaseCallFilter = frame_support::traits::Everything;
-		type BlockWeights = ();
-		type BlockLength = ();
-		type DbWeight = ();
-		type RuntimeOrigin = RuntimeOrigin;
-		type Nonce = u64;
-		type RuntimeCall = RuntimeCall;
-		type Hash = H256;
-		type Hashing = BlakeTwo256;
-		type AccountId = u64;
-		type Lookup = IdentityLookup<Self::AccountId>;
 		type Block = Block;
-		type RuntimeEvent = RuntimeEvent;
-		type BlockHashCount = ConstU64<250>;
-		type Version = ();
-		type PalletInfo = PalletInfo;
-		type AccountData = ();
-		type OnNewAccount = ();
-		type OnKilledAccount = ();
-		type SystemWeightInfo = ();
-		type SS58Prefix = ();
-		type OnSetCode = ();
-		type MaxConsumers = ConstU32<16>;
 	}
 
 	impl pallet::Config for Test {
diff --git a/substrate/frame/babe/src/mock.rs b/substrate/frame/babe/src/mock.rs
index dbffe9f312e60..a3f755902b598 100644
--- a/substrate/frame/babe/src/mock.rs
+++ b/substrate/frame/babe/src/mock.rs
@@ -24,7 +24,7 @@ use frame_election_provider_support::{
 	onchain, SequentialPhragmen,
 };
 use frame_support::{
-	parameter_types,
+	derive_impl, parameter_types,
 	traits::{ConstU128, ConstU32, ConstU64, KeyOwnerProofSystem, OnInitialize},
 };
 use pallet_session::historical as pallet_session_historical;
@@ -32,14 +32,14 @@ use pallet_staking::FixedNominationsQuota;
 use sp_consensus_babe::{AuthorityId, AuthorityPair, Randomness, Slot, VrfSignature};
 use sp_core::{
 	crypto::{KeyTypeId, Pair, VrfSecret},
-	H256, U256,
+	U256,
 };
 use sp_io;
 use sp_runtime::{
 	curve::PiecewiseLinear,
 	impl_opaque_keys,
 	testing::{Digest, DigestItem, Header, TestXt},
-	traits::{Header as _, IdentityLookup, OpaqueKeys},
+	traits::{Header as _, OpaqueKeys},
 	BuildStorage, Perbill,
 };
 use sp_staking::{EraIndex, SessionIndex};
@@ -63,30 +63,10 @@ frame_support::construct_runtime!(
 	}
 );
 
+#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)]
 impl frame_system::Config for Test {
-	type BaseCallFilter = frame_support::traits::Everything;
-	type BlockWeights = ();
-	type BlockLength = ();
-	type DbWeight = ();
-	type RuntimeOrigin = RuntimeOrigin;
-	type Nonce = u64;
-	type RuntimeCall = RuntimeCall;
-	type Hash = H256;
-	type Version = ();
-	type Hashing = sp_runtime::traits::BlakeTwo256;
-	type AccountId = DummyValidatorId;
-	type Lookup = IdentityLookup<Self::AccountId>;
 	type Block = Block;
-	type RuntimeEvent = RuntimeEvent;
-	type BlockHashCount = ConstU64<250>;
-	type PalletInfo = PalletInfo;
 	type AccountData = pallet_balances::AccountData<u128>;
-	type OnNewAccount = ();
-	type OnKilledAccount = ();
-	type SystemWeightInfo = ();
-	type SS58Prefix = ();
-	type OnSetCode = ();
-	type MaxConsumers = frame_support::traits::ConstU32<16>;
 }
 
 impl<C> frame_system::offchain::SendTransactionTypes<C> for Test
diff --git a/substrate/frame/bags-list/remote-tests/src/snapshot.rs b/substrate/frame/bags-list/remote-tests/src/snapshot.rs
index 13922cd3ca618..81a8905e6b4b2 100644
--- a/substrate/frame/bags-list/remote-tests/src/snapshot.rs
+++ b/substrate/frame/bags-list/remote-tests/src/snapshot.rs
@@ -42,8 +42,8 @@ where
 				.to_string()],
 			at: None,
 			hashed_prefixes: vec![
-				<pallet_staking::Bonded<Runtime>>::prefix_hash(),
-				<pallet_staking::Ledger<Runtime>>::prefix_hash(),
+				<pallet_staking::Bonded<Runtime>>::prefix_hash().to_vec(),
+				<pallet_staking::Ledger<Runtime>>::prefix_hash().to_vec(),
 				<pallet_staking::Validators<Runtime>>::map_storage_final_prefix(),
 				<pallet_staking::Nominators<Runtime>>::map_storage_final_prefix(),
 			],
diff --git a/substrate/frame/bags-list/remote-tests/src/try_state.rs b/substrate/frame/bags-list/remote-tests/src/try_state.rs
index 338be50a93f79..83930024c89a5 100644
--- a/substrate/frame/bags-list/remote-tests/src/try_state.rs
+++ b/substrate/frame/bags-list/remote-tests/src/try_state.rs
@@ -39,8 +39,8 @@ pub async fn execute<Runtime, Block>(
 			pallets: vec![pallet_bags_list::Pallet::<Runtime, pallet_bags_list::Instance1>::name()
 				.to_string()],
 			hashed_prefixes: vec![
-				<pallet_staking::Bonded<Runtime>>::prefix_hash(),
-				<pallet_staking::Ledger<Runtime>>::prefix_hash(),
+				<pallet_staking::Bonded<Runtime>>::prefix_hash().to_vec(),
+				<pallet_staking::Ledger<Runtime>>::prefix_hash().to_vec(),
 			],
 			..Default::default()
 		}))
diff --git a/substrate/frame/bounties/src/tests.rs b/substrate/frame/bounties/src/tests.rs
index a6fb89bb86012..4083b05b629cd 100644
--- a/substrate/frame/bounties/src/tests.rs
+++ b/substrate/frame/bounties/src/tests.rs
@@ -24,7 +24,10 @@ use crate as pallet_bounties;
 
 use frame_support::{
 	assert_noop, assert_ok, parameter_types,
-	traits::{ConstU32, ConstU64, OnInitialize},
+	traits::{
+		tokens::{PayFromAccount, UnityAssetBalanceConversion},
+		ConstU32, ConstU64, OnInitialize,
+	},
 	PalletId,
 };
 
@@ -104,6 +107,8 @@ parameter_types! {
 	pub const TreasuryPalletId2: PalletId = PalletId(*b"py/trsr2");
 	pub static SpendLimit: Balance = u64::MAX;
 	pub static SpendLimit1: Balance = u64::MAX;
+	pub TreasuryAccount: u128 = Treasury::account_id();
+	pub TreasuryInstance1Account: u128 = Treasury1::account_id();
 }
 
 impl pallet_treasury::Config for Test {
@@ -123,6 +128,14 @@ impl pallet_treasury::Config for Test {
 	type SpendFunds = Bounties;
 	type MaxApprovals = ConstU32<100>;
 	type SpendOrigin = frame_system::EnsureRootWithSuccess<Self::AccountId, SpendLimit>;
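+	// New treasury spend types: `PayFromAccount` pays beneficiaries directly from
+	// the treasury account, and `UnityAssetBalanceConversion` converts asset
+	// balances 1:1 with the native balance, which is all these tests need.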
+	type AssetKind = ();
+	type Beneficiary = Self::AccountId;
+	type BeneficiaryLookup = IdentityLookup<Self::Beneficiary>;
+	type Paymaster = PayFromAccount<Balances, TreasuryAccount>;
+	type BalanceConverter = UnityAssetBalanceConversion;
+	type PayoutPeriod = ConstU64<10>;
+	#[cfg(feature = "runtime-benchmarks")]
+	type BenchmarkHelper = ();
 }
 
 impl pallet_treasury::Config<Instance1> for Test {
@@ -142,6 +155,14 @@ impl pallet_treasury::Config<Instance1> for Test {
 	type SpendFunds = Bounties1;
 	type MaxApprovals = ConstU32<100>;
 	type SpendOrigin = frame_system::EnsureRootWithSuccess<Self::AccountId, SpendLimit1>;
+	type AssetKind = ();
+	type Beneficiary = Self::AccountId;
+	type BeneficiaryLookup = IdentityLookup<Self::Beneficiary>;
+	type Paymaster = PayFromAccount<Balances, TreasuryInstance1Account>;
+	type BalanceConverter = UnityAssetBalanceConversion;
+	type PayoutPeriod = ConstU64<10>;
+	#[cfg(feature = "runtime-benchmarks")]
+	type BenchmarkHelper = ();
 }
 
 parameter_types! {
diff --git a/substrate/frame/child-bounties/src/tests.rs b/substrate/frame/child-bounties/src/tests.rs
index 24a6410f29f78..1fa3d944f3de1 100644
--- a/substrate/frame/child-bounties/src/tests.rs
+++ b/substrate/frame/child-bounties/src/tests.rs
@@ -24,7 +24,10 @@ use crate as pallet_child_bounties;
 
 use frame_support::{
 	assert_noop, assert_ok, parameter_types,
-	traits::{ConstU32, ConstU64, OnInitialize},
+	traits::{
+		tokens::{PayFromAccount, UnityAssetBalanceConversion},
+		ConstU32, ConstU64, OnInitialize,
+	},
 	weights::Weight,
 	PalletId,
 };
@@ -104,6 +107,7 @@ parameter_types! {
 	pub const ProposalBond: Permill = Permill::from_percent(5);
 	pub const Burn: Permill = Permill::from_percent(50);
 	pub const TreasuryPalletId: PalletId = PalletId(*b"py/trsry");
+	pub TreasuryAccount: u128 = Treasury::account_id();
 	pub const SpendLimit: Balance = u64::MAX;
 }
 
@@ -124,6 +128,14 @@ impl pallet_treasury::Config for Test {
 	type SpendFunds = Bounties;
 	type MaxApprovals = ConstU32<100>;
 	type SpendOrigin = frame_system::EnsureRootWithSuccess<Self::AccountId, SpendLimit>;
+	type AssetKind = ();
+	type Beneficiary = Self::AccountId;
+	type BeneficiaryLookup = IdentityLookup<Self::Beneficiary>;
+	type Paymaster = PayFromAccount<Balances, TreasuryAccount>;
+	type BalanceConverter = UnityAssetBalanceConversion;
+	type PayoutPeriod = ConstU64<10>;
+	#[cfg(feature = "runtime-benchmarks")]
+	type BenchmarkHelper = ();
 }
 parameter_types! {
 	// This will be 50% of the bounty fee.
diff --git a/substrate/frame/contracts/proc-macro/Cargo.toml b/substrate/frame/contracts/proc-macro/Cargo.toml
index a04f554406709..3ada9e0c23dd9 100644
--- a/substrate/frame/contracts/proc-macro/Cargo.toml
+++ b/substrate/frame/contracts/proc-macro/Cargo.toml
@@ -17,7 +17,7 @@ proc-macro = true
 [dependencies]
 proc-macro2 = "1.0.56"
 quote = "1.0.28"
-syn = { version = "2.0.37", features = ["full"] }
+syn = { version = "2.0.38", features = ["full"] }
 
 [dev-dependencies]
 
diff --git a/substrate/frame/contracts/src/wasm/prepare.rs b/substrate/frame/contracts/src/wasm/prepare.rs
index b129c17e13eca..dfe8c4f8f9b91 100644
--- a/substrate/frame/contracts/src/wasm/prepare.rs
+++ b/substrate/frame/contracts/src/wasm/prepare.rs
@@ -79,8 +79,7 @@ impl LoadedModule {
 		}
 
 		let engine = Engine::new(&config);
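+		// `Module::new` can take `code` as-is; the clone of the code was unnecessary.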
-		let module =
-			Module::new(&engine, code.clone()).map_err(|_| "Can't load the module into wasmi!")?;
+		let module = Module::new(&engine, code).map_err(|_| "Can't load the module into wasmi!")?;
 
 		// Return a `LoadedModule` instance with a __valid__ module.
diff --git a/substrate/frame/election-provider-support/solution-type/Cargo.toml b/substrate/frame/election-provider-support/solution-type/Cargo.toml
index 39e535c6c3ee6..f4ea4ef6e361f 100644
--- a/substrate/frame/election-provider-support/solution-type/Cargo.toml
+++ b/substrate/frame/election-provider-support/solution-type/Cargo.toml
@@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 proc-macro = true
 
 [dependencies]
-syn = { version = "2.0.37", features = ["full", "visit"] }
+syn = { version = "2.0.38", features = ["full", "visit"] }
 quote = "1.0.28"
 proc-macro2 = "1.0.56"
 proc-macro-crate = "1.1.3"
diff --git a/substrate/frame/election-provider-support/solution-type/fuzzer/Cargo.toml b/substrate/frame/election-provider-support/solution-type/fuzzer/Cargo.toml
index 6ac09dd45c601..e485920145416 100644
--- a/substrate/frame/election-provider-support/solution-type/fuzzer/Cargo.toml
+++ b/substrate/frame/election-provider-support/solution-type/fuzzer/Cargo.toml
@@ -13,7 +13,7 @@ publish = false
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-clap = { version = "4.4.4", features = ["derive"] }
+clap = { version = "4.4.6", features = ["derive"] }
 honggfuzz = "0.5"
 rand = { version = "0.8", features = ["std", "small_rng"] }
 
diff --git a/substrate/frame/elections-phragmen/src/benchmarking.rs b/substrate/frame/elections-phragmen/src/benchmarking.rs
index 56ea19578c8f5..9878f7fd41c06 100644
--- a/substrate/frame/elections-phragmen/src/benchmarking.rs
+++ b/substrate/frame/elections-phragmen/src/benchmarking.rs
@@ -379,7 +379,7 @@ benchmarks! {
 		let root = RawOrigin::Root;
 	}: _(root, v, d)
 	verify {
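+		// All `d` defunct voters among the `v` total were removed, so `v - d` remain.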
-		assert_eq!(<Voting<T>>::iter().count() as u32, 0);
+		assert_eq!(<Voting<T>>::iter().count() as u32, v - d);
 	}
 
 	election_phragmen {
diff --git a/substrate/frame/elections-phragmen/src/lib.rs b/substrate/frame/elections-phragmen/src/lib.rs
index 6912649bd122d..93f9fc2b6d241 100644
--- a/substrate/frame/elections-phragmen/src/lib.rs
+++ b/substrate/frame/elections-phragmen/src/lib.rs
@@ -591,15 +591,18 @@ pub mod pallet {
 		/// ## Complexity
 		/// - Check is_defunct_voter() details.
 		#[pallet::call_index(5)]
-		#[pallet::weight(T::WeightInfo::clean_defunct_voters(*_num_voters, *_num_defunct))]
+		#[pallet::weight(T::WeightInfo::clean_defunct_voters(*num_voters, *num_defunct))]
 		pub fn clean_defunct_voters(
 			origin: OriginFor<T>,
-			_num_voters: u32,
-			_num_defunct: u32,
+			num_voters: u32,
+			num_defunct: u32,
 		) -> DispatchResult {
 			let _ = ensure_root(origin)?;
+
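+			// Bound the work to the declared weight: inspect at most `num_voters`
+			// entries and remove at most `num_defunct` defunct voters.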
 			<Voting<T>>::iter()
+				.take(num_voters as usize)
 				.filter(|(_, x)| Self::is_defunct_voter(&x.votes))
+				.take(num_defunct as usize)
 				.for_each(|(dv, _)| Self::do_remove_voter(&dv));
 
 			Ok(())
diff --git a/substrate/frame/examples/default-config/src/lib.rs b/substrate/frame/examples/default-config/src/lib.rs
index d2eade0ccff1e..8a1f6f9d6a82c 100644
--- a/substrate/frame/examples/default-config/src/lib.rs
+++ b/substrate/frame/examples/default-config/src/lib.rs
@@ -26,7 +26,7 @@
 //! Study the following types:
 //!
 //! - [`pallet::DefaultConfig`], and how it differs from [`pallet::Config`].
-//! - [`pallet::config_preludes::TestDefaultConfig`] and how it implements
+//! - [`struct@pallet::config_preludes::TestDefaultConfig`] and how it implements
 //!   [`pallet::DefaultConfig`].
 //! - Notice how [`pallet::DefaultConfig`] is independent of [`frame_system::Config`].
 
@@ -83,11 +83,12 @@ pub mod pallet {
 		// This will help us avoid needing to disambiguate anything when using `derive_impl`.
 		use super::*;
 		use frame_support::derive_impl;
+		use frame_system::config_preludes::TestDefaultConfig as SystemTestDefaultConfig;
 
 		/// A type providing default configurations for this pallet in testing environment.
 		pub struct TestDefaultConfig;
 
-		#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig, no_aggregated_types)]
+		#[derive_impl(SystemTestDefaultConfig as frame_system::DefaultConfig, no_aggregated_types)]
 		impl frame_system::DefaultConfig for TestDefaultConfig {}
 
 		#[frame_support::register_default_impl(TestDefaultConfig)]
@@ -109,7 +110,7 @@ pub mod pallet {
 		/// example, we simply derive `frame_system::config_preludes::TestDefaultConfig` again.
 		pub struct OtherDefaultConfig;
 
-		#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig, no_aggregated_types)]
+		#[derive_impl(SystemTestDefaultConfig as frame_system::DefaultConfig, no_aggregated_types)]
 		impl frame_system::DefaultConfig for OtherDefaultConfig {}
 
 		#[frame_support::register_default_impl(OtherDefaultConfig)]
diff --git a/substrate/frame/identity/src/benchmarking.rs b/substrate/frame/identity/src/benchmarking.rs
index 4b51d23f6b34f..059de204bbf77 100644
--- a/substrate/frame/identity/src/benchmarking.rs
+++ b/substrate/frame/identity/src/benchmarking.rs
@@ -22,7 +22,9 @@
 use super::*;
 
 use crate::Pallet as Identity;
-use frame_benchmarking::v1::{account, benchmarks, whitelisted_caller, BenchmarkError};
+use frame_benchmarking::{
+	account, impl_benchmark_test_suite, v2::*, whitelisted_caller, BenchmarkError,
+};
 use frame_support::{
 	ensure,
 	traits::{EnsureOrigin, Get},
@@ -118,110 +120,128 @@ fn create_identity_info<T: Config>(num_fields: u32) -> IdentityInfo<T::MaxAdditi
 	}
 }
 
-benchmarks! {
-	add_registrar {
-		let r in 1 .. T::MaxRegistrars::get() - 1 => add_registrars::<T>(r)?;
+#[benchmarks]
+mod benchmarks {
+	use super::*;
+
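+	// Rewritten in the v2 benchmark syntax: ranges become `Linear<A, B>`
+	// parameters, the measured call is marked with `#[extrinsic_call]`, and the
+	// verification code simply follows it.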
+	#[benchmark]
+	fn add_registrar(r: Linear<1, { T::MaxRegistrars::get() - 1 }>) -> Result<(), BenchmarkError> {
+		add_registrars::<T>(r)?;
 		ensure!(Registrars::<T>::get().len() as u32 == r, "Registrars not set up correctly.");
 		let origin =
 			T::RegistrarOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?;
 		let account = T::Lookup::unlookup(account("registrar", r + 1, SEED));
-	}: _<T::RuntimeOrigin>(origin, account)
-	verify {
+
+		#[extrinsic_call]
+		_(origin as T::RuntimeOrigin, account);
+
 		ensure!(Registrars::<T>::get().len() as u32 == r + 1, "Registrars not added.");
+		Ok(())
 	}
 
-	set_identity {
-		let r in 1 .. T::MaxRegistrars::get() => add_registrars::<T>(r)?;
-		let x in 0 .. T::MaxAdditionalFields::get();
-		let caller = {
-			// The target user
-			let caller: T::AccountId = whitelisted_caller();
-			let caller_lookup = T::Lookup::unlookup(caller.clone());
-			let caller_origin: <T as frame_system::Config>::RuntimeOrigin = RawOrigin::Signed(caller.clone()).into();
-			let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::<T>::max_value());
-
-			// Add an initial identity
-			let initial_info = create_identity_info::<T>(1);
-			Identity::<T>::set_identity(caller_origin.clone(), Box::new(initial_info.clone()))?;
-
-			// User requests judgement from all the registrars, and they approve
-			for i in 0..r {
-				let registrar: T::AccountId = account("registrar", i, SEED);
-				let registrar_lookup = T::Lookup::unlookup(registrar.clone());
-				let balance_to_use =  T::Currency::minimum_balance() * 10u32.into();
-				let _ = T::Currency::make_free_balance_be(&registrar, balance_to_use);
-
-				Identity::<T>::request_judgement(caller_origin.clone(), i, 10u32.into())?;
-				Identity::<T>::provide_judgement(
-					RawOrigin::Signed(registrar).into(),
-					i,
-					caller_lookup.clone(),
-					Judgement::Reasonable,
-					T::Hashing::hash_of(&initial_info),
-				)?;
-			}
-			caller
-		};
-	}: _(RawOrigin::Signed(caller.clone()), Box::new(create_identity_info::<T>(x)))
-	verify {
+	#[benchmark]
+	fn set_identity(
+		r: Linear<1, { T::MaxRegistrars::get() }>,
+		x: Linear<0, { T::MaxAdditionalFields::get() }>,
+	) -> Result<(), BenchmarkError> {
+		add_registrars::<T>(r)?;
+
+		let caller: T::AccountId = whitelisted_caller();
+		let caller_lookup = T::Lookup::unlookup(caller.clone());
+		let caller_origin: <T as frame_system::Config>::RuntimeOrigin =
+			RawOrigin::Signed(caller.clone()).into();
+		let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::<T>::max_value());
+
+		// Add an initial identity
+		let initial_info = create_identity_info::<T>(1);
+		Identity::<T>::set_identity(caller_origin.clone(), Box::new(initial_info.clone()))?;
+
+		// User requests judgement from all the registrars, and they approve
+		for i in 0..r {
+			let registrar: T::AccountId = account("registrar", i, SEED);
+			let _ = T::Lookup::unlookup(registrar.clone());
+			let balance_to_use = T::Currency::minimum_balance() * 10u32.into();
+			let _ = T::Currency::make_free_balance_be(&registrar, balance_to_use);
+
+			Identity::<T>::request_judgement(caller_origin.clone(), i, 10u32.into())?;
+			Identity::<T>::provide_judgement(
+				RawOrigin::Signed(registrar).into(),
+				i,
+				caller_lookup.clone(),
+				Judgement::Reasonable,
+				T::Hashing::hash_of(&initial_info),
+			)?;
+		}
+
+		#[extrinsic_call]
+		_(RawOrigin::Signed(caller.clone()), Box::new(create_identity_info::<T>(x)));
+
 		assert_last_event::<T>(Event::<T>::IdentitySet { who: caller }.into());
+		Ok(())
 	}
 
 	// We need to split `set_subs` into two benchmarks to accurately isolate the potential
 	// writes caused by new or old sub accounts. The actual weight should simply be
 	// the sum of these two weights.
-	set_subs_new {
+	#[benchmark]
+	fn set_subs_new(s: Linear<0, { T::MaxSubAccounts::get() }>) -> Result<(), BenchmarkError> {
 		let caller: T::AccountId = whitelisted_caller();
-		// Create a new subs vec with s sub accounts
-		let s in 0 .. T::MaxSubAccounts::get() => ();
+
+		// Create a new subs vec with sub accounts
 		let subs = create_sub_accounts::<T>(&caller, s)?;
 		ensure!(SubsOf::<T>::get(&caller).1.len() == 0, "Caller already has subs");
-	}: set_subs(RawOrigin::Signed(caller.clone()), subs)
-	verify {
+
+		#[extrinsic_call]
+		set_subs(RawOrigin::Signed(caller.clone()), subs);
+
 		ensure!(SubsOf::<T>::get(&caller).1.len() as u32 == s, "Subs not added");
+		Ok(())
 	}
 
-	set_subs_old {
+	#[benchmark]
+	fn set_subs_old(p: Linear<0, { T::MaxSubAccounts::get() }>) -> Result<(), BenchmarkError> {
 		let caller: T::AccountId = whitelisted_caller();
+
 		// Give them p many previous sub accounts.
-		let p in 0 .. T::MaxSubAccounts::get() => {
-			let _ = add_sub_accounts::<T>(&caller, p)?;
-		};
+		let _ = add_sub_accounts::<T>(&caller, p)?;
+
 		// Remove all subs.
 		let subs = create_sub_accounts::<T>(&caller, 0)?;
-		ensure!(
-			SubsOf::<T>::get(&caller).1.len() as u32 == p,
-			"Caller does have subs",
-		);
-	}: set_subs(RawOrigin::Signed(caller.clone()), subs)
-	verify {
+		ensure!(SubsOf::<T>::get(&caller).1.len() as u32 == p, "Caller doesn't have subs");
+
+		#[extrinsic_call]
+		set_subs(RawOrigin::Signed(caller.clone()), subs);
+
 		ensure!(SubsOf::<T>::get(&caller).1.len() == 0, "Subs not removed");
+		Ok(())
 	}
 
-	clear_identity {
+	#[benchmark]
+	fn clear_identity(
+		r: Linear<1, { T::MaxRegistrars::get() }>,
+		s: Linear<0, { T::MaxSubAccounts::get() }>,
+		x: Linear<0, { T::MaxAdditionalFields::get() }>,
+	) -> Result<(), BenchmarkError> {
 		let caller: T::AccountId = whitelisted_caller();
-		let caller_origin = <T as frame_system::Config>::RuntimeOrigin::from(RawOrigin::Signed(caller.clone()));
+		let caller_origin =
+			<T as frame_system::Config>::RuntimeOrigin::from(RawOrigin::Signed(caller.clone()));
 		let caller_lookup = <T::Lookup as StaticLookup>::unlookup(caller.clone());
 		let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::<T>::max_value());
 
-		let r in 1 .. T::MaxRegistrars::get() => add_registrars::<T>(r)?;
-		let s in 0 .. T::MaxSubAccounts::get() => {
-			// Give them s many sub accounts
-			let caller: T::AccountId = whitelisted_caller();
-			let _ = add_sub_accounts::<T>(&caller, s)?;
-		};
-		let x in 0 .. T::MaxAdditionalFields::get();
+		// Register the registrars
+		add_registrars::<T>(r)?;
+
+		// Add sub accounts
+		let _ = add_sub_accounts::<T>(&caller, s)?;
 
 		// Create their main identity with x additional fields
 		let info = create_identity_info::<T>(x);
-		let caller: T::AccountId = whitelisted_caller();
-		let caller_origin = <T as frame_system::Config>::RuntimeOrigin::from(RawOrigin::Signed(caller.clone()));
 		Identity::<T>::set_identity(caller_origin.clone(), Box::new(info.clone()))?;
 
 		// User requests judgement from all the registrars, and they approve
 		for i in 0..r {
 			let registrar: T::AccountId = account("registrar", i, SEED);
-			let balance_to_use =  T::Currency::minimum_balance() * 10u32.into();
+			let balance_to_use = T::Currency::minimum_balance() * 10u32.into();
 			let _ = T::Currency::make_free_balance_be(&registrar, balance_to_use);
 
 			Identity::<T>::request_judgement(caller_origin.clone(), i, 10u32.into())?;
@@ -233,111 +253,175 @@ benchmarks! {
 				T::Hashing::hash_of(&info),
 			)?;
 		}
+
 		ensure!(IdentityOf::<T>::contains_key(&caller), "Identity does not exist.");
-	}: _(RawOrigin::Signed(caller.clone()))
-	verify {
+
+		#[extrinsic_call]
+		_(RawOrigin::Signed(caller.clone()));
+
 		ensure!(!IdentityOf::<T>::contains_key(&caller), "Identity not cleared.");
+		Ok(())
 	}
 
-	request_judgement {
+	#[benchmark]
+	fn request_judgement(
+		r: Linear<1, { T::MaxRegistrars::get() }>,
+		x: Linear<0, { T::MaxAdditionalFields::get() }>,
+	) -> Result<(), BenchmarkError> {
 		let caller: T::AccountId = whitelisted_caller();
 		let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::<T>::max_value());
 
-		let r in 1 .. T::MaxRegistrars::get() => add_registrars::<T>(r)?;
-		let x in 0 .. T::MaxAdditionalFields::get() => {
-			// Create their main identity with x additional fields
-			let info = create_identity_info::<T>(x);
-			let caller: T::AccountId = whitelisted_caller();
-			let caller_origin = <T as frame_system::Config>::RuntimeOrigin::from(RawOrigin::Signed(caller));
-			Identity::<T>::set_identity(caller_origin, Box::new(info))?;
-		};
-	}: _(RawOrigin::Signed(caller.clone()), r - 1, 10u32.into())
-	verify {
-		assert_last_event::<T>(Event::<T>::JudgementRequested { who: caller, registrar_index: r-1 }.into());
+		// Register the registrars
+		add_registrars::<T>(r)?;
+
+		// Create their main identity with x additional fields
+		let info = create_identity_info::<T>(x);
+		let caller_origin =
+			<T as frame_system::Config>::RuntimeOrigin::from(RawOrigin::Signed(caller.clone()));
+		Identity::<T>::set_identity(caller_origin.clone(), Box::new(info))?;
+
+		#[extrinsic_call]
+		_(RawOrigin::Signed(caller.clone()), r - 1, 10u32.into());
+
+		assert_last_event::<T>(
+			Event::<T>::JudgementRequested { who: caller, registrar_index: r - 1 }.into(),
+		);
+
+		Ok(())
 	}
 
-	cancel_request {
+	#[benchmark]
+	fn cancel_request(
+		r: Linear<1, { T::MaxRegistrars::get() }>,
+		x: Linear<0, { T::MaxAdditionalFields::get() }>,
+	) -> Result<(), BenchmarkError> {
 		let caller: T::AccountId = whitelisted_caller();
-		let caller_origin = <T as frame_system::Config>::RuntimeOrigin::from(RawOrigin::Signed(caller.clone()));
 		let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::<T>::max_value());
 
-		let r in 1 .. T::MaxRegistrars::get() => add_registrars::<T>(r)?;
-		let x in 0 .. T::MaxAdditionalFields::get() => {
-			// Create their main identity with x additional fields
-			let info = create_identity_info::<T>(x);
-			let caller: T::AccountId = whitelisted_caller();
-			let caller_origin = <T as frame_system::Config>::RuntimeOrigin::from(RawOrigin::Signed(caller));
-			Identity::<T>::set_identity(caller_origin, Box::new(info))?;
-		};
-
-		Identity::<T>::request_judgement(caller_origin, r - 1, 10u32.into())?;
-	}: _(RawOrigin::Signed(caller.clone()), r - 1)
-	verify {
-		assert_last_event::<T>(Event::<T>::JudgementUnrequested { who: caller, registrar_index: r-1 }.into());
+		// Register the registrars
+		add_registrars::<T>(r)?;
+
+		// Create their main identity with x additional fields
+		let info = create_identity_info::<T>(x);
+		let caller_origin =
+			<T as frame_system::Config>::RuntimeOrigin::from(RawOrigin::Signed(caller.clone()));
+		Identity::<T>::set_identity(caller_origin.clone(), Box::new(info))?;
+
+		Identity::<T>::request_judgement(caller_origin.clone(), r - 1, 10u32.into())?;
+
+		#[extrinsic_call]
+		_(RawOrigin::Signed(caller.clone()), r - 1);
+
+		assert_last_event::<T>(
+			Event::<T>::JudgementUnrequested { who: caller, registrar_index: r - 1 }.into(),
+		);
+
+		Ok(())
 	}
 
-	set_fee {
+	#[benchmark]
+	fn set_fee(r: Linear<1, { T::MaxRegistrars::get() - 1 }>) -> Result<(), BenchmarkError> {
 		let caller: T::AccountId = whitelisted_caller();
 		let caller_lookup = T::Lookup::unlookup(caller.clone());
 
-		let r in 1 .. T::MaxRegistrars::get() - 1 => add_registrars::<T>(r)?;
+		add_registrars::<T>(r)?;
 
 		let registrar_origin = T::RegistrarOrigin::try_successful_origin()
 			.expect("RegistrarOrigin has no successful origin required for the benchmark");
 		Identity::<T>::add_registrar(registrar_origin, caller_lookup)?;
+
 		let registrars = Registrars::<T>::get();
 		ensure!(registrars[r as usize].as_ref().unwrap().fee == 0u32.into(), "Fee already set.");
-	}: _(RawOrigin::Signed(caller), r, 100u32.into())
-	verify {
-		let registrars = Registrars::<T>::get();
-		ensure!(registrars[r as usize].as_ref().unwrap().fee == 100u32.into(), "Fee not changed.");
+
+		#[extrinsic_call]
+		_(RawOrigin::Signed(caller), r, 100u32.into());
+
+		let updated_registrars = Registrars::<T>::get();
+		ensure!(
+			updated_registrars[r as usize].as_ref().unwrap().fee == 100u32.into(),
+			"Fee not changed."
+		);
+
+		Ok(())
 	}
 
-	set_account_id {
+	#[benchmark]
+	fn set_account_id(r: Linear<1, { T::MaxRegistrars::get() - 1 }>) -> Result<(), BenchmarkError> {
 		let caller: T::AccountId = whitelisted_caller();
 		let caller_lookup = T::Lookup::unlookup(caller.clone());
 		let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::<T>::max_value());
 
-		let r in 1 .. T::MaxRegistrars::get() - 1 => add_registrars::<T>(r)?;
+		add_registrars::<T>(r)?;
 
 		let registrar_origin = T::RegistrarOrigin::try_successful_origin()
 			.expect("RegistrarOrigin has no successful origin required for the benchmark");
 		Identity::<T>::add_registrar(registrar_origin, caller_lookup)?;
+
 		let registrars = Registrars::<T>::get();
 		ensure!(registrars[r as usize].as_ref().unwrap().account == caller, "id not set.");
+
 		let new_account = T::Lookup::unlookup(account("new", 0, SEED));
-	}: _(RawOrigin::Signed(caller), r, new_account)
-	verify {
-		let registrars = Registrars::<T>::get();
-		ensure!(registrars[r as usize].as_ref().unwrap().account == account("new", 0, SEED), "id not changed.");
+
+		#[extrinsic_call]
+		_(RawOrigin::Signed(caller), r, new_account);
+
+		let updated_registrars = Registrars::<T>::get();
+		ensure!(
+			updated_registrars[r as usize].as_ref().unwrap().account == account("new", 0, SEED),
+			"id not changed."
+		);
+
+		Ok(())
 	}
 
-	set_fields {
+	#[benchmark]
+	fn set_fields(r: Linear<1, { T::MaxRegistrars::get() - 1 }>) -> Result<(), BenchmarkError> {
 		let caller: T::AccountId = whitelisted_caller();
 		let caller_lookup = T::Lookup::unlookup(caller.clone());
 		let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::<T>::max_value());
 
-		let r in 1 .. T::MaxRegistrars::get() - 1 => add_registrars::<T>(r)?;
+		add_registrars::<T>(r)?;
 
 		let registrar_origin = T::RegistrarOrigin::try_successful_origin()
 			.expect("RegistrarOrigin has no successful origin required for the benchmark");
 		Identity::<T>::add_registrar(registrar_origin, caller_lookup)?;
-		let fields = IdentityFields(
-			IdentityField::Display | IdentityField::Legal | IdentityField::Web | IdentityField::Riot
-			| IdentityField::Email | IdentityField::PgpFingerprint | IdentityField::Image | IdentityField::Twitter
-		);
-		let registrars = Registrars::<T>::get();
-		ensure!(registrars[r as usize].as_ref().unwrap().fields == Default::default(), "fields already set.");
-	}: _(RawOrigin::Signed(caller), r, fields)
-	verify {
+
+		let fields =
+			IdentityFields(
+				IdentityField::Display |
+					IdentityField::Legal | IdentityField::Web |
+					IdentityField::Riot | IdentityField::Email |
+					IdentityField::PgpFingerprint |
+					IdentityField::Image | IdentityField::Twitter,
+			);
+
 		let registrars = Registrars::<T>::get();
-		ensure!(registrars[r as usize].as_ref().unwrap().fields != Default::default(), "fields not set.");
+		ensure!(
+			registrars[r as usize].as_ref().unwrap().fields == Default::default(),
+			"fields already set."
+		);
+
+		#[extrinsic_call]
+		_(RawOrigin::Signed(caller), r, fields);
+
+		let updated_registrars = Registrars::<T>::get();
+		ensure!(
+			updated_registrars[r as usize].as_ref().unwrap().fields != Default::default(),
+			"fields not set."
+		);
+
+		Ok(())
 	}
 
-	provide_judgement {
+	#[benchmark]
+	fn provide_judgement(
+		r: Linear<1, { T::MaxRegistrars::get() - 1 }>,
+		x: Linear<0, { T::MaxAdditionalFields::get() }>,
+	) -> Result<(), BenchmarkError> {
 		// The user
 		let user: T::AccountId = account("user", r, SEED);
-		let user_origin = <T as frame_system::Config>::RuntimeOrigin::from(RawOrigin::Signed(user.clone()));
+		let user_origin =
+			<T as frame_system::Config>::RuntimeOrigin::from(RawOrigin::Signed(user.clone()));
 		let user_lookup = <T::Lookup as StaticLookup>::unlookup(user.clone());
 		let _ = T::Currency::make_free_balance_be(&user, BalanceOf::<T>::max_value());
 
@@ -345,8 +429,7 @@ benchmarks! {
 		let caller_lookup = T::Lookup::unlookup(caller.clone());
 		let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::<T>::max_value());
 
-		let r in 1 .. T::MaxRegistrars::get() - 1 => add_registrars::<T>(r)?;
-		let x in 0 .. T::MaxAdditionalFields::get();
+		add_registrars::<T>(r)?;
 
 		let info = create_identity_info::<T>(x);
 		let info_hash = T::Hashing::hash_of(&info);
@@ -356,18 +439,28 @@ benchmarks! {
 			.expect("RegistrarOrigin has no successful origin required for the benchmark");
 		Identity::<T>::add_registrar(registrar_origin, caller_lookup)?;
 		Identity::<T>::request_judgement(user_origin, r, 10u32.into())?;
-	}: _(RawOrigin::Signed(caller), r, user_lookup, Judgement::Reasonable, info_hash)
-	verify {
-		assert_last_event::<T>(Event::<T>::JudgementGiven { target: user, registrar_index: r }.into())
+
+		#[extrinsic_call]
+		_(RawOrigin::Signed(caller), r, user_lookup, Judgement::Reasonable, info_hash);
+
+		assert_last_event::<T>(
+			Event::<T>::JudgementGiven { target: user, registrar_index: r }.into(),
+		);
+
+		Ok(())
 	}
 
-	kill_identity {
-		let r in 1 .. T::MaxRegistrars::get() => add_registrars::<T>(r)?;
-		let s in 0 .. T::MaxSubAccounts::get();
-		let x in 0 .. T::MaxAdditionalFields::get();
+	#[benchmark]
+	fn kill_identity(
+		r: Linear<1, { T::MaxRegistrars::get() }>,
+		s: Linear<0, { T::MaxSubAccounts::get() }>,
+		x: Linear<0, { T::MaxAdditionalFields::get() }>,
+	) -> Result<(), BenchmarkError> {
+		add_registrars::<T>(r)?;
 
 		let target: T::AccountId = account("target", 0, SEED);
-		let target_origin: <T as frame_system::Config>::RuntimeOrigin = RawOrigin::Signed(target.clone()).into();
+		let target_origin: <T as frame_system::Config>::RuntimeOrigin =
+			RawOrigin::Signed(target.clone()).into();
 		let target_lookup = T::Lookup::unlookup(target.clone());
 		let _ = T::Currency::make_free_balance_be(&target, BalanceOf::<T>::max_value());
 
@@ -378,7 +471,7 @@ benchmarks! {
 		// User requests judgement from all the registrars, and they approve
 		for i in 0..r {
 			let registrar: T::AccountId = account("registrar", i, SEED);
-			let balance_to_use =  T::Currency::minimum_balance() * 10u32.into();
+			let balance_to_use = T::Currency::minimum_balance() * 10u32.into();
 			let _ = T::Currency::make_free_balance_be(&registrar, balance_to_use);
 
 			Identity::<T>::request_judgement(target_origin.clone(), i, 10u32.into())?;
@@ -390,62 +483,86 @@ benchmarks! {
 				T::Hashing::hash_of(&info),
 			)?;
 		}
+
 		ensure!(IdentityOf::<T>::contains_key(&target), "Identity not set");
+
 		let origin =
 			T::ForceOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?;
-	}: _<T::RuntimeOrigin>(origin, target_lookup)
-	verify {
+
+		#[extrinsic_call]
+		_(origin as T::RuntimeOrigin, target_lookup);
+
 		ensure!(!IdentityOf::<T>::contains_key(&target), "Identity not removed");
-	}
 
-	add_sub {
-		let s in 0 .. T::MaxSubAccounts::get() - 1;
+		Ok(())
+	}
 
+	#[benchmark]
+	fn add_sub(s: Linear<0, { T::MaxSubAccounts::get() - 1 }>) -> Result<(), BenchmarkError> {
 		let caller: T::AccountId = whitelisted_caller();
 		let _ = add_sub_accounts::<T>(&caller, s)?;
 		let sub = account("new_sub", 0, SEED);
 		let data = Data::Raw(vec![0; 32].try_into().unwrap());
+
 		ensure!(SubsOf::<T>::get(&caller).1.len() as u32 == s, "Subs not set.");
-	}: _(RawOrigin::Signed(caller.clone()), T::Lookup::unlookup(sub), data)
-	verify {
+
+		#[extrinsic_call]
+		_(RawOrigin::Signed(caller.clone()), T::Lookup::unlookup(sub), data);
+
 		ensure!(SubsOf::<T>::get(&caller).1.len() as u32 == s + 1, "Subs not added.");
-	}
 
-	rename_sub {
-		let s in 1 .. T::MaxSubAccounts::get();
+		Ok(())
+	}
 
+	#[benchmark]
+	fn rename_sub(s: Linear<1, { T::MaxSubAccounts::get() }>) -> Result<(), BenchmarkError> {
 		let caller: T::AccountId = whitelisted_caller();
 		let (sub, _) = add_sub_accounts::<T>(&caller, s)?.remove(0);
 		let data = Data::Raw(vec![1; 32].try_into().unwrap());
+
 		ensure!(SuperOf::<T>::get(&sub).unwrap().1 != data, "data already set");
-	}: _(RawOrigin::Signed(caller), T::Lookup::unlookup(sub.clone()), data.clone())
-	verify {
+
+		#[extrinsic_call]
+		_(RawOrigin::Signed(caller), T::Lookup::unlookup(sub.clone()), data.clone());
+
 		ensure!(SuperOf::<T>::get(&sub).unwrap().1 == data, "data not set");
-	}
 
-	remove_sub {
-		let s in 1 .. T::MaxSubAccounts::get();
+		Ok(())
+	}
 
+	#[benchmark]
+	fn remove_sub(s: Linear<1, { T::MaxSubAccounts::get() }>) -> Result<(), BenchmarkError> {
 		let caller: T::AccountId = whitelisted_caller();
 		let (sub, _) = add_sub_accounts::<T>(&caller, s)?.remove(0);
 		ensure!(SuperOf::<T>::contains_key(&sub), "Sub doesn't exist");
-	}: _(RawOrigin::Signed(caller), T::Lookup::unlookup(sub.clone()))
-	verify {
+
+		#[extrinsic_call]
+		_(RawOrigin::Signed(caller), T::Lookup::unlookup(sub.clone()));
+
 		ensure!(!SuperOf::<T>::contains_key(&sub), "Sub not removed");
-	}
 
-	quit_sub {
-		let s in 0 .. T::MaxSubAccounts::get() - 1;
+		Ok(())
+	}
 
+	#[benchmark]
+	fn quit_sub(s: Linear<0, { T::MaxSubAccounts::get() - 1 }>) -> Result<(), BenchmarkError> {
 		let caller: T::AccountId = whitelisted_caller();
 		let sup = account("super", 0, SEED);
 		let _ = add_sub_accounts::<T>(&sup, s)?;
 		let sup_origin = RawOrigin::Signed(sup).into();
-		Identity::<T>::add_sub(sup_origin, T::Lookup::unlookup(caller.clone()), Data::Raw(vec![0; 32].try_into().unwrap()))?;
+		Identity::<T>::add_sub(
+			sup_origin,
+			T::Lookup::unlookup(caller.clone()),
+			Data::Raw(vec![0; 32].try_into().unwrap()),
+		)?;
 		ensure!(SuperOf::<T>::contains_key(&caller), "Sub doesn't exist");
-	}: _(RawOrigin::Signed(caller.clone()))
-	verify {
+
+		#[extrinsic_call]
+		_(RawOrigin::Signed(caller.clone()));
+
 		ensure!(!SuperOf::<T>::contains_key(&caller), "Sub not removed");
+
+		Ok(())
 	}
 
 	impl_benchmark_test_suite!(Identity, crate::tests::new_test_ext(), crate::tests::Test);
diff --git a/substrate/frame/mixnet/Cargo.toml b/substrate/frame/mixnet/Cargo.toml
new file mode 100644
index 0000000000000..68ffdad20fcbb
--- /dev/null
+++ b/substrate/frame/mixnet/Cargo.toml
@@ -0,0 +1,57 @@
+[package]
+description = "FRAME's mixnet pallet"
+name = "pallet-mixnet"
+version = "0.1.0-dev"
+license = "Apache-2.0"
+authors = ["Parity Technologies <admin@parity.io>"]
+edition = "2021"
+homepage = "https://substrate.io"
+repository = "https://github.com/paritytech/substrate/"
+readme = "README.md"
+
+[package.metadata.docs.rs]
+targets = ["x86_64-unknown-linux-gnu"]
+
+[dependencies]
+codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] }
+frame-benchmarking = { default-features = false, optional = true, path = "../benchmarking" }
+frame-support = { default-features = false, path = "../support" }
+frame-system = { default-features = false, path = "../system" }
+log = { version = "0.4.17", default-features = false }
+scale-info = { version = "2.5.0", default-features = false, features = ["derive"] }
+serde = { version = "1.0.188", default-features = false, features = ["derive"] }
+sp-application-crypto = { default-features = false, path = "../../primitives/application-crypto" }
+sp-arithmetic = { default-features = false, path = "../../primitives/arithmetic" }
+sp-io = { default-features = false, path = "../../primitives/io" }
+sp-mixnet = { default-features = false, path = "../../primitives/mixnet" }
+sp-runtime = { default-features = false, path = "../../primitives/runtime" }
+sp-std = { default-features = false, path = "../../primitives/std" }
+
+[features]
+default = [ "std" ]
+std = [
+	"codec/std",
+	"frame-benchmarking?/std",
+	"frame-support/std",
+	"frame-system/std",
+	"log/std",
+	"scale-info/std",
+	"serde/std",
+	"sp-application-crypto/std",
+	"sp-arithmetic/std",
+	"sp-io/std",
+	"sp-mixnet/std",
+	"sp-runtime/std",
+	"sp-std/std",
+]
+runtime-benchmarks = [
+	"frame-benchmarking/runtime-benchmarks",
+	"frame-support/runtime-benchmarks",
+	"frame-system/runtime-benchmarks",
+	"sp-runtime/runtime-benchmarks",
+]
+try-runtime = [
+	"frame-support/try-runtime",
+	"frame-system/try-runtime",
+	"sp-runtime/try-runtime",
+]
diff --git a/substrate/frame/mixnet/README.md b/substrate/frame/mixnet/README.md
new file mode 100644
index 0000000000000..59b81851ed11f
--- /dev/null
+++ b/substrate/frame/mixnet/README.md
@@ -0,0 +1,4 @@
+This pallet is responsible for determining the current mixnet session and phase, and the mixnode
+set for each session.
+
+License: Apache-2.0
diff --git a/substrate/frame/mixnet/src/lib.rs b/substrate/frame/mixnet/src/lib.rs
new file mode 100644
index 0000000000000..c7a5b624157b8
--- /dev/null
+++ b/substrate/frame/mixnet/src/lib.rs
@@ -0,0 +1,598 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! This pallet is responsible for determining the current mixnet session and phase, and the
+//! mixnode set for each session.
+
+#![warn(missing_docs)]
+#![cfg_attr(not(feature = "std"), no_std)]
+
+use codec::{Decode, Encode, MaxEncodedLen};
+use frame_support::{
+	traits::{EstimateNextSessionRotation, Get, OneSessionHandler},
+	BoundedVec,
+};
+use frame_system::{
+	offchain::{SendTransactionTypes, SubmitTransaction},
+	pallet_prelude::BlockNumberFor,
+};
+pub use pallet::*;
+use scale_info::TypeInfo;
+use serde::{Deserialize, Serialize};
+use sp_application_crypto::RuntimeAppPublic;
+use sp_arithmetic::traits::{CheckedSub, Saturating, UniqueSaturatedInto, Zero};
+use sp_io::MultiRemovalResults;
+use sp_mixnet::types::{
+	AuthorityId, AuthoritySignature, KxPublic, Mixnode, MixnodesErr, PeerId, SessionIndex,
+	SessionPhase, SessionStatus, KX_PUBLIC_SIZE,
+};
+use sp_runtime::RuntimeDebug;
+use sp_std::{cmp::Ordering, vec::Vec};
+
+const LOG_TARGET: &str = "runtime::mixnet";
+
+/// Index of an authority in the authority list for a session.
+pub type AuthorityIndex = u32;
+
+////////////////////////////////////////////////////////////////////////////////
+// Bounded mixnode type
+////////////////////////////////////////////////////////////////////////////////
+
+/// Like [`Mixnode`], but encoded size is bounded.
+#[derive(
+	Clone, Decode, Encode, MaxEncodedLen, PartialEq, TypeInfo, RuntimeDebug, Serialize, Deserialize,
+)]
+pub struct BoundedMixnode<ExternalAddresses> {
+	/// Key-exchange public key for the mixnode.
+	pub kx_public: KxPublic,
+	/// libp2p peer ID of the mixnode.
+	pub peer_id: PeerId,
+	/// External addresses for the mixnode, in multiaddr format, UTF-8 encoded.
+	pub external_addresses: ExternalAddresses,
+}
+
+impl<MaxExternalAddressSize, MaxExternalAddresses> Into<Mixnode>
+	for BoundedMixnode<BoundedVec<BoundedVec<u8, MaxExternalAddressSize>, MaxExternalAddresses>>
+{
+	fn into(self) -> Mixnode {
+		Mixnode {
+			kx_public: self.kx_public,
+			peer_id: self.peer_id,
+			external_addresses: self
+				.external_addresses
+				.into_iter()
+				.map(BoundedVec::into_inner)
+				.collect(),
+		}
+	}
+}
+
+impl<MaxExternalAddressSize: Get<u32>, MaxExternalAddresses: Get<u32>> From<Mixnode>
+	for BoundedMixnode<BoundedVec<BoundedVec<u8, MaxExternalAddressSize>, MaxExternalAddresses>>
+{
+	fn from(mixnode: Mixnode) -> Self {
+		Self {
+			kx_public: mixnode.kx_public,
+			peer_id: mixnode.peer_id,
+			external_addresses: mixnode
+				.external_addresses
+				.into_iter()
+				.flat_map(|addr| match addr.try_into() {
+					Ok(addr) => Some(addr),
+					Err(addr) => {
+						log::debug!(
+							target: LOG_TARGET,
+							"Mixnode external address {addr:x?} too long; discarding",
+						);
+						None
+					},
+				})
+				.take(MaxExternalAddresses::get() as usize)
+				.collect::<Vec<_>>()
+				.try_into()
+				.expect("Excess external addresses discarded with take()"),
+		}
+	}
+}
+
+/// [`BoundedMixnode`] type for the given configuration.
+pub type BoundedMixnodeFor<T> = BoundedMixnode<
+	BoundedVec<
+		BoundedVec<u8, <T as Config>::MaxExternalAddressSize>,
+		<T as Config>::MaxExternalAddressesPerMixnode,
+	>,
+>;
+
+////////////////////////////////////////////////////////////////////////////////
+// Registration type
+////////////////////////////////////////////////////////////////////////////////
+
+/// A mixnode registration. A registration transaction is formed from one of these plus an
+/// [`AuthoritySignature`].
+#[derive(Clone, Decode, Encode, PartialEq, TypeInfo, RuntimeDebug)]
+pub struct Registration<BlockNumber, BoundedMixnode> {
+	/// Block number at the time of creation. When a registration transaction fails to make it on
+	/// to the chain for whatever reason, we send out another one. We want this one to have a
+	/// different hash in case the earlier transaction got banned somewhere; including the block
+	/// number is a simple way of achieving this.
+	pub block_number: BlockNumber,
+	/// The session during which this registration should be processed. Note that on success the
+	/// mixnode is registered for the _following_ session.
+	pub session_index: SessionIndex,
+	/// The index in the next session's authority list of the authority registering the mixnode.
+	pub authority_index: AuthorityIndex,
+	/// Mixnode information to register for the following session.
+	pub mixnode: BoundedMixnode,
+}
+
+/// [`Registration`] type for the given configuration.
+pub type RegistrationFor<T> = Registration<BlockNumberFor<T>, BoundedMixnodeFor<T>>;
+
+////////////////////////////////////////////////////////////////////////////////
+// Misc helper funcs
+////////////////////////////////////////////////////////////////////////////////
+
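+/// Debug-asserts that a `clear`/`clear_prefix` call removed every entry, i.e.
+/// that no continuation cursor was returned.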
+fn check_removed_all(res: MultiRemovalResults) {
+	debug_assert!(res.maybe_cursor.is_none());
+}
+
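+/// Derives a pseudo-random `u64` from a block number and a mixnode key-exchange
+/// public key by twox-64 hashing their concatenation.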
+fn twox<BlockNumber: UniqueSaturatedInto<u64>>(
+	block_number: BlockNumber,
+	kx_public: &KxPublic,
+) -> u64 {
+	let block_number: u64 = block_number.unique_saturated_into();
+	let mut data = [0; 8 + KX_PUBLIC_SIZE];
+	data[..8].copy_from_slice(&block_number.to_le_bytes());
+	data[8..].copy_from_slice(kx_public);
+	u64::from_le_bytes(sp_io::hashing::twox_64(&data))
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// The pallet
+////////////////////////////////////////////////////////////////////////////////
+
+#[frame_support::pallet(dev_mode)]
+pub mod pallet {
+	use super::*;
+	use frame_support::pallet_prelude::*;
+	use frame_system::pallet_prelude::*;
+
+	#[pallet::pallet]
+	pub struct Pallet<T>(_);
+
+	#[pallet::config]
+	pub trait Config: frame_system::Config + SendTransactionTypes<Call<Self>> {
+		/// The maximum number of authorities per session.
+		#[pallet::constant]
+		type MaxAuthorities: Get<AuthorityIndex>;
+
+		/// The maximum size of one of a mixnode's external addresses.
+		#[pallet::constant]
+		type MaxExternalAddressSize: Get<u32>;
+
+		/// The maximum number of external addresses for a mixnode.
+		#[pallet::constant]
+		type MaxExternalAddressesPerMixnode: Get<u32>;
+
+		/// Session progress/length estimation. Used to determine when to send registration
+		/// transactions and the longevity of these transactions.
+		type NextSessionRotation: EstimateNextSessionRotation<BlockNumberFor<Self>>;
+
+		/// Length of the first phase of each session (`CoverToCurrent`), in blocks.
+		#[pallet::constant]
+		type NumCoverToCurrentBlocks: Get<BlockNumberFor<Self>>;
+
+		/// Length of the second phase of each session (`RequestsToCurrent`), in blocks.
+		#[pallet::constant]
+		type NumRequestsToCurrentBlocks: Get<BlockNumberFor<Self>>;
+
+		/// Length of the third phase of each session (`CoverToPrev`), in blocks.
+		#[pallet::constant]
+		type NumCoverToPrevBlocks: Get<BlockNumberFor<Self>>;
+
+		/// The number of "slack" blocks at the start of each session, during which
+		/// [`maybe_register`](Pallet::maybe_register) will not attempt to post registration
+		/// transactions.
+		#[pallet::constant]
+		type NumRegisterStartSlackBlocks: Get<BlockNumberFor<Self>>;
+
+		/// The number of "slack" blocks at the end of each session.
+		/// [`maybe_register`](Pallet::maybe_register) will try to register before this slack
+		/// period, but may post registration transactions during the slack period as a last
+		/// resort.
+		#[pallet::constant]
+		type NumRegisterEndSlackBlocks: Get<BlockNumberFor<Self>>;
+
+		/// Priority of unsigned transactions used to register mixnodes.
+		#[pallet::constant]
+		type RegistrationPriority: Get<TransactionPriority>;
+
+		/// Minimum number of mixnodes. If there are fewer than this many mixnodes registered for a
+		/// session, the mixnet will not be active during the session.
+		#[pallet::constant]
+		type MinMixnodes: Get<u32>;
+	}
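+
+	// A hypothetical runtime configuration might look like the following (a
+	// sketch only; the `Period`/`Offset` session types and all constants are
+	// assumed, not part of this pallet):
+	//
+	// impl pallet_mixnet::Config for Runtime {
+	//     type MaxAuthorities = ConstU32<100>;
+	//     type MaxExternalAddressSize = ConstU32<128>;
+	//     type MaxExternalAddressesPerMixnode = ConstU32<16>;
+	//     type NextSessionRotation = pallet_session::PeriodicSessions<Period, Offset>;
+	//     type NumCoverToCurrentBlocks = ConstU32<3>;
+	//     type NumRequestsToCurrentBlocks = ConstU32<3>;
+	//     type NumCoverToPrevBlocks = ConstU32<3>;
+	//     type NumRegisterStartSlackBlocks = ConstU32<3>;
+	//     type NumRegisterEndSlackBlocks = ConstU32<3>;
+	//     type RegistrationPriority = ConstU64<{ u64::MAX }>;
+	//     type MinMixnodes = ConstU32<7>;
+	// }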
+
+	/// Index of the current session. This may be offset relative to the session index tracked by
+	/// e.g. `pallet_session`; mixnet session indices are independent.
+	#[pallet::storage]
+	pub(crate) type CurrentSessionIndex<T> = StorageValue<_, SessionIndex, ValueQuery>;
+
+	/// Block in which the current session started.
+	#[pallet::storage]
+	pub(crate) type CurrentSessionStartBlock<T> = StorageValue<_, BlockNumberFor<T>, ValueQuery>;
+
+	/// Authority list for the next session.
+	#[pallet::storage]
+	pub(crate) type NextAuthorityIds<T> = StorageMap<_, Identity, AuthorityIndex, AuthorityId>;
+
+	/// Mixnode sets by session index. Only the mixnode sets for the previous, current, and next
+	/// sessions are kept; older sets are discarded.
+	///
+	/// The mixnodes in each set are keyed by authority index so we can easily check if an
+	/// authority has registered a mixnode. The authority indices should only be used during
+	/// registration; the authority indices for the very first session are made up.
+	#[pallet::storage]
+	pub(crate) type Mixnodes<T> =
+		StorageDoubleMap<_, Identity, SessionIndex, Identity, AuthorityIndex, BoundedMixnodeFor<T>>;
+
+	#[pallet::genesis_config]
+	#[derive(frame_support::DefaultNoBound)]
+	pub struct GenesisConfig<T: Config> {
+		/// The mixnode set for the very first session.
+		pub mixnodes: BoundedVec<BoundedMixnodeFor<T>, T::MaxAuthorities>,
+	}
+
+	#[pallet::genesis_build]
+	impl<T: Config> BuildGenesisConfig for GenesisConfig<T> {
+		fn build(&self) {
+			assert!(
+				Mixnodes::<T>::iter_prefix_values(0).next().is_none(),
+				"Initial mixnodes already set"
+			);
+			for (i, mixnode) in self.mixnodes.iter().enumerate() {
+				// We just make up authority indices here. This doesn't matter as authority indices
+				// are only used during registration to check an authority doesn't register twice.
+				Mixnodes::<T>::insert(0, i as AuthorityIndex, mixnode);
+			}
+		}
+	}
+
+	#[pallet::call]
+	impl<T: Config> Pallet<T> {
+		/// Register a mixnode for the following session.
+		#[pallet::call_index(0)]
+		#[pallet::weight(1)] // TODO
+		pub fn register(
+			origin: OriginFor<T>,
+			registration: RegistrationFor<T>,
+			_signature: AuthoritySignature,
+		) -> DispatchResult {
+			ensure_none(origin)?;
+
+			// Checked by ValidateUnsigned
+			debug_assert_eq!(registration.session_index, CurrentSessionIndex::<T>::get());
+			debug_assert!(registration.authority_index < T::MaxAuthorities::get());
+
+			Mixnodes::<T>::insert(
+				// Registering for the _following_ session
+				registration.session_index + 1,
+				registration.authority_index,
+				registration.mixnode,
+			);
+
+			Ok(())
+		}
+	}
+
+	#[pallet::validate_unsigned]
+	impl<T: Config> ValidateUnsigned for Pallet<T> {
+		type Call = Call<T>;
+
+		fn validate_unsigned(_source: TransactionSource, call: &Self::Call) -> TransactionValidity {
+			let Self::Call::register { registration, signature } = call else {
+				return InvalidTransaction::Call.into()
+			};
+
+			// Check session index matches
+			match registration.session_index.cmp(&CurrentSessionIndex::<T>::get()) {
+				Ordering::Greater => return InvalidTransaction::Future.into(),
+				Ordering::Less => return InvalidTransaction::Stale.into(),
+				Ordering::Equal => (),
+			}
+
+			// Check authority index is valid
+			if registration.authority_index >= T::MaxAuthorities::get() {
+				return InvalidTransaction::BadProof.into()
+			}
+			let Some(authority_id) = NextAuthorityIds::<T>::get(registration.authority_index)
+			else {
+				return InvalidTransaction::BadProof.into()
+			};
+
+			// Check the authority hasn't registered a mixnode yet
+			if Self::already_registered(registration.session_index, registration.authority_index) {
+				return InvalidTransaction::Stale.into()
+			}
+
+			// Check signature. Note that we don't use regular signed transactions for registration
+			// as we don't want validators to have to pay to register. Spam is prevented by only
+			// allowing one registration per session per validator (see above).
+			let signature_ok = registration.using_encoded(|encoded_registration| {
+				authority_id.verify(&encoded_registration, signature)
+			});
+			if !signature_ok {
+				return InvalidTransaction::BadProof.into()
+			}
+
+			ValidTransaction::with_tag_prefix("MixnetRegistration")
+				.priority(T::RegistrationPriority::get())
+				// Include both authority index _and_ ID in tag in case of forks with different
+				// authority lists
+				.and_provides((
+					registration.session_index,
+					registration.authority_index,
+					authority_id,
+				))
+				.longevity(
+					(T::NextSessionRotation::average_session_length() / 2_u32.into())
+						.try_into()
+						.unwrap_or(64_u64),
+				)
+				.build()
+		}
+	}
+}
+
+impl<T: Config> Pallet<T> {
+	/// Returns the phase of the current session.
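+	///
+	/// Phases run back-to-back from the session start block: the first
+	/// `NumCoverToCurrentBlocks` blocks are `CoverToCurrent`, the next
+	/// `NumRequestsToCurrentBlocks` are `RequestsToCurrent`, the next
+	/// `NumCoverToPrevBlocks` are `CoverToPrev`, and all remaining blocks are
+	/// `DisconnectFromPrev`.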
+	fn session_phase() -> SessionPhase {
+		let block_in_phase = frame_system::Pallet::<T>::block_number()
+			.saturating_sub(CurrentSessionStartBlock::<T>::get());
+		let Some(block_in_phase) = block_in_phase.checked_sub(&T::NumCoverToCurrentBlocks::get())
+		else {
+			return SessionPhase::CoverToCurrent
+		};
+		let Some(block_in_phase) =
+			block_in_phase.checked_sub(&T::NumRequestsToCurrentBlocks::get())
+		else {
+			return SessionPhase::RequestsToCurrent
+		};
+		if block_in_phase < T::NumCoverToPrevBlocks::get() {
+			SessionPhase::CoverToPrev
+		} else {
+			SessionPhase::DisconnectFromPrev
+		}
+	}
+
+	/// Returns the index and phase of the current session.
+	pub fn session_status() -> SessionStatus {
+		SessionStatus {
+			current_index: CurrentSessionIndex::<T>::get(),
+			phase: Self::session_phase(),
+		}
+	}
+
+	/// Returns the mixnode set for the given session (which should be either the previous or the
+	/// current session).
+	fn mixnodes(session_index: SessionIndex) -> Result<Vec<Mixnode>, MixnodesErr> {
+		let mixnodes: Vec<_> =
+			Mixnodes::<T>::iter_prefix_values(session_index).map(Into::into).collect();
+		if mixnodes.len() < T::MinMixnodes::get() as usize {
+			Err(MixnodesErr::InsufficientRegistrations {
+				num: mixnodes.len() as u32,
+				min: T::MinMixnodes::get(),
+			})
+		} else {
+			Ok(mixnodes)
+		}
+	}
+
+	/// Returns the mixnode set for the previous session.
+	pub fn prev_mixnodes() -> Result<Vec<Mixnode>, MixnodesErr> {
+		let Some(prev_session_index) = CurrentSessionIndex::<T>::get().checked_sub(1) else {
+			return Err(MixnodesErr::InsufficientRegistrations {
+				num: 0,
+				min: T::MinMixnodes::get(),
+			})
+		};
+		Self::mixnodes(prev_session_index)
+	}
+
+	/// Returns the mixnode set for the current session.
+	pub fn current_mixnodes() -> Result<Vec<Mixnode>, MixnodesErr> {
+		Self::mixnodes(CurrentSessionIndex::<T>::get())
+	}
+
+	/// Is now a good time to register, considering only session progress?
+	fn should_register_by_session_progress(
+		block_number: BlockNumberFor<T>,
+		mixnode: &Mixnode,
+	) -> bool {
+		// At the start of each session there are some "slack" blocks during which we avoid
+		// registering.
+		let block_in_session = block_number.saturating_sub(CurrentSessionStartBlock::<T>::get());
+		if block_in_session < T::NumRegisterStartSlackBlocks::get() {
+			return false
+		}
+
+		let (Some(end_block), _weight) =
+			T::NextSessionRotation::estimate_next_session_rotation(block_number)
+		else {
+			// Things aren't going to work terribly well in this case as all the authorities will
+			// just pile in after the slack period...
+			return true
+		};
+
+		let remaining_blocks = end_block
+			.saturating_sub(block_number)
+			.saturating_sub(T::NumRegisterEndSlackBlocks::get());
+		if remaining_blocks.is_zero() {
+			// Into the slack time at the end of the session. Not necessarily too late;
+			// registrations are accepted right up until the session ends.
+			return true
+		}
+
+		// Want uniform distribution over the remaining blocks, so pick this block with probability
+		// 1/remaining_blocks. maybe_register may be called multiple times per block; ensure the
+		// same decision gets made each time by using a hash of the block number and the mixnode's
+		// public key as the "random" source. This is slightly biased as remaining_blocks most
+		// likely won't divide into 2^64, but it doesn't really matter...
+		let random = twox(block_number, &mixnode.kx_public);
+		(random % remaining_blocks.try_into().unwrap_or(u64::MAX)) == 0
+	}
+
+	fn next_local_authority() -> Option<(AuthorityIndex, AuthorityId)> {
+		// In the case where multiple local IDs are in the next authority set, we just return the
+		// first one. There's (currently at least) no point in registering multiple times.
+		let mut local_ids = AuthorityId::all();
+		local_ids.sort();
+		NextAuthorityIds::<T>::iter().find(|(_index, id)| local_ids.binary_search(id).is_ok())
+	}
+
+	/// `session_index` should be the index of the current session. `authority_index` is the
+	/// authority index in the _next_ session.
+	fn already_registered(session_index: SessionIndex, authority_index: AuthorityIndex) -> bool {
+		Mixnodes::<T>::contains_key(session_index + 1, authority_index)
+	}
+
+	/// Try to register a mixnode for the next session.
+	///
+	/// If a registration extrinsic is submitted, `true` is returned. The caller should avoid
+	/// calling `maybe_register` again for a few blocks, to give the submitted extrinsic a chance
+	/// to get included.
+	///
+	/// With the above exception, `maybe_register` is designed to be called every block. Most of
+	/// the time it will not do anything, for example:
+	///
+	/// - If it is not an appropriate time to submit a registration extrinsic.
+	/// - If the local node has already registered a mixnode for the next session.
+	/// - If the local node is not permitted to register a mixnode for the next session.
+	///
+	/// `session_index` should match `session_status().current_index`; if it does not, `false` is
+	/// returned immediately.
+	pub fn maybe_register(session_index: SessionIndex, mixnode: Mixnode) -> bool {
+		let current_session_index = CurrentSessionIndex::<T>::get();
+		if session_index != current_session_index {
+			log::trace!(
+				target: LOG_TARGET,
+				"Session {session_index} registration attempted, \
+				but current session is {current_session_index}",
+			);
+			return false
+		}
+
+		let block_number = frame_system::Pallet::<T>::block_number();
+		if !Self::should_register_by_session_progress(block_number, &mixnode) {
+			log::trace!(
+				target: LOG_TARGET,
+				"Waiting for the session to progress further before registering",
+			);
+			return false
+		}
+
+		let Some((authority_index, authority_id)) = Self::next_local_authority() else {
+			log::trace!(
+				target: LOG_TARGET,
+				"Not an authority in the next session; cannot register a mixnode",
+			);
+			return false
+		};
+
+		if Self::already_registered(session_index, authority_index) {
+			log::trace!(
+				target: LOG_TARGET,
+				"Already registered a mixnode for the next session",
+			);
+			return false
+		}
+
+		let registration =
+			Registration { block_number, session_index, authority_index, mixnode: mixnode.into() };
+		let Some(signature) = authority_id.sign(&registration.encode()) else {
+			log::debug!(target: LOG_TARGET, "Failed to sign registration");
+			return false
+		};
+		let call = Call::register { registration, signature };
+		match SubmitTransaction::<T, Call<T>>::submit_unsigned_transaction(call.into()) {
+			Ok(()) => true,
+			Err(()) => {
+				log::debug!(
+					target: LOG_TARGET,
+					"Failed to submit registration transaction",
+				);
+				false
+			},
+		}
+	}
+}
+
+impl<T: Config> sp_runtime::BoundToRuntimeAppPublic for Pallet<T> {
+	type Public = AuthorityId;
+}
+
+impl<T: Config> OneSessionHandler<T::AccountId> for Pallet<T> {
+	type Key = AuthorityId;
+
+	fn on_genesis_session<'a, I: 'a>(validators: I)
+	where
+		I: Iterator<Item = (&'a T::AccountId, Self::Key)>,
+	{
+		assert!(
+			NextAuthorityIds::<T>::iter().next().is_none(),
+			"Initial authority IDs already set"
+		);
+		for (i, (_, authority_id)) in validators.enumerate() {
+			NextAuthorityIds::<T>::insert(i as AuthorityIndex, authority_id);
+		}
+	}
+
+	fn on_new_session<'a, I: 'a>(changed: bool, _validators: I, queued_validators: I)
+	where
+		I: Iterator<Item = (&'a T::AccountId, Self::Key)>,
+	{
+		let session_index = CurrentSessionIndex::<T>::mutate(|index| {
+			*index += 1;
+			*index
+		});
+		CurrentSessionStartBlock::<T>::put(frame_system::Pallet::<T>::block_number());
+
+		// Discard the previous previous mixnode set, which we don't need any more
+		if let Some(prev_prev_session_index) = session_index.checked_sub(2) {
+			check_removed_all(Mixnodes::<T>::clear_prefix(
+				prev_prev_session_index,
+				T::MaxAuthorities::get(),
+				None,
+			));
+		}
+
+		if changed {
+			// Save authority set for the next session. Note that we don't care about the authority
+			// set for the current session; we just care about the key-exchange public keys that
+			// were registered and are stored in Mixnodes.
+			check_removed_all(NextAuthorityIds::<T>::clear(T::MaxAuthorities::get(), None));
+			for (i, (_, authority_id)) in queued_validators.enumerate() {
+				NextAuthorityIds::<T>::insert(i as AuthorityIndex, authority_id);
+			}
+		}
+	}
+
+	fn on_disabled(_i: u32) {
+		// For now, to keep things simple, just ignore disabled authorities.
+		// TODO
+	}
+}
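
As the doc comment above notes, `maybe_register` is cheap to call every block. A minimal sketch of driving it from an offchain worker follows; the `local_mixnode()` helper and the hook wiring are illustrative assumptions, not part of this diff:

```rust
// Sketch only: calling `maybe_register` once per block from an offchain
// worker. `local_mixnode()` is a hypothetical helper producing this node's
// `Mixnode`; everything else follows the pallet API added above.
#[pallet::hooks]
impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
	fn offchain_worker(_block: BlockNumberFor<T>) {
		let session_index = CurrentSessionIndex::<T>::get();
		let mixnode = local_mixnode(); // hypothetical
		// Returns `false` quickly whenever registration is impossible or
		// unnecessary, so calling this every block is fine.
		let _ = Pallet::<T>::maybe_register(session_index, mixnode);
	}
}
```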
diff --git a/substrate/frame/nfts/src/tests.rs b/substrate/frame/nfts/src/tests.rs
index 6e264048f11a6..a82fcca015121 100644
--- a/substrate/frame/nfts/src/tests.rs
+++ b/substrate/frame/nfts/src/tests.rs
@@ -17,7 +17,7 @@
 
 //! Tests for Nfts pallet.
 
-use crate::{mock::*, Event, *};
+use crate::{mock::*, Event, SystemConfig, *};
 use enumflags2::BitFlags;
 use frame_support::{
 	assert_noop, assert_ok,
diff --git a/substrate/frame/paged-list/src/paged_list.rs b/substrate/frame/paged-list/src/paged_list.rs
index 3597c3dea6823..beea8ecc64409 100644
--- a/substrate/frame/paged-list/src/paged_list.rs
+++ b/substrate/frame/paged-list/src/paged_list.rs
@@ -53,7 +53,7 @@ pub type ValueIndex = u32;
 /// [`Page`]s.
 ///
 /// Each [`Page`] holds at most `ValuesPerNewPage` values in its `values` vector. The last page is
-/// the only one that could have less than `ValuesPerNewPage` values.  
+/// the only one that could have less than `ValuesPerNewPage` values.
 /// **Iteration** happens by starting
 /// at [`first_page`][StoragePagedListMeta::first_page]/
 /// [`first_value_offset`][StoragePagedListMeta::first_value_offset] and incrementing these indices
@@ -373,11 +373,11 @@ where
 /// that are completely useless for prefix calculation.
 struct StoragePagedListPrefix<Prefix>(PhantomData<Prefix>);
 
-impl<Prefix> frame_support::storage::StoragePrefixedContainer for StoragePagedListPrefix<Prefix>
+impl<Prefix> StoragePrefixedContainer for StoragePagedListPrefix<Prefix>
 where
 	Prefix: StorageInstance,
 {
-	fn module_prefix() -> &'static [u8] {
+	fn pallet_prefix() -> &'static [u8] {
 		Prefix::pallet_prefix().as_bytes()
 	}
 
@@ -386,15 +386,15 @@ where
 	}
 }
 
-impl<Prefix, Value, ValuesPerNewPage> frame_support::storage::StoragePrefixedContainer
+impl<Prefix, Value, ValuesPerNewPage> StoragePrefixedContainer
 	for StoragePagedList<Prefix, Value, ValuesPerNewPage>
 where
 	Prefix: StorageInstance,
 	Value: FullCodec,
 	ValuesPerNewPage: Get<u32>,
 {
-	fn module_prefix() -> &'static [u8] {
-		StoragePagedListPrefix::<Prefix>::module_prefix()
+	fn pallet_prefix() -> &'static [u8] {
+		StoragePagedListPrefix::<Prefix>::pallet_prefix()
 	}
 
 	fn storage_prefix() -> &'static [u8] {
diff --git a/substrate/frame/preimage/src/migration.rs b/substrate/frame/preimage/src/migration.rs
index 821cb01bbaae5..a86109f892a4f 100644
--- a/substrate/frame/preimage/src/migration.rs
+++ b/substrate/frame/preimage/src/migration.rs
@@ -133,7 +133,7 @@ pub mod v1 {
 						None =>
 							OldRequestStatus::Requested { deposit: None, count: 1, len: Some(len) },
 					},
-					v0::OldRequestStatus::Requested(count) if count == 0 => {
+					v0::OldRequestStatus::Requested(0) => {
 						log::error!(target: TARGET, "preimage has counter of zero: {:?}", hash);
 						continue
 					},
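
The hunk above swaps a guarded binding for a literal pattern; the two forms are behaviorally identical. A standalone illustration (the enum here is a simplified stand-in for the real `OldRequestStatus`):

```rust
// `Requested(0)` is equivalent to `Requested(count) if count == 0`,
// just without the redundant binding and guard.
enum OldRequestStatus {
	Requested(u32),
	Unrequested,
}

fn has_zero_counter(status: &OldRequestStatus) -> bool {
	matches!(status, OldRequestStatus::Requested(0))
}

fn main() {
	assert!(has_zero_counter(&OldRequestStatus::Requested(0)));
	assert!(!has_zero_counter(&OldRequestStatus::Requested(1)));
	assert!(!has_zero_counter(&OldRequestStatus::Unrequested));
}
```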
diff --git a/substrate/frame/root-testing/src/lib.rs b/substrate/frame/root-testing/src/lib.rs
index e04c7bfa13d26..bbcda09c3065d 100644
--- a/substrate/frame/root-testing/src/lib.rs
+++ b/substrate/frame/root-testing/src/lib.rs
@@ -29,7 +29,7 @@ use sp_runtime::Perbill;
 
 pub use pallet::*;
 
-#[frame_support::pallet]
+#[frame_support::pallet(dev_mode)]
 pub mod pallet {
 	use super::*;
 	use frame_support::pallet_prelude::*;
diff --git a/substrate/frame/staking/reward-curve/Cargo.toml b/substrate/frame/staking/reward-curve/Cargo.toml
index 484afb6136bf0..0a72599611599 100644
--- a/substrate/frame/staking/reward-curve/Cargo.toml
+++ b/substrate/frame/staking/reward-curve/Cargo.toml
@@ -18,7 +18,7 @@ proc-macro = true
 proc-macro-crate = "1.1.3"
 proc-macro2 = "1.0.56"
 quote = "1.0.28"
-syn = { version = "2.0.37", features = ["full", "visit"] }
+syn = { version = "2.0.38", features = ["full", "visit"] }
 
 [dev-dependencies]
 sp-runtime = { path = "../../../primitives/runtime" }
diff --git a/substrate/frame/sudo/src/lib.rs b/substrate/frame/sudo/src/lib.rs
index 0c869bec7f076..fb29c0da42a99 100644
--- a/substrate/frame/sudo/src/lib.rs
+++ b/substrate/frame/sudo/src/lib.rs
@@ -204,14 +204,15 @@ pub mod pallet {
 		/// ## Complexity
 		/// - O(1).
 		#[pallet::call_index(1)]
-		#[pallet::weight((*_weight, call.get_dispatch_info().class))]
+		#[pallet::weight((*weight, call.get_dispatch_info().class))]
 		pub fn sudo_unchecked_weight(
 			origin: OriginFor<T>,
 			call: Box<<T as Config>::RuntimeCall>,
-			_weight: Weight,
+			weight: Weight,
 		) -> DispatchResultWithPostInfo {
 			// This is a public call, so we ensure that the origin is some signed account.
 			let sender = ensure_signed(origin)?;
+			let _ = weight; // We don't check the weight witness since it is a root call.
 			ensure!(Self::key().map_or(false, |k| sender == k), Error::<T>::RequireSudo);
 
 			let res = call.dispatch_bypass_filter(frame_system::RawOrigin::Root.into());
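
Renaming `_weight` to `weight` and discarding it explicitly keeps the new `UncheckedWeightWitness` lint (introduced later in this diff) quiet while documenting intent. A simplified, self-contained sketch of the convention; the types here are stand-ins:

```rust
// Stand-in type; in FRAME the parameter feeds the `#[pallet::weight]`
// expression, so it must not be underscore-prefixed there.
struct Weight(u64);

fn sudo_unchecked_weight_sketch(weight: Weight) {
	// Deliberately unchecked witness: made explicit instead of `_weight`.
	let _ = weight;
	// ... dispatch the wrapped call with root origin here ...
}

fn main() {
	sudo_unchecked_weight_sketch(Weight(10_000));
}
```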
diff --git a/substrate/frame/support/Cargo.toml b/substrate/frame/support/Cargo.toml
index 5cb5d6d12ab78..65f4885b15999 100644
--- a/substrate/frame/support/Cargo.toml
+++ b/substrate/frame/support/Cargo.toml
@@ -30,7 +30,7 @@ sp-weights = { path = "../../primitives/weights", default-features = false}
 sp-debug-derive = { path = "../../primitives/debug-derive", default-features = false}
 sp-metadata-ir = { path = "../../primitives/metadata-ir", default-features = false}
 tt-call = "1.0.8"
-macro_magic = "0.4.2"
+macro_magic = "0.5.0"
 frame-support-procedural = { path = "procedural", default-features = false}
 paste = "1.0"
 sp-state-machine = { path = "../../primitives/state-machine", default-features = false, optional = true}
diff --git a/substrate/frame/support/procedural/Cargo.toml b/substrate/frame/support/procedural/Cargo.toml
index 6381e430f2baa..45ed1750a5287 100644
--- a/substrate/frame/support/procedural/Cargo.toml
+++ b/substrate/frame/support/procedural/Cargo.toml
@@ -21,11 +21,12 @@ cfg-expr = "0.15.5"
 itertools = "0.10.3"
 proc-macro2 = "1.0.56"
 quote = "1.0.28"
-syn = { version = "2.0.37", features = ["full"] }
+syn = { version = "2.0.38", features = ["full"] }
 frame-support-procedural-tools = { path = "tools" }
-proc-macro-warning = { version = "0.4.2", default-features = false }
-macro_magic = { version = "0.4.2", features = ["proc_support"] }
+macro_magic = { version = "0.5.0", features = ["proc_support"] }
+proc-macro-warning = { version = "1.0.0", default-features = false }
 expander = "2.0.0"
+sp-core-hashing = { path = "../../../primitives/core/hashing" }
 
 [features]
 default = [ "std" ]
diff --git a/substrate/frame/support/procedural/src/construct_runtime/mod.rs b/substrate/frame/support/procedural/src/construct_runtime/mod.rs
index f42dd837e3a95..c3d433643fd7e 100644
--- a/substrate/frame/support/procedural/src/construct_runtime/mod.rs
+++ b/substrate/frame/support/procedural/src/construct_runtime/mod.rs
@@ -211,6 +211,7 @@
 mod expand;
 mod parse;
 
+use crate::pallet::parse::helper::two128_str;
 use cfg_expr::Predicate;
 use frame_support_procedural_tools::{
 	generate_crate_access, generate_crate_access_2018, generate_hidden_includes,
@@ -403,17 +404,19 @@ fn construct_runtime_final_expansion(
 	let integrity_test = decl_integrity_test(&scrate);
 	let static_assertions = decl_static_assertions(&name, &pallets, &scrate);
 
-	let warning =
-		where_section.map_or(None, |where_section| {
-			Some(proc_macro_warning::Warning::new_deprecated("WhereSection")
-			.old("use a `where` clause in `construct_runtime`")
-			.new("use `frame_system::Config` to set the `Block` type and delete this clause. 
-				It is planned to be removed in December 2023")
-			.help_links(&["https://github.com/paritytech/substrate/pull/14437"])
-			.span(where_section.span)
-			.build(),
+	let warning = where_section.map_or(None, |where_section| {
+		Some(
+			proc_macro_warning::Warning::new_deprecated("WhereSection")
+				.old("use a `where` clause in `construct_runtime`")
+				.new(
+					"use `frame_system::Config` to set the `Block` type and delete this clause.
+				It is planned to be removed in December 2023",
+				)
+				.help_links(&["https://github.com/paritytech/substrate/pull/14437"])
+				.span(where_section.span)
+				.build_or_panic(),
 		)
-		});
+	});
 
 	let res = quote!(
 		#warning
@@ -659,7 +662,6 @@ fn decl_all_pallets<'a>(
 		#( #all_pallets_reversed_with_system_first )*
 	)
 }
-
 fn decl_pallet_runtime_setup(
 	runtime: &Ident,
 	pallet_declarations: &[Pallet],
@@ -667,6 +669,7 @@ fn decl_pallet_runtime_setup(
 ) -> TokenStream2 {
 	let names = pallet_declarations.iter().map(|d| &d.name).collect::<Vec<_>>();
 	let name_strings = pallet_declarations.iter().map(|d| d.name.to_string());
+	let name_hashes = pallet_declarations.iter().map(|d| two128_str(&d.name.to_string()));
 	let module_names = pallet_declarations.iter().map(|d| d.path.module_name());
 	let indices = pallet_declarations.iter().map(|pallet| pallet.index as usize);
 	let pallet_structs = pallet_declarations
@@ -699,6 +702,7 @@ fn decl_pallet_runtime_setup(
 		pub struct PalletInfo;
 
 		impl #scrate::traits::PalletInfo for PalletInfo {
+
 			fn index<P: 'static>() -> Option<usize> {
 				let type_id = #scrate::__private::sp_std::any::TypeId::of::<P>();
 				#(
@@ -723,6 +727,18 @@ fn decl_pallet_runtime_setup(
 				None
 			}
 
+			fn name_hash<P: 'static>() -> Option<[u8; 16]> {
+				let type_id = #scrate::__private::sp_std::any::TypeId::of::<P>();
+				#(
+					#pallet_attrs
+					if type_id == #scrate::__private::sp_std::any::TypeId::of::<#names>() {
+						return Some(#name_hashes)
+					}
+				)*
+
+				None
+			}
+
 			fn module_name<P: 'static>() -> Option<&'static str> {
 				let type_id = #scrate::__private::sp_std::any::TypeId::of::<P>();
 				#(
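
The generated `name_hash` above chains `TypeId` comparisons against hashes baked in at expansion time. A standalone approximation of the expanded shape; the hash values are placeholders, whereas the real macro splices `twox_128` of each pallet name via `two128_str`:

```rust
use std::any::TypeId;

struct System;
struct Balances;

// Placeholders for the literals the macro embeds at expansion time.
const SYSTEM_NAME_HASH: [u8; 16] = [0x00; 16];
const BALANCES_NAME_HASH: [u8; 16] = [0x01; 16];

fn name_hash<P: 'static>() -> Option<[u8; 16]> {
	let type_id = TypeId::of::<P>();
	if type_id == TypeId::of::<System>() {
		return Some(SYSTEM_NAME_HASH)
	}
	if type_id == TypeId::of::<Balances>() {
		return Some(BALANCES_NAME_HASH)
	}
	None
}

fn main() {
	assert_eq!(name_hash::<System>(), Some(SYSTEM_NAME_HASH));
	assert_eq!(name_hash::<String>(), None);
}
```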
diff --git a/substrate/frame/support/procedural/src/lib.rs b/substrate/frame/support/procedural/src/lib.rs
index da4cb41fe4f2f..07b5a50da417b 100644
--- a/substrate/frame/support/procedural/src/lib.rs
+++ b/substrate/frame/support/procedural/src/lib.rs
@@ -34,7 +34,7 @@ mod transactional;
 mod tt_macro;
 
 use frame_support_procedural_tools::generate_crate_access_2018;
-use macro_magic::import_tokens_attr;
+use macro_magic::{import_tokens_attr, import_tokens_attr_verbatim};
 use proc_macro::TokenStream;
 use quote::{quote, ToTokens};
 use std::{cell::RefCell, str::FromStr};
@@ -751,7 +751,7 @@ pub fn storage_alias(attributes: TokenStream, input: TokenStream) -> TokenStream
 /// Items that lack a `syn::Ident` for whatever reason are first checked to see if they exist,
 /// verbatim, in the local/destination trait before they are copied over, so you should not need to
 /// worry about collisions between identical unnamed items.
-#[import_tokens_attr {
+#[import_tokens_attr_verbatim {
     format!(
         "{}::macro_magic",
         match generate_crate_access_2018("frame-support") {
@@ -864,7 +864,12 @@ pub fn register_default_impl(attrs: TokenStream, tokens: TokenStream) -> TokenSt
 	let item_impl = syn::parse_macro_input!(tokens as ItemImpl);
 
 	// internally wrap macro_magic's `#[export_tokens]` macro
-	match macro_magic::mm_core::export_tokens_internal(attrs, item_impl.to_token_stream(), true) {
+	match macro_magic::mm_core::export_tokens_internal(
+		attrs,
+		item_impl.to_token_stream(),
+		true,
+		false,
+	) {
 		Ok(tokens) => tokens.into(),
 		Err(err) => err.to_compile_error().into(),
 	}
@@ -1565,7 +1570,7 @@ pub fn pallet_section(attr: TokenStream, tokens: TokenStream) -> TokenStream {
 	let _mod = parse_macro_input!(tokens_clone as ItemMod);
 
 	// use macro_magic's export_tokens as the internal implementation otherwise
-	match macro_magic::mm_core::export_tokens_internal(attr, tokens, false) {
+	match macro_magic::mm_core::export_tokens_internal(attr, tokens, false, true) {
 		Ok(tokens) => tokens.into(),
 		Err(err) => err.to_compile_error().into(),
 	}
diff --git a/substrate/frame/support/procedural/src/pallet/expand/call.rs b/substrate/frame/support/procedural/src/pallet/expand/call.rs
index 3ed5509863e91..ed6335159cd6e 100644
--- a/substrate/frame/support/procedural/src/pallet/expand/call.rs
+++ b/substrate/frame/support/procedural/src/pallet/expand/call.rs
@@ -17,12 +17,14 @@
 
 use crate::{
 	pallet::{
+		expand::warnings::{weight_constant_warning, weight_witness_warning},
 		parse::call::{CallVariantDef, CallWeightDef},
 		Def,
 	},
 	COUNTER,
 };
 use proc_macro2::TokenStream as TokenStream2;
+use proc_macro_warning::Warning;
 use quote::{quote, ToTokens};
 use syn::spanned::Spanned;
 
@@ -68,7 +70,7 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream {
 			continue
 		}
 
-		let warning = proc_macro_warning::Warning::new_deprecated("ImplicitCallIndex")
+		let warning = Warning::new_deprecated("ImplicitCallIndex")
 			.index(call_index_warnings.len())
 			.old("use implicit call indices")
 			.new("ensure that all calls have a `pallet::call_index` attribute or put the pallet into `dev` mode")
@@ -77,7 +79,7 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream {
 				"https://github.com/paritytech/substrate/pull/11381"
 			])
 			.span(method.name.span())
-			.build();
+			.build_or_panic();
 		call_index_warnings.push(warning);
 	}
 
@@ -86,18 +88,12 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream {
 	for method in &methods {
 		match &method.weight {
 			CallWeightDef::DevModeDefault => fn_weight.push(syn::parse_quote!(0)),
-			CallWeightDef::Immediate(e @ syn::Expr::Lit(lit)) if !def.dev_mode => {
-				let warning = proc_macro_warning::Warning::new_deprecated("ConstantWeight")
-					.index(weight_warnings.len())
-					.old("use hard-coded constant as call weight")
-					.new("benchmark all calls or put the pallet into `dev` mode")
-					.help_link("https://github.com/paritytech/substrate/pull/13798")
-					.span(lit.span())
-					.build();
-				weight_warnings.push(warning);
+			CallWeightDef::Immediate(e) => {
+				weight_constant_warning(e, def.dev_mode, &mut weight_warnings);
+				weight_witness_warning(method, def.dev_mode, &mut weight_warnings);
+
 				fn_weight.push(e.into_token_stream());
 			},
-			CallWeightDef::Immediate(e) => fn_weight.push(e.into_token_stream()),
 			CallWeightDef::Inherited => {
 				let pallet_weight = def
 					.call
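
`weight_constant_warning` (defined in the new `warnings.rs` below) fires on weight expressions that parse as literals. A minimal reproduction of the trigger condition, assuming `syn` with its `full` and default parsing features as a dependency:

```rust
// Literal weight expressions (`#[pallet::weight(10_000)]`) are what the
// refactored code now warns about outside `dev_mode`; benchmarked weights
// such as `T::WeightInfo::transfer()` pass untouched.
fn is_constant_weight(expr: &syn::Expr) -> bool {
	matches!(expr, syn::Expr::Lit(_))
}

fn main() {
	let constant: syn::Expr = syn::parse_quote!(10_000);
	let benchmarked: syn::Expr = syn::parse_quote!(T::WeightInfo::transfer());
	assert!(is_constant_weight(&constant));
	assert!(!is_constant_weight(&benchmarked));
}
```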
diff --git a/substrate/frame/support/procedural/src/pallet/expand/mod.rs b/substrate/frame/support/procedural/src/pallet/expand/mod.rs
index 2b998227c1d84..6f32e5697512f 100644
--- a/substrate/frame/support/procedural/src/pallet/expand/mod.rs
+++ b/substrate/frame/support/procedural/src/pallet/expand/mod.rs
@@ -34,6 +34,7 @@ mod store_trait;
 mod tt_default_parts;
 mod type_value;
 mod validate_unsigned;
+mod warnings;
 
 use crate::pallet::Def;
 use quote::ToTokens;
diff --git a/substrate/frame/support/procedural/src/pallet/expand/pallet_struct.rs b/substrate/frame/support/procedural/src/pallet/expand/pallet_struct.rs
index e519e34d1dfd9..c2102f0284dbe 100644
--- a/substrate/frame/support/procedural/src/pallet/expand/pallet_struct.rs
+++ b/substrate/frame/support/procedural/src/pallet/expand/pallet_struct.rs
@@ -246,6 +246,14 @@ pub fn expand_pallet_struct(def: &mut Def) -> proc_macro2::TokenStream {
 						implemented by the runtime")
 			}
 
+			fn name_hash() -> [u8; 16] {
+				<
+					<T as #frame_system::Config>::PalletInfo as #frame_support::traits::PalletInfo
+				>::name_hash::<Self>()
+					.expect("Pallet is part of the runtime because pallet `Config` trait is \
+						implemented by the runtime")
+			}
+
 			fn module_name() -> &'static str {
 				<
 					<T as #frame_system::Config>::PalletInfo as #frame_support::traits::PalletInfo
diff --git a/substrate/frame/support/procedural/src/pallet/expand/storage.rs b/substrate/frame/support/procedural/src/pallet/expand/storage.rs
index c01f0f3926a69..e7f7cf548f0ea 100644
--- a/substrate/frame/support/procedural/src/pallet/expand/storage.rs
+++ b/substrate/frame/support/procedural/src/pallet/expand/storage.rs
@@ -18,7 +18,10 @@
 use crate::{
 	counter_prefix,
 	pallet::{
-		parse::storage::{Metadata, QueryKind, StorageDef, StorageGenerics},
+		parse::{
+			helper::two128_str,
+			storage::{Metadata, QueryKind, StorageDef, StorageGenerics},
+		},
 		Def,
 	},
 };
@@ -638,6 +641,7 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream {
 			Metadata::CountedMap { .. } => {
 				let counter_prefix_struct_ident = counter_prefix_ident(&storage_def.ident);
 				let counter_prefix_struct_const = counter_prefix(&prefix_struct_const);
+				let storage_prefix_hash = two128_str(&counter_prefix_struct_const);
 				quote::quote_spanned!(storage_def.attr_span =>
 					#(#cfg_attrs)*
 					#[doc(hidden)]
@@ -656,7 +660,19 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream {
 							>::name::<Pallet<#type_use_gen>>()
 								.expect("No name found for the pallet in the runtime! This usually means that the pallet wasn't added to `construct_runtime!`.")
 						}
+
+						fn pallet_prefix_hash() -> [u8; 16] {
+							<
+								<T as #frame_system::Config>::PalletInfo
+								as #frame_support::traits::PalletInfo
+							>::name_hash::<Pallet<#type_use_gen>>()
+								.expect("No name_hash found for the pallet in the runtime! This usually means that the pallet wasn't added to `construct_runtime!`.")
+						}
+
 						const STORAGE_PREFIX: &'static str = #counter_prefix_struct_const;
+						fn storage_prefix_hash() -> [u8; 16] {
+							#storage_prefix_hash
+						}
 					}
 					#(#cfg_attrs)*
 					impl<#type_impl_gen> #frame_support::storage::types::CountedStorageMapInstance
@@ -670,6 +686,7 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream {
 			Metadata::CountedNMap { .. } => {
 				let counter_prefix_struct_ident = counter_prefix_ident(&storage_def.ident);
 				let counter_prefix_struct_const = counter_prefix(&prefix_struct_const);
+				let storage_prefix_hash = two128_str(&counter_prefix_struct_const);
 				quote::quote_spanned!(storage_def.attr_span =>
 					#(#cfg_attrs)*
 					#[doc(hidden)]
@@ -688,7 +705,17 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream {
 							>::name::<Pallet<#type_use_gen>>()
 								.expect("No name found for the pallet in the runtime! This usually means that the pallet wasn't added to `construct_runtime!`.")
 						}
+						fn pallet_prefix_hash() -> [u8; 16] {
+							<
+								<T as #frame_system::Config>::PalletInfo
+								as #frame_support::traits::PalletInfo
+							>::name_hash::<Pallet<#type_use_gen>>()
+								.expect("No name_hash found for the pallet in the runtime! This usually means that the pallet wasn't added to `construct_runtime!`.")
+						}
 						const STORAGE_PREFIX: &'static str = #counter_prefix_struct_const;
+						fn storage_prefix_hash() -> [u8; 16] {
+							#storage_prefix_hash
+						}
 					}
 					#(#cfg_attrs)*
 					impl<#type_impl_gen> #frame_support::storage::types::CountedStorageNMapInstance
@@ -702,6 +729,7 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream {
 			_ => proc_macro2::TokenStream::default(),
 		};
 
+		let storage_prefix_hash = two128_str(&prefix_struct_const);
 		quote::quote_spanned!(storage_def.attr_span =>
 			#maybe_counter
 
@@ -722,7 +750,19 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream {
 					>::name::<Pallet<#type_use_gen>>()
 						.expect("No name found for the pallet in the runtime! This usually means that the pallet wasn't added to `construct_runtime!`.")
 				}
+
+				fn pallet_prefix_hash() -> [u8; 16] {
+					<
+						<T as #frame_system::Config>::PalletInfo
+						as #frame_support::traits::PalletInfo
+					>::name_hash::<Pallet<#type_use_gen>>()
+						.expect("No name_hash found for the pallet in the runtime! This usually means that the pallet wasn't added to `construct_runtime!`.")
+				}
+
 				const STORAGE_PREFIX: &'static str = #prefix_struct_const;
+				fn storage_prefix_hash() -> [u8; 16] {
+					#storage_prefix_hash
+				}
 			}
 		)
 	});
diff --git a/substrate/frame/support/procedural/src/pallet/expand/warnings.rs b/substrate/frame/support/procedural/src/pallet/expand/warnings.rs
new file mode 100644
index 0000000000000..ae5890878a2f6
--- /dev/null
+++ b/substrate/frame/support/procedural/src/pallet/expand/warnings.rs
@@ -0,0 +1,102 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Generates warnings for undesirable pallet code.
+
+use crate::pallet::parse::call::{CallVariantDef, CallWeightDef};
+use proc_macro_warning::Warning;
+use syn::{
+	spanned::Spanned,
+	visit::{self, Visit},
+};
+
+/// Warn if any of the call arguments starts with an underscore and is used in the weight formula.
+pub(crate) fn weight_witness_warning(
+	method: &CallVariantDef,
+	dev_mode: bool,
+	warnings: &mut Vec<Warning>,
+) {
+	if dev_mode {
+		return
+	}
+	let CallWeightDef::Immediate(w) = &method.weight else {
+		return;
+	};
+
+	let partial_warning = Warning::new_deprecated("UncheckedWeightWitness")
+		.old("not check weight witness data")
+		.new("ensure that all witness data for weight calculation is checked before usage")
+		.help_link("https://github.com/paritytech/polkadot-sdk/pull/1818");
+
+	for (_, arg_ident, _) in method.args.iter() {
+		if !arg_ident.to_string().starts_with('_') || !contains_ident(w, &arg_ident) {
+			continue
+		}
+
+		let warning = partial_warning
+			.clone()
+			.index(warnings.len())
+			.span(arg_ident.span())
+			.build_or_panic();
+
+		warnings.push(warning);
+	}
+}
+
+/// Warn if the weight is a constant and the pallet is not in `dev_mode`.
+pub(crate) fn weight_constant_warning(
+	weight: &syn::Expr,
+	dev_mode: bool,
+	warnings: &mut Vec<Warning>,
+) {
+	if dev_mode {
+		return
+	}
+	let syn::Expr::Lit(lit) = weight else {
+		return;
+	};
+
+	let warning = Warning::new_deprecated("ConstantWeight")
+		.index(warnings.len())
+		.old("use hard-coded constant as call weight")
+		.new("benchmark all calls or put the pallet into `dev` mode")
+		.help_link("https://github.com/paritytech/substrate/pull/13798")
+		.span(lit.span())
+		.build_or_panic();
+
+	warnings.push(warning);
+}
+
+/// Returns whether `expr` contains `ident`.
+fn contains_ident(expr: &syn::Expr, ident: &syn::Ident) -> bool {
+	struct ContainsIdent {
+		ident: syn::Ident,
+		found: bool,
+	}
+
+	impl<'a> Visit<'a> for ContainsIdent {
+		fn visit_ident(&mut self, i: &syn::Ident) {
+			if *i == self.ident {
+				self.found = true;
+			}
+		}
+	}
+
+	let mut visitor = ContainsIdent { ident: ident.clone(), found: false };
+	visit::visit_expr(&mut visitor, expr);
+	visitor.found
+}
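
A quick check of `contains_ident` semantics, using the helper above and assuming `syn` with the `full` and `visit` features; this is the link between an underscore-prefixed argument and its use in the weight formula:

```rust
fn main() {
	let weight: syn::Expr = syn::parse_quote!(T::DbWeight::get().reads(_count as u64));
	let used: syn::Ident = syn::parse_quote!(_count);
	let unused: syn::Ident = syn::parse_quote!(_other);
	// `_count` occurs in the weight formula, so a call argument named
	// `_count` would trigger the `UncheckedWeightWitness` warning.
	assert!(contains_ident(&weight, &used));
	assert!(!contains_ident(&weight, &unused));
}
```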
diff --git a/substrate/frame/support/procedural/src/pallet/mod.rs b/substrate/frame/support/procedural/src/pallet/mod.rs
index 3618711051d7f..42d8272fb23ed 100644
--- a/substrate/frame/support/procedural/src/pallet/mod.rs
+++ b/substrate/frame/support/procedural/src/pallet/mod.rs
@@ -26,7 +26,7 @@
 //!   to user defined types. And also create new types and implement block.
 
 mod expand;
-mod parse;
+pub(crate) mod parse;
 
 pub use parse::{composite::keyword::CompositeKeyword, Def};
 use syn::spanned::Spanned;
diff --git a/substrate/frame/support/procedural/src/pallet/parse/helper.rs b/substrate/frame/support/procedural/src/pallet/parse/helper.rs
index bfa19d8ddc39b..446ec203d2ba5 100644
--- a/substrate/frame/support/procedural/src/pallet/parse/helper.rs
+++ b/substrate/frame/support/procedural/src/pallet/parse/helper.rs
@@ -15,7 +15,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-use quote::ToTokens;
+use proc_macro2::TokenStream;
+use quote::{quote, ToTokens};
 use syn::spanned::Spanned;
 
 /// List of additional token to be used for parsing.
@@ -610,3 +611,16 @@ pub fn check_pallet_call_return_type(type_: &syn::Type) -> syn::Result<()> {
 
 	syn::parse2::<Checker>(type_.to_token_stream()).map(|_| ())
 }
+
+pub(crate) fn two128_str(s: &str) -> TokenStream {
+	bytes_to_array(sp_core_hashing::twox_128(s.as_bytes()).into_iter())
+}
+
+pub(crate) fn bytes_to_array(bytes: impl IntoIterator<Item = u8>) -> TokenStream {
+	let bytes = bytes.into_iter();
+
+	quote!(
+		[ #( #bytes ),* ]
+	)
+	.into()
+}
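
`two128_str` does the hashing at macro-expansion time and splices the result as an array literal, so the runtime never recomputes prefix hashes. A sketch of the equivalence, assuming the `sp-core-hashing` crate (added to `Cargo.toml` above) as a dependency:

```rust
fn main() {
	// `two128_str("Value")` expands to exactly this 16-byte array,
	// written out as `[ b0, b1, ..., b15 ]` in the generated code.
	let hash: [u8; 16] = sp_core_hashing::twox_128(b"Value");
	println!("{hash:02x?}");
}
```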
diff --git a/substrate/frame/support/procedural/src/storage_alias.rs b/substrate/frame/support/procedural/src/storage_alias.rs
index a3f21806e18b9..4903fd1c129c7 100644
--- a/substrate/frame/support/procedural/src/storage_alias.rs
+++ b/substrate/frame/support/procedural/src/storage_alias.rs
@@ -17,7 +17,7 @@
 
 //! Implementation of the `storage_alias` attribute macro.
 
-use crate::counter_prefix;
+use crate::{counter_prefix, pallet::parse::helper};
 use frame_support_procedural_tools::generate_crate_access_2018;
 use proc_macro2::{Span, TokenStream};
 use quote::{quote, ToTokens};
@@ -619,6 +619,7 @@ fn generate_storage_instance(
 	let counter_code = is_counted_map.then(|| {
 		let counter_name = Ident::new(&counter_prefix(&name_str), Span::call_site());
 		let counter_storage_name_str = counter_prefix(&storage_name_str);
+		let storage_prefix_hash = helper::two128_str(&counter_storage_name_str);
 
 		quote! {
 			#visibility struct #counter_name< #impl_generics >(
@@ -633,6 +634,9 @@ fn generate_storage_instance(
 				}
 
 				const STORAGE_PREFIX: &'static str = #counter_storage_name_str;
+				fn storage_prefix_hash() -> [u8; 16] {
+					#storage_prefix_hash
+				}
 			}
 
 			impl<#impl_generics> #crate_::storage::types::CountedStorageMapInstance
@@ -643,6 +647,8 @@ fn generate_storage_instance(
 		}
 	});
 
+	let storage_prefix_hash = helper::two128_str(&storage_name_str);
+
 	// Implement `StorageInstance` trait.
 	let code = quote! {
 		#[allow(non_camel_case_types)]
@@ -658,6 +664,9 @@ fn generate_storage_instance(
 			}
 
 			const STORAGE_PREFIX: &'static str = #storage_name_str;
+			fn storage_prefix_hash() -> [u8; 16] {
+				#storage_prefix_hash
+			}
 		}
 
 		#counter_code
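
The counter instance generated above lives under the conventional `CounterFor` name. A tiny standalone sketch mirroring the crate's `counter_prefix` helper, which prepends `CounterFor` to the storage name:

```rust
// Mirrors `counter_prefix` as used above: the counter map for a counted
// storage map `MyMap` is stored under the `CounterForMyMap` prefix.
fn counter_prefix(prefix: &str) -> String {
	format!("CounterFor{}", prefix)
}

fn main() {
	assert_eq!(counter_prefix("MyMap"), "CounterForMyMap");
}
```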
diff --git a/substrate/frame/support/procedural/tools/Cargo.toml b/substrate/frame/support/procedural/tools/Cargo.toml
index 7589fa353d16a..fd42e18180d39 100644
--- a/substrate/frame/support/procedural/tools/Cargo.toml
+++ b/substrate/frame/support/procedural/tools/Cargo.toml
@@ -15,5 +15,5 @@ targets = ["x86_64-unknown-linux-gnu"]
 proc-macro-crate = "1.1.3"
 proc-macro2 = "1.0.56"
 quote = "1.0.28"
-syn = { version = "2.0.37", features = ["full", "visit", "extra-traits"] }
+syn = { version = "2.0.38", features = ["full", "visit", "extra-traits"] }
 frame-support-procedural-tools-derive = { path = "derive" }
diff --git a/substrate/frame/support/procedural/tools/derive/Cargo.toml b/substrate/frame/support/procedural/tools/derive/Cargo.toml
index 5bf67d43d06ed..06f8e0f3d537a 100644
--- a/substrate/frame/support/procedural/tools/derive/Cargo.toml
+++ b/substrate/frame/support/procedural/tools/derive/Cargo.toml
@@ -17,4 +17,4 @@ proc-macro = true
 [dependencies]
 proc-macro2 = "1.0.56"
 quote = { version = "1.0.28", features = ["proc-macro"] }
-syn = { version = "2.0.37", features = ["proc-macro", "full", "extra-traits", "parsing"] }
+syn = { version = "2.0.38", features = ["proc-macro", "full", "extra-traits", "parsing"] }
diff --git a/substrate/frame/support/src/storage/generator/double_map.rs b/substrate/frame/support/src/storage/generator/double_map.rs
index 00a3f1bc7c1ce..a4c1f58203e3c 100644
--- a/substrate/frame/support/src/storage/generator/double_map.rs
+++ b/substrate/frame/support/src/storage/generator/double_map.rs
@@ -33,7 +33,7 @@ use sp_std::prelude::*;
 ///
 /// Thus value for (key1, key2) is stored at:
 /// ```nocompile
-/// Twox128(module_prefix) ++ Twox128(storage_prefix) ++ Hasher1(encode(key1)) ++ Hasher2(encode(key2))
+/// Twox128(pallet_prefix) ++ Twox128(storage_prefix) ++ Hasher1(encode(key1)) ++ Hasher2(encode(key2))
 /// ```
 ///
 /// # Warning
@@ -53,18 +53,15 @@ pub trait StorageDoubleMap<K1: FullEncode, K2: FullEncode, V: FullCodec> {
 	/// Hasher for the second key.
 	type Hasher2: StorageHasher;
 
-	/// Module prefix. Used for generating final key.
-	fn module_prefix() -> &'static [u8];
+	/// Pallet prefix. Used for generating final key.
+	fn pallet_prefix() -> &'static [u8];
 
 	/// Storage prefix. Used for generating final key.
 	fn storage_prefix() -> &'static [u8];
 
-	/// The full prefix; just the hash of `module_prefix` concatenated to the hash of
+	/// The full prefix; just the hash of `pallet_prefix` concatenated to the hash of
 	/// `storage_prefix`.
-	fn prefix_hash() -> Vec<u8> {
-		let result = storage_prefix(Self::module_prefix(), Self::storage_prefix());
-		result.to_vec()
-	}
+	fn prefix_hash() -> [u8; 32];
 
 	/// Convert an optional value retrieved from storage to the type queried.
 	fn from_optional_value_to_query(v: Option<V>) -> Self::Query;
@@ -77,7 +74,7 @@ pub trait StorageDoubleMap<K1: FullEncode, K2: FullEncode, V: FullCodec> {
 	where
 		KArg1: EncodeLike<K1>,
 	{
-		let storage_prefix = storage_prefix(Self::module_prefix(), Self::storage_prefix());
+		let storage_prefix = storage_prefix(Self::pallet_prefix(), Self::storage_prefix());
 		let key_hashed = k1.using_encoded(Self::Hasher1::hash);
 
 		let mut final_key = Vec::with_capacity(storage_prefix.len() + key_hashed.as_ref().len());
@@ -94,7 +91,7 @@ pub trait StorageDoubleMap<K1: FullEncode, K2: FullEncode, V: FullCodec> {
 		KArg1: EncodeLike<K1>,
 		KArg2: EncodeLike<K2>,
 	{
-		let storage_prefix = storage_prefix(Self::module_prefix(), Self::storage_prefix());
+		let storage_prefix = storage_prefix(Self::pallet_prefix(), Self::storage_prefix());
 		let key1_hashed = k1.using_encoded(Self::Hasher1::hash);
 		let key2_hashed = k2.using_encoded(Self::Hasher2::hash);
 
@@ -334,7 +331,7 @@ where
 		key2: KeyArg2,
 	) -> Option<V> {
 		let old_key = {
-			let storage_prefix = storage_prefix(Self::module_prefix(), Self::storage_prefix());
+			let storage_prefix = storage_prefix(Self::pallet_prefix(), Self::storage_prefix());
 
 			let key1_hashed = key1.using_encoded(OldHasher1::hash);
 			let key2_hashed = key2.using_encoded(OldHasher2::hash);
@@ -419,7 +416,7 @@ where
 	}
 
 	fn iter() -> Self::Iterator {
-		let prefix = G::prefix_hash();
+		let prefix = G::prefix_hash().to_vec();
 		Self::Iterator {
 			prefix: prefix.clone(),
 			previous_key: prefix,
@@ -442,7 +439,7 @@ where
 	}
 
 	fn iter_keys() -> Self::FullKeyIterator {
-		let prefix = G::prefix_hash();
+		let prefix = G::prefix_hash().to_vec();
 		Self::FullKeyIterator {
 			prefix: prefix.clone(),
 			previous_key: prefix,
@@ -470,7 +467,7 @@ where
 	}
 
 	fn translate<O: Decode, F: FnMut(K1, K2, O) -> Option<V>>(mut f: F) {
-		let prefix = G::prefix_hash();
+		let prefix = G::prefix_hash().to_vec();
 		let mut previous_key = prefix.clone();
 		while let Some(next) =
 			sp_io::storage::next_key(&previous_key).filter(|n| n.starts_with(&prefix))
@@ -561,7 +558,7 @@ mod test_iterators {
 			type DoubleMap = self::frame_system::DoubleMap<Runtime>;
 
 			// All map iterator
-			let prefix = DoubleMap::prefix_hash();
+			let prefix = DoubleMap::prefix_hash().to_vec();
 
 			unhashed::put(&key_before_prefix(prefix.clone()), &1u64);
 			unhashed::put(&key_after_prefix(prefix.clone()), &1u64);
@@ -621,7 +618,7 @@ mod test_iterators {
 			assert_eq!(unhashed::get(&key_after_prefix(prefix.clone())), Some(1u64));
 
 			// Translate
-			let prefix = DoubleMap::prefix_hash();
+			let prefix = DoubleMap::prefix_hash().to_vec();
 
 			unhashed::put(&key_before_prefix(prefix.clone()), &1u64);
 			unhashed::put(&key_after_prefix(prefix.clone()), &1u64);
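
`prefix_hash()` is now a fixed `[u8; 32]`: the concatenated `twox_128` of the pallet and storage prefixes, with `.to_vec()` applied only where a growable key is needed. A host-independent sketch of the layout, assuming `sp-core-hashing`:

```rust
// Twox128(pallet_prefix) ++ Twox128(storage_prefix); per-key hashers are
// appended after these 32 bytes (e.g. Hasher1(encode(key1)) for double maps).
fn prefix_hash(pallet_prefix: &[u8], storage_prefix: &[u8]) -> [u8; 32] {
	let mut out = [0u8; 32];
	out[..16].copy_from_slice(&sp_core_hashing::twox_128(pallet_prefix));
	out[16..].copy_from_slice(&sp_core_hashing::twox_128(storage_prefix));
	out
}

fn main() {
	let prefix = prefix_hash(b"MyModule", b"MyStorageMap").to_vec();
	assert_eq!(prefix.len(), 32);
}
```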
diff --git a/substrate/frame/support/src/storage/generator/map.rs b/substrate/frame/support/src/storage/generator/map.rs
index 1d2511e324dc6..b2919bff8d134 100644
--- a/substrate/frame/support/src/storage/generator/map.rs
+++ b/substrate/frame/support/src/storage/generator/map.rs
@@ -28,7 +28,7 @@ use sp_std::prelude::*;
 ///
 /// By default each key value is stored at:
 /// ```nocompile
-/// Twox128(module_prefix) ++ Twox128(storage_prefix) ++ Hasher(encode(key))
+/// Twox128(pallet_prefix) ++ Twox128(storage_prefix) ++ Hasher(encode(key))
 /// ```
 ///
 /// # Warning
@@ -42,18 +42,15 @@ pub trait StorageMap<K: FullEncode, V: FullCodec> {
 	/// Hasher. Used for generating final key.
 	type Hasher: StorageHasher;
 
-	/// Module prefix. Used for generating final key.
-	fn module_prefix() -> &'static [u8];
+	/// Pallet prefix. Used for generating final key.
+	fn pallet_prefix() -> &'static [u8];
 
 	/// Storage prefix. Used for generating final key.
 	fn storage_prefix() -> &'static [u8];
 
-	/// The full prefix; just the hash of `module_prefix` concatenated to the hash of
+	/// The full prefix; just the hash of `pallet_prefix` concatenated to the hash of
 	/// `storage_prefix`.
-	fn prefix_hash() -> Vec<u8> {
-		let result = storage_prefix(Self::module_prefix(), Self::storage_prefix());
-		result.to_vec()
-	}
+	fn prefix_hash() -> [u8; 32];
 
 	/// Convert an optional value retrieved from storage to the type queried.
 	fn from_optional_value_to_query(v: Option<V>) -> Self::Query;
@@ -66,7 +63,7 @@ pub trait StorageMap<K: FullEncode, V: FullCodec> {
 	where
 		KeyArg: EncodeLike<K>,
 	{
-		let storage_prefix = storage_prefix(Self::module_prefix(), Self::storage_prefix());
+		let storage_prefix = storage_prefix(Self::pallet_prefix(), Self::storage_prefix());
 		let key_hashed = key.using_encoded(Self::Hasher::hash);
 
 		let mut final_key = Vec::with_capacity(storage_prefix.len() + key_hashed.as_ref().len());
@@ -128,7 +125,7 @@ where
 
 	/// Enumerate all elements in the map.
 	fn iter() -> Self::Iterator {
-		let prefix = G::prefix_hash();
+		let prefix = G::prefix_hash().to_vec();
 		PrefixIterator {
 			prefix: prefix.clone(),
 			previous_key: prefix,
@@ -150,7 +147,7 @@ where
 
 	/// Enumerate all keys in the map.
 	fn iter_keys() -> Self::KeyIterator {
-		let prefix = G::prefix_hash();
+		let prefix = G::prefix_hash().to_vec();
 		KeyPrefixIterator {
 			prefix: prefix.clone(),
 			previous_key: prefix,
@@ -190,7 +187,7 @@ where
 		previous_key: Option<Vec<u8>>,
 		mut f: F,
 	) -> Option<Vec<u8>> {
-		let prefix = G::prefix_hash();
+		let prefix = G::prefix_hash().to_vec();
 		let previous_key = previous_key.unwrap_or_else(|| prefix.clone());
 
 		let current_key =
@@ -339,7 +336,7 @@ impl<K: FullEncode, V: FullCodec, G: StorageMap<K, V>> storage::StorageMap<K, V>
 
 	fn migrate_key<OldHasher: StorageHasher, KeyArg: EncodeLike<K>>(key: KeyArg) -> Option<V> {
 		let old_key = {
-			let storage_prefix = storage_prefix(Self::module_prefix(), Self::storage_prefix());
+			let storage_prefix = storage_prefix(Self::pallet_prefix(), Self::storage_prefix());
 			let key_hashed = key.using_encoded(OldHasher::hash);
 
 			let mut final_key =
@@ -398,7 +395,7 @@ mod test_iterators {
 			type Map = self::frame_system::Map<Runtime>;
 
 			// All map iterator
-			let prefix = Map::prefix_hash();
+			let prefix = Map::prefix_hash().to_vec();
 
 			unhashed::put(&key_before_prefix(prefix.clone()), &1u64);
 			unhashed::put(&key_after_prefix(prefix.clone()), &1u64);
@@ -420,7 +417,7 @@ mod test_iterators {
 			assert_eq!(unhashed::get(&key_after_prefix(prefix.clone())), Some(1u64));
 
 			// Translate
-			let prefix = Map::prefix_hash();
+			let prefix = Map::prefix_hash().to_vec();
 
 			unhashed::put(&key_before_prefix(prefix.clone()), &1u64);
 			unhashed::put(&key_after_prefix(prefix.clone()), &1u64);
diff --git a/substrate/frame/support/src/storage/generator/nmap.rs b/substrate/frame/support/src/storage/generator/nmap.rs
index 5d3d689aa98a6..4b49ad3eb38d4 100755
--- a/substrate/frame/support/src/storage/generator/nmap.rs
+++ b/substrate/frame/support/src/storage/generator/nmap.rs
@@ -61,18 +61,15 @@ pub trait StorageNMap<K: KeyGenerator, V: FullCodec> {
 	/// The type that get/take returns.
 	type Query;
 
-	/// Module prefix. Used for generating final key.
-	fn module_prefix() -> &'static [u8];
+	/// Pallet prefix. Used for generating final key.
+	fn pallet_prefix() -> &'static [u8];
 
 	/// Storage prefix. Used for generating final key.
 	fn storage_prefix() -> &'static [u8];
 
-	/// The full prefix; just the hash of `module_prefix` concatenated to the hash of
+	/// The full prefix; just the hash of `pallet_prefix` concatenated to the hash of
 	/// `storage_prefix`.
-	fn prefix_hash() -> Vec<u8> {
-		let result = storage_prefix(Self::module_prefix(), Self::storage_prefix());
-		result.to_vec()
-	}
+	fn prefix_hash() -> [u8; 32];
 
 	/// Convert an optional value retrieved from storage to the type queried.
 	fn from_optional_value_to_query(v: Option<V>) -> Self::Query;
@@ -85,7 +82,7 @@ pub trait StorageNMap<K: KeyGenerator, V: FullCodec> {
 	where
 		K: HasKeyPrefix<KP>,
 	{
-		let storage_prefix = storage_prefix(Self::module_prefix(), Self::storage_prefix());
+		let storage_prefix = storage_prefix(Self::pallet_prefix(), Self::storage_prefix());
 		let key_hashed = <K as HasKeyPrefix<KP>>::partial_key(key);
 
 		let mut final_key = Vec::with_capacity(storage_prefix.len() + key_hashed.len());
@@ -102,7 +99,7 @@ pub trait StorageNMap<K: KeyGenerator, V: FullCodec> {
 		KG: KeyGenerator,
 		KArg: EncodeLikeTuple<KG::KArg> + TupleToEncodedIter,
 	{
-		let storage_prefix = storage_prefix(Self::module_prefix(), Self::storage_prefix());
+		let storage_prefix = storage_prefix(Self::pallet_prefix(), Self::storage_prefix());
 		let key_hashed = KG::final_key(key);
 
 		let mut final_key = Vec::with_capacity(storage_prefix.len() + key_hashed.len());
@@ -299,7 +296,7 @@ where
 		KArg: EncodeLikeTuple<K::KArg> + TupleToEncodedIter,
 	{
 		let old_key = {
-			let storage_prefix = storage_prefix(Self::module_prefix(), Self::storage_prefix());
+			let storage_prefix = storage_prefix(Self::pallet_prefix(), Self::storage_prefix());
 			let key_hashed = K::migrate_key(&key, hash_fns);
 
 			let mut final_key = Vec::with_capacity(storage_prefix.len() + key_hashed.len());
@@ -386,11 +383,11 @@ impl<K: ReversibleKeyGenerator, V: FullCodec, G: StorageNMap<K, V>>
 	}
 
 	fn iter() -> Self::Iterator {
-		Self::iter_from(G::prefix_hash())
+		Self::iter_from(G::prefix_hash().to_vec())
 	}
 
 	fn iter_from(starting_raw_key: Vec<u8>) -> Self::Iterator {
-		let prefix = G::prefix_hash();
+		let prefix = G::prefix_hash().to_vec();
 		Self::Iterator {
 			prefix,
 			previous_key: starting_raw_key,
@@ -404,11 +401,11 @@ impl<K: ReversibleKeyGenerator, V: FullCodec, G: StorageNMap<K, V>>
 	}
 
 	fn iter_keys() -> Self::KeyIterator {
-		Self::iter_keys_from(G::prefix_hash())
+		Self::iter_keys_from(G::prefix_hash().to_vec())
 	}
 
 	fn iter_keys_from(starting_raw_key: Vec<u8>) -> Self::KeyIterator {
-		let prefix = G::prefix_hash();
+		let prefix = G::prefix_hash().to_vec();
 		Self::KeyIterator {
 			prefix,
 			previous_key: starting_raw_key,
@@ -427,7 +424,7 @@ impl<K: ReversibleKeyGenerator, V: FullCodec, G: StorageNMap<K, V>>
 	}
 
 	fn translate<O: Decode, F: FnMut(K::Key, O) -> Option<V>>(mut f: F) {
-		let prefix = G::prefix_hash();
+		let prefix = G::prefix_hash().to_vec();
 		let mut previous_key = prefix.clone();
 		while let Some(next) =
 			sp_io::storage::next_key(&previous_key).filter(|n| n.starts_with(&prefix))
@@ -537,7 +534,7 @@ mod test_iterators {
 			type NMap = self::frame_system::NMap<Runtime>;
 
 			// All map iterator
-			let prefix = NMap::prefix_hash();
+			let prefix = NMap::prefix_hash().to_vec();
 
 			unhashed::put(&key_before_prefix(prefix.clone()), &1u64);
 			unhashed::put(&key_after_prefix(prefix.clone()), &1u64);
@@ -594,7 +591,7 @@ mod test_iterators {
 			assert_eq!(unhashed::get(&key_after_prefix(prefix.clone())), Some(1u64));
 
 			// Translate
-			let prefix = NMap::prefix_hash();
+			let prefix = NMap::prefix_hash().to_vec();
 
 			unhashed::put(&key_before_prefix(prefix.clone()), &1u64);
 			unhashed::put(&key_after_prefix(prefix.clone()), &1u64);
diff --git a/substrate/frame/support/src/storage/generator/value.rs b/substrate/frame/support/src/storage/generator/value.rs
index 4ffe40bac53ca..21166b39467bb 100644
--- a/substrate/frame/support/src/storage/generator/value.rs
+++ b/substrate/frame/support/src/storage/generator/value.rs
@@ -25,14 +25,14 @@ use codec::{Decode, Encode, EncodeLike, FullCodec};
 ///
 /// By default value is stored at:
 /// ```nocompile
-/// Twox128(module_prefix) ++ Twox128(storage_prefix)
+/// Twox128(pallet_prefix) ++ Twox128(storage_prefix)
 /// ```
 pub trait StorageValue<T: FullCodec> {
 	/// The type that get/take returns.
 	type Query;
 
-	/// Module prefix. Used for generating final key.
-	fn module_prefix() -> &'static [u8];
+	/// Pallet prefix. Used for generating final key.
+	fn pallet_prefix() -> &'static [u8];
 
 	/// Storage prefix. Used for generating final key.
 	fn storage_prefix() -> &'static [u8];
@@ -44,9 +44,7 @@ pub trait StorageValue<T: FullCodec> {
 	fn from_query_to_optional_value(v: Self::Query) -> Option<T>;
 
 	/// Generate the full key used in top storage.
-	fn storage_value_final_key() -> [u8; 32] {
-		crate::storage::storage_prefix(Self::module_prefix(), Self::storage_prefix())
-	}
+	fn storage_value_final_key() -> [u8; 32];
 }
 
 impl<T: FullCodec, G: StorageValue<T>> storage::StorageValue<T> for G {
@@ -97,10 +95,6 @@ impl<T: FullCodec, G: StorageValue<T>> storage::StorageValue<T> for G {
 		}
 	}
 
-	fn kill() {
-		unhashed::kill(&Self::storage_value_final_key())
-	}
-
 	fn mutate<R, F: FnOnce(&mut G::Query) -> R>(f: F) -> R {
 		Self::try_mutate(|v| Ok::<R, Never>(f(v))).expect("`Never` can not be constructed; qed")
 	}
@@ -142,6 +136,10 @@ impl<T: FullCodec, G: StorageValue<T>> storage::StorageValue<T> for G {
 		ret
 	}
 
+	fn kill() {
+		unhashed::kill(&Self::storage_value_final_key())
+	}
+
 	fn take() -> G::Query {
 		let key = Self::storage_value_final_key();
 		let value = unhashed::get(&key);
diff --git a/substrate/frame/support/src/storage/mod.rs b/substrate/frame/support/src/storage/mod.rs
index d52908fa366c6..851b0687bd122 100644
--- a/substrate/frame/support/src/storage/mod.rs
+++ b/substrate/frame/support/src/storage/mod.rs
@@ -191,7 +191,7 @@ pub trait StorageList<V: FullCodec> {
 
 	/// Append a single element.
 	///
-	/// Should not be called repeatedly; use `append_many` instead.  
+	/// Should not be called repeatedly; use `append_many` instead.
 /// Worst case linear `O(len)` with `len` being the number of elements in the list.
 	fn append_one<EncodeLikeValue>(item: EncodeLikeValue)
 	where
@@ -202,7 +202,7 @@ pub trait StorageList<V: FullCodec> {
 
 	/// Append many elements.
 	///
-	/// Should not be called repeatedly; use `appender` instead.  
+	/// Should not be called repeatedly; use `appender` instead.
 /// Worst case linear `O(len + items.count())` with `len` being the number of elements in the
 	/// list.
 	fn append_many<EncodeLikeValue, I>(items: I)
@@ -1273,15 +1273,15 @@ impl<T> Iterator for ChildTriePrefixIterator<T> {
 
 /// Trait for storage types that store all its value after a unique prefix.
 pub trait StoragePrefixedContainer {
-	/// Module prefix. Used for generating final key.
-	fn module_prefix() -> &'static [u8];
+	/// Pallet prefix. Used for generating final key.
+	fn pallet_prefix() -> &'static [u8];
 
 	/// Storage prefix. Used for generating final key.
 	fn storage_prefix() -> &'static [u8];
 
 	/// Final full prefix that prefixes all keys.
 	fn final_prefix() -> [u8; 32] {
-		crate::storage::storage_prefix(Self::module_prefix(), Self::storage_prefix())
+		crate::storage::storage_prefix(Self::pallet_prefix(), Self::storage_prefix())
 	}
 }
 
@@ -1289,18 +1289,18 @@ pub trait StoragePrefixedContainer {
 ///
 /// By default the final prefix is:
 /// ```nocompile
-/// Twox128(module_prefix) ++ Twox128(storage_prefix)
+/// Twox128(pallet_prefix) ++ Twox128(storage_prefix)
 /// ```
 pub trait StoragePrefixedMap<Value: FullCodec> {
-	/// Module prefix. Used for generating final key.
-	fn module_prefix() -> &'static [u8]; // TODO move to StoragePrefixedContainer
+	/// Pallet prefix. Used for generating final key.
+	fn pallet_prefix() -> &'static [u8]; // TODO move to StoragePrefixedContainer
 
 	/// Storage prefix. Used for generating final key.
 	fn storage_prefix() -> &'static [u8];
 
 	/// Final full prefix that prefixes all keys.
 	fn final_prefix() -> [u8; 32] {
-		crate::storage::storage_prefix(Self::module_prefix(), Self::storage_prefix())
+		crate::storage::storage_prefix(Self::pallet_prefix(), Self::storage_prefix())
 	}
 
 	/// Remove all values in the overlay and up to `limit` in the backend.
@@ -1624,7 +1624,7 @@ mod test {
 		TestExternalities::default().execute_with(|| {
 			struct MyStorage;
 			impl StoragePrefixedMap<u64> for MyStorage {
-				fn module_prefix() -> &'static [u8] {
+				fn pallet_prefix() -> &'static [u8] {
 					b"MyModule"
 				}
 
@@ -1701,7 +1701,7 @@ mod test {
 			impl generator::StorageValue<Digest> for Storage {
 				type Query = Digest;
 
-				fn module_prefix() -> &'static [u8] {
+				fn pallet_prefix() -> &'static [u8] {
 					b"MyModule"
 				}
 
@@ -1716,6 +1716,10 @@ mod test {
 				fn from_query_to_optional_value(v: Self::Query) -> Option<Digest> {
 					Some(v)
 				}
+
+				fn storage_value_final_key() -> [u8; 32] {
+					storage_prefix(Self::pallet_prefix(), Self::storage_prefix())
+				}
 			}
 
 			Storage::append(DigestItem::Other(Vec::new()));
@@ -1736,7 +1740,7 @@ mod test {
 				type Query = u64;
 				type Hasher = Twox64Concat;
 
-				fn module_prefix() -> &'static [u8] {
+				fn pallet_prefix() -> &'static [u8] {
 					b"MyModule"
 				}
 
@@ -1744,6 +1748,10 @@ mod test {
 					b"MyStorageMap"
 				}
 
+				fn prefix_hash() -> [u8; 32] {
+					storage_prefix(Self::pallet_prefix(), Self::storage_prefix())
+				}
+
 				fn from_optional_value_to_query(v: Option<u64>) -> Self::Query {
 					v.unwrap_or_default()
 				}
diff --git a/substrate/frame/support/src/storage/types/counted_map.rs b/substrate/frame/support/src/storage/types/counted_map.rs
index 5b750a74098b8..50e2c678248c9 100644
--- a/substrate/frame/support/src/storage/types/counted_map.rs
+++ b/substrate/frame/support/src/storage/types/counted_map.rs
@@ -107,7 +107,7 @@ where
 	/// The prefix used to generate the key of the map.
 	pub fn map_storage_final_prefix() -> Vec<u8> {
 		use crate::storage::generator::StorageMap;
-		<Self as MapWrapper>::Map::prefix_hash()
+		<Self as MapWrapper>::Map::prefix_hash().to_vec()
 	}
 
 	/// Get the storage key used to fetch a value corresponding to a specific key.
diff --git a/substrate/frame/support/src/storage/types/counted_nmap.rs b/substrate/frame/support/src/storage/types/counted_nmap.rs
index 54f8e57cf242d..5da31c0592254 100644
--- a/substrate/frame/support/src/storage/types/counted_nmap.rs
+++ b/substrate/frame/support/src/storage/types/counted_nmap.rs
@@ -104,7 +104,7 @@ where
 	/// The prefix used to generate the key of the map.
 	pub fn map_storage_final_prefix() -> Vec<u8> {
 		use crate::storage::generator::StorageNMap;
-		<Self as MapWrapper>::Map::prefix_hash()
+		<Self as MapWrapper>::Map::prefix_hash().to_vec()
 	}
 
 	/// Get the storage key used to fetch a value corresponding to a specific key.
diff --git a/substrate/frame/support/src/storage/types/double_map.rs b/substrate/frame/support/src/storage/types/double_map.rs
index e787921841032..519ffcbafadee 100644
--- a/substrate/frame/support/src/storage/types/double_map.rs
+++ b/substrate/frame/support/src/storage/types/double_map.rs
@@ -117,12 +117,17 @@ where
 	type Query = QueryKind::Query;
 	type Hasher1 = Hasher1;
 	type Hasher2 = Hasher2;
-	fn module_prefix() -> &'static [u8] {
+	fn pallet_prefix() -> &'static [u8] {
 		Prefix::pallet_prefix().as_bytes()
 	}
+
 	fn storage_prefix() -> &'static [u8] {
 		Prefix::STORAGE_PREFIX.as_bytes()
 	}
+	fn prefix_hash() -> [u8; 32] {
+		Prefix::prefix_hash()
+	}
+
 	fn from_optional_value_to_query(v: Option<Value>) -> Self::Query {
 		QueryKind::from_optional_value_to_query(v)
 	}
@@ -145,8 +150,8 @@ where
 	OnEmpty: Get<QueryKind::Query> + 'static,
 	MaxValues: Get<Option<u32>>,
 {
-	fn module_prefix() -> &'static [u8] {
-		<Self as crate::storage::generator::StorageDoubleMap<Key1, Key2, Value>>::module_prefix()
+	fn pallet_prefix() -> &'static [u8] {
+		<Self as crate::storage::generator::StorageDoubleMap<Key1, Key2, Value>>::pallet_prefix()
 	}
 	fn storage_prefix() -> &'static [u8] {
 		<Self as crate::storage::generator::StorageDoubleMap<Key1, Key2, Value>>::storage_prefix()
@@ -691,7 +696,7 @@ where
 {
 	fn storage_info() -> Vec<StorageInfo> {
 		vec![StorageInfo {
-			pallet_name: Self::module_prefix().to_vec(),
+			pallet_name: Self::pallet_prefix().to_vec(),
 			storage_name: Self::storage_prefix().to_vec(),
 			prefix: Self::final_prefix().to_vec(),
 			max_values: MaxValues::get(),
@@ -722,7 +727,7 @@ where
 {
 	fn partial_storage_info() -> Vec<StorageInfo> {
 		vec![StorageInfo {
-			pallet_name: Self::module_prefix().to_vec(),
+			pallet_name: Self::pallet_prefix().to_vec(),
 			storage_name: Self::storage_prefix().to_vec(),
 			prefix: Self::final_prefix().to_vec(),
 			max_values: MaxValues::get(),
diff --git a/substrate/frame/support/src/storage/types/map.rs b/substrate/frame/support/src/storage/types/map.rs
index 816b90162f644..7f936a8a35a61 100644
--- a/substrate/frame/support/src/storage/types/map.rs
+++ b/substrate/frame/support/src/storage/types/map.rs
@@ -83,12 +83,15 @@ where
 {
 	type Query = QueryKind::Query;
 	type Hasher = Hasher;
-	fn module_prefix() -> &'static [u8] {
+	fn pallet_prefix() -> &'static [u8] {
 		Prefix::pallet_prefix().as_bytes()
 	}
 	fn storage_prefix() -> &'static [u8] {
 		Prefix::STORAGE_PREFIX.as_bytes()
 	}
+	fn prefix_hash() -> [u8; 32] {
+		Prefix::prefix_hash()
+	}
 	fn from_optional_value_to_query(v: Option<Value>) -> Self::Query {
 		QueryKind::from_optional_value_to_query(v)
 	}
@@ -108,8 +111,8 @@ where
 	OnEmpty: Get<QueryKind::Query> + 'static,
 	MaxValues: Get<Option<u32>>,
 {
-	fn module_prefix() -> &'static [u8] {
-		<Self as crate::storage::generator::StorageMap<Key, Value>>::module_prefix()
+	fn pallet_prefix() -> &'static [u8] {
+		<Self as crate::storage::generator::StorageMap<Key, Value>>::pallet_prefix()
 	}
 	fn storage_prefix() -> &'static [u8] {
 		<Self as crate::storage::generator::StorageMap<Key, Value>>::storage_prefix()
@@ -469,7 +472,7 @@ where
 {
 	fn storage_info() -> Vec<StorageInfo> {
 		vec![StorageInfo {
-			pallet_name: Self::module_prefix().to_vec(),
+			pallet_name: Self::pallet_prefix().to_vec(),
 			storage_name: Self::storage_prefix().to_vec(),
 			prefix: Self::final_prefix().to_vec(),
 			max_values: MaxValues::get(),
@@ -497,7 +500,7 @@ where
 {
 	fn partial_storage_info() -> Vec<StorageInfo> {
 		vec![StorageInfo {
-			pallet_name: Self::module_prefix().to_vec(),
+			pallet_name: Self::pallet_prefix().to_vec(),
 			storage_name: Self::storage_prefix().to_vec(),
 			prefix: Self::final_prefix().to_vec(),
 			max_values: MaxValues::get(),
diff --git a/substrate/frame/support/src/storage/types/nmap.rs b/substrate/frame/support/src/storage/types/nmap.rs
index e9a4b12dd43a1..406fd42eaf7b3 100755
--- a/substrate/frame/support/src/storage/types/nmap.rs
+++ b/substrate/frame/support/src/storage/types/nmap.rs
@@ -72,12 +72,15 @@ where
 	MaxValues: Get<Option<u32>>,
 {
 	type Query = QueryKind::Query;
-	fn module_prefix() -> &'static [u8] {
+	fn pallet_prefix() -> &'static [u8] {
 		Prefix::pallet_prefix().as_bytes()
 	}
 	fn storage_prefix() -> &'static [u8] {
 		Prefix::STORAGE_PREFIX.as_bytes()
 	}
+	fn prefix_hash() -> [u8; 32] {
+		Prefix::prefix_hash()
+	}
 	fn from_optional_value_to_query(v: Option<Value>) -> Self::Query {
 		QueryKind::from_optional_value_to_query(v)
 	}
@@ -96,8 +99,8 @@ where
 	OnEmpty: Get<QueryKind::Query> + 'static,
 	MaxValues: Get<Option<u32>>,
 {
-	fn module_prefix() -> &'static [u8] {
-		<Self as crate::storage::generator::StorageNMap<Key, Value>>::module_prefix()
+	fn pallet_prefix() -> &'static [u8] {
+		<Self as crate::storage::generator::StorageNMap<Key, Value>>::pallet_prefix()
 	}
 	fn storage_prefix() -> &'static [u8] {
 		<Self as crate::storage::generator::StorageNMap<Key, Value>>::storage_prefix()
@@ -581,7 +584,7 @@ where
 {
 	fn storage_info() -> Vec<StorageInfo> {
 		vec![StorageInfo {
-			pallet_name: Self::module_prefix().to_vec(),
+			pallet_name: Self::pallet_prefix().to_vec(),
 			storage_name: Self::storage_prefix().to_vec(),
 			prefix: Self::final_prefix().to_vec(),
 			max_values: MaxValues::get(),
@@ -607,7 +610,7 @@ where
 {
 	fn partial_storage_info() -> Vec<StorageInfo> {
 		vec![StorageInfo {
-			pallet_name: Self::module_prefix().to_vec(),
+			pallet_name: Self::pallet_prefix().to_vec(),
 			storage_name: Self::storage_prefix().to_vec(),
 			prefix: Self::final_prefix().to_vec(),
 			max_values: MaxValues::get(),
diff --git a/substrate/frame/support/src/storage/types/value.rs b/substrate/frame/support/src/storage/types/value.rs
index 3c7f24715ac94..3e1f2fe9551d3 100644
--- a/substrate/frame/support/src/storage/types/value.rs
+++ b/substrate/frame/support/src/storage/types/value.rs
@@ -49,7 +49,7 @@ where
 	OnEmpty: crate::traits::Get<QueryKind::Query> + 'static,
 {
 	type Query = QueryKind::Query;
-	fn module_prefix() -> &'static [u8] {
+	fn pallet_prefix() -> &'static [u8] {
 		Prefix::pallet_prefix().as_bytes()
 	}
 	fn storage_prefix() -> &'static [u8] {
@@ -61,6 +61,9 @@ where
 	fn from_query_to_optional_value(v: Self::Query) -> Option<Value> {
 		QueryKind::from_query_to_optional_value(v)
 	}
+	fn storage_value_final_key() -> [u8; 32] {
+		Prefix::prefix_hash()
+	}
 }
 
 impl<Prefix, Value, QueryKind, OnEmpty> StorageValue<Prefix, Value, QueryKind, OnEmpty>
@@ -251,7 +254,7 @@ where
 {
 	fn storage_info() -> Vec<StorageInfo> {
 		vec![StorageInfo {
-			pallet_name: Self::module_prefix().to_vec(),
+			pallet_name: Self::pallet_prefix().to_vec(),
 			storage_name: Self::storage_prefix().to_vec(),
 			prefix: Self::hashed_key().to_vec(),
 			max_values: Some(1),
@@ -271,7 +274,7 @@ where
 {
 	fn partial_storage_info() -> Vec<StorageInfo> {
 		vec![StorageInfo {
-			pallet_name: Self::module_prefix().to_vec(),
+			pallet_name: Self::pallet_prefix().to_vec(),
 			storage_name: Self::storage_prefix().to_vec(),
 			prefix: Self::hashed_key().to_vec(),
 			max_values: Some(1),
diff --git a/substrate/frame/support/src/tests/storage_alias.rs b/substrate/frame/support/src/tests/storage_alias.rs
index 05ea1b5f712c6..6fc5cfefdad19 100644
--- a/substrate/frame/support/src/tests/storage_alias.rs
+++ b/substrate/frame/support/src/tests/storage_alias.rs
@@ -112,7 +112,7 @@ fn verbatim_attribute() {
 		assert_eq!(1, Value::get().unwrap());
 
 		// The prefix is the one we declared above.
-		assert_eq!(&b"Test"[..], Value::module_prefix());
+		assert_eq!(&b"Test"[..], Value::pallet_prefix());
 	});
 }
 
@@ -130,7 +130,7 @@ fn pallet_name_attribute() {
 
 		// The prefix is the pallet name. In this case the pallet name is `System` as declared in
 		// `construct_runtime!`.
-		assert_eq!(&b"System"[..], Value::<Runtime>::module_prefix());
+		assert_eq!(&b"System"[..], Value::<Runtime>::pallet_prefix());
 	});
 }
 
@@ -154,7 +154,7 @@ fn dynamic_attribute() {
 		assert_eq!(1, Value::<Prefix>::get().unwrap());
 
 		// The prefix is the one we declared above.
-		assert_eq!(&b"Hello"[..], Value::<Prefix>::module_prefix());
+		assert_eq!(&b"Hello"[..], Value::<Prefix>::pallet_prefix());
 	});
 }
 
@@ -166,13 +166,13 @@ fn storage_alias_guess() {
 		#[crate::storage_alias]
 		pub type Value = StorageValue<Test, u32>;
 
-		assert_eq!(&b"Test"[..], Value::module_prefix());
+		assert_eq!(&b"Test"[..], Value::pallet_prefix());
 
 		// The macro will use the pallet name as prefix.
 		#[crate::storage_alias]
 		pub type PalletValue<T: Config> = StorageValue<Pallet<T>, u32>;
 
-		assert_eq!(&b"System"[..], PalletValue::<Runtime>::module_prefix());
+		assert_eq!(&b"System"[..], PalletValue::<Runtime>::pallet_prefix());
 	});
 }
 
diff --git a/substrate/frame/support/src/traits/metadata.rs b/substrate/frame/support/src/traits/metadata.rs
index 85d8f9a5a74e0..bd29b60091613 100644
--- a/substrate/frame/support/src/traits/metadata.rs
+++ b/substrate/frame/support/src/traits/metadata.rs
@@ -31,6 +31,8 @@ pub trait PalletInfo {
 	fn index<P: 'static>() -> Option<usize>;
 	/// Convert the given pallet `P` into its name as configured in the runtime.
 	fn name<P: 'static>() -> Option<&'static str>;
+	/// The `twox_128` hash of the pallet name as configured in the runtime.
+	fn name_hash<P: 'static>() -> Option<[u8; 16]>;
 	/// Convert the given pallet `P` into its Rust module name as used in `construct_runtime!`.
 	fn module_name<P: 'static>() -> Option<&'static str>;
 	/// Convert the given pallet `P` into its containing crate version.
@@ -59,6 +61,8 @@ pub trait PalletInfoAccess {
 	fn index() -> usize;
 	/// Name of the pallet as configured in the runtime.
 	fn name() -> &'static str;
+	/// The `twox_128` hash of the pallet name as configured in the runtime.
+	fn name_hash() -> [u8; 16];
 	/// Name of the Rust module containing the pallet.
 	fn module_name() -> &'static str;
 	/// Version of the crate containing the pallet.
@@ -281,6 +285,7 @@ pub trait GetStorageVersion {
 #[cfg(test)]
 mod tests {
 	use super::*;
+	use sp_core::twox_128;
 
 	struct Pallet1;
 	impl PalletInfoAccess for Pallet1 {
@@ -290,6 +295,9 @@ mod tests {
 		fn name() -> &'static str {
 			"Pallet1"
 		}
+		fn name_hash() -> [u8; 16] {
+			twox_128(Self::name().as_bytes())
+		}
 		fn module_name() -> &'static str {
 			"pallet1"
 		}
@@ -305,6 +313,11 @@ mod tests {
 		fn name() -> &'static str {
 			"Pallet2"
 		}
+
+		fn name_hash() -> [u8; 16] {
+			twox_128(Self::name().as_bytes())
+		}
+
 		fn module_name() -> &'static str {
 			"pallet2"
 		}
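// Illustrative sketch (not part of this diff): `name_hash` exposes the
// `twox_128` of the runtime-configured pallet name, i.e. the first 16 bytes of
// every storage key the pallet owns. Assuming the `Pallet1` test impl above
// and a hypothetical `Foo` storage item:
fn demo_name_hash() {
	use sp_core::twox_128;
	assert_eq!(Pallet1::name_hash(), twox_128(b"Pallet1"));
	// The full 32-byte prefix of a storage item is the pallet name hash
	// followed by the storage prefix hash.
	let mut key = [0u8; 32];
	key[..16].copy_from_slice(&Pallet1::name_hash());
	key[16..].copy_from_slice(&twox_128(b"Foo"));
}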
diff --git a/substrate/frame/support/src/traits/storage.rs b/substrate/frame/support/src/traits/storage.rs
index e0ce1c0fbd317..fe1b9bf13bb02 100644
--- a/substrate/frame/support/src/traits/storage.rs
+++ b/substrate/frame/support/src/traits/storage.rs
@@ -61,8 +61,35 @@ pub trait StorageInstance {
 	/// Prefix of a pallet to isolate it from other pallets.
 	fn pallet_prefix() -> &'static str;
 
+	/// Returns the prefix hash of the pallet instance.
+	///
+	/// NOTE: This hash must be `twox_128(pallet_prefix())`.
+	/// Do not implement this function by hand; use only the default or macro-generated impls.
+	fn pallet_prefix_hash() -> [u8; 16] {
+		sp_io::hashing::twox_128(Self::pallet_prefix().as_bytes())
+	}
+
 	/// Prefix given to a storage to isolate from other storages in the pallet.
 	const STORAGE_PREFIX: &'static str;
+
+	/// Returns the prefix hash of the storage instance.
+	///
+	/// NOTE: This hash must be `twox_128(STORAGE_PREFIX)`.
+	fn storage_prefix_hash() -> [u8; 16] {
+		sp_io::hashing::twox_128(Self::STORAGE_PREFIX.as_bytes())
+	}
+
+	/// Returns the full prefix hash of the instance.
+	///
+	/// NOTE: This hash must be `twox_128(pallet_prefix()) ++ twox_128(STORAGE_PREFIX)`.
+	/// Do not implement this function by hand; use only the default or macro-generated impls.
+	fn prefix_hash() -> [u8; 32] {
+		let mut final_key = [0u8; 32];
+		final_key[..16].copy_from_slice(&Self::pallet_prefix_hash());
+		final_key[16..].copy_from_slice(&Self::storage_prefix_hash());
+
+		final_key
+	}
 }
 
 /// Metadata about storage from the runtime.
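// Illustrative sketch (not part of this diff): a hand-written `StorageInstance`
// showing what the new default `prefix_hash` computes. `DemoPrefix` is a
// hypothetical instance, not part of any pallet.
struct DemoPrefix;
impl StorageInstance for DemoPrefix {
	fn pallet_prefix() -> &'static str {
		"Demo"
	}
	const STORAGE_PREFIX: &'static str = "Value";
}
// `DemoPrefix::prefix_hash()` returns `twox_128(b"Demo") ++ twox_128(b"Value")`
// as a `[u8; 32]`, which is exactly what the new `storage_value_final_key`
// returns for a `StorageValue` parameterised with this instance.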
diff --git a/substrate/frame/support/src/traits/tokens.rs b/substrate/frame/support/src/traits/tokens.rs
index 253b49c6671f8..3635311e64357 100644
--- a/substrate/frame/support/src/traits/tokens.rs
+++ b/substrate/frame/support/src/traits/tokens.rs
@@ -31,6 +31,7 @@ pub mod pay;
 pub use misc::{
 	AssetId, Balance, BalanceStatus, ConversionFromAssetBalance, ConversionToAssetBalance,
 	ConvertRank, DepositConsequence, ExistenceRequirement, Fortitude, GetSalary, Locker, Precision,
-	Preservation, Provenance, Restriction, WithdrawConsequence, WithdrawReasons,
+	Preservation, Provenance, Restriction, UnityAssetBalanceConversion, WithdrawConsequence,
+	WithdrawReasons,
 };
 pub use pay::{Pay, PayFromAccount, PaymentStatus};
diff --git a/substrate/frame/support/src/traits/tokens/misc.rs b/substrate/frame/support/src/traits/tokens/misc.rs
index e8587be101794..fd497bc4eda6f 100644
--- a/substrate/frame/support/src/traits/tokens/misc.rs
+++ b/substrate/frame/support/src/traits/tokens/misc.rs
@@ -277,6 +277,26 @@ pub trait ConversionFromAssetBalance<AssetBalance, AssetId, OutBalance> {
 		balance: AssetBalance,
 		asset_id: AssetId,
 	) -> Result<OutBalance, Self::Error>;
+	/// Ensures that a conversion for the `asset_id` will be successful if done immediately after
+	/// this call.
+	#[cfg(feature = "runtime-benchmarks")]
+	fn ensure_successful(asset_id: AssetId);
+}
+
+/// Implements [`ConversionFromAssetBalance`], enabling a 1:1 conversion of the asset balance
+/// value to the balance.
+pub struct UnityAssetBalanceConversion;
+impl<AssetBalance, AssetId, OutBalance>
+	ConversionFromAssetBalance<AssetBalance, AssetId, OutBalance> for UnityAssetBalanceConversion
+where
+	AssetBalance: Into<OutBalance>,
+{
+	type Error = ();
+	fn from_asset_balance(balance: AssetBalance, _: AssetId) -> Result<OutBalance, Self::Error> {
+		Ok(balance.into())
+	}
+	#[cfg(feature = "runtime-benchmarks")]
+	fn ensure_successful(_: AssetId) {}
 }
 
 /// Trait to handle NFT locking mechanism to ensure interactions with the asset can be implemented
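// Illustrative sketch (not part of this diff): `UnityAssetBalanceConversion`
// is infallible and returns the input balance unchanged, which suits runtimes
// where the asset kind is `()` and asset balances map 1:1 to native balances.
fn demo_unity_conversion() {
	use frame_support::traits::tokens::{
		ConversionFromAssetBalance, UnityAssetBalanceConversion,
	};
	let out: Result<u64, ()> = <UnityAssetBalanceConversion as ConversionFromAssetBalance<
		u64,
		(),
		u64,
	>>::from_asset_balance(100, ());
	assert_eq!(out, Ok(100));
}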
diff --git a/substrate/frame/support/src/traits/tokens/pay.rs b/substrate/frame/support/src/traits/tokens/pay.rs
index 78f8e7b873480..18af7e5e54838 100644
--- a/substrate/frame/support/src/traits/tokens/pay.rs
+++ b/substrate/frame/support/src/traits/tokens/pay.rs
@@ -23,7 +23,7 @@ use sp_core::{RuntimeDebug, TypedGet};
 use sp_runtime::DispatchError;
 use sp_std::fmt::Debug;
 
-use super::{fungible, Balance, Preservation::Expendable};
+use super::{fungible, fungibles, Balance, Preservation::Expendable};
 
 /// Can be implemented by `PayFromAccount` using a `fungible` impl, but can also be implemented with
 /// XCM/MultiAsset and made generic over assets.
@@ -107,3 +107,36 @@ impl<A: TypedGet, F: fungible::Mutate<A::Type>> Pay for PayFromAccount<F, A> {
 	#[cfg(feature = "runtime-benchmarks")]
 	fn ensure_concluded(_: Self::Id) {}
 }
+
+/// Simple implementation of `Pay` for assets which makes a payment from a "pot", i.e. a single
+/// account.
+pub struct PayAssetFromAccount<F, A>(sp_std::marker::PhantomData<(F, A)>);
+impl<A, F> frame_support::traits::tokens::Pay for PayAssetFromAccount<F, A>
+where
+	A: TypedGet,
+	F: fungibles::Mutate<A::Type> + fungibles::Create<A::Type>,
+{
+	type Balance = F::Balance;
+	type Beneficiary = A::Type;
+	type AssetKind = F::AssetId;
+	type Id = ();
+	type Error = DispatchError;
+	fn pay(
+		who: &Self::Beneficiary,
+		asset: Self::AssetKind,
+		amount: Self::Balance,
+	) -> Result<Self::Id, Self::Error> {
+		<F as fungibles::Mutate<_>>::transfer(asset, &A::get(), who, amount, Expendable)?;
+		Ok(())
+	}
+	fn check_payment(_: ()) -> PaymentStatus {
+		PaymentStatus::Success
+	}
+	#[cfg(feature = "runtime-benchmarks")]
+	fn ensure_successful(_: &Self::Beneficiary, asset: Self::AssetKind, amount: Self::Balance) {
+		<F as fungibles::Create<_>>::create(asset.clone(), A::get(), true, amount).unwrap();
+		<F as fungibles::Mutate<_>>::mint_into(asset, &A::get(), amount).unwrap();
+	}
+	#[cfg(feature = "runtime-benchmarks")]
+	fn ensure_concluded(_: Self::Id) {}
+}
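// Illustrative sketch (not part of this diff): a runtime could select this
// paymaster to pay treasury spends in assets out of the pot account. `Runtime`,
// `Assets` and `TreasuryAccount` are hypothetical runtime items and the other
// associated types are elided:
//
// impl pallet_treasury::Config for Runtime {
//     // ...
//     type Paymaster = PayAssetFromAccount<Assets, TreasuryAccount>;
//     type BalanceConverter = UnityAssetBalanceConversion;
//     // ...
// }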
diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr b/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr
index cc2c2e160095d..08954bb6ab5c5 100644
--- a/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr
+++ b/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr
@@ -148,7 +148,11 @@ error[E0277]: the trait bound `Runtime: Config` is not satisfied in `frame_syste
    | ||_- in this macro invocation
 ...  |
    |
-   = note: required because it appears within the type `Event<Runtime>`
+note: required because it appears within the type `Event<Runtime>`
+  --> $WORKSPACE/substrate/frame/system/src/lib.rs
+   |
+   |     pub enum Event<T: Config> {
+   |              ^^^^^
 note: required by a bound in `From`
   --> $RUST/core/src/convert/mod.rs
    |
@@ -169,7 +173,11 @@ error[E0277]: the trait bound `Runtime: Config` is not satisfied in `frame_syste
    | ||_- in this macro invocation
 ...  |
    |
-   = note: required because it appears within the type `Event<Runtime>`
+note: required because it appears within the type `Event<Runtime>`
+  --> $WORKSPACE/substrate/frame/system/src/lib.rs
+   |
+   |     pub enum Event<T: Config> {
+   |              ^^^^^
 note: required by a bound in `TryInto`
   --> $RUST/core/src/convert/mod.rs
    |
diff --git a/substrate/frame/support/test/tests/derive_impl_ui/bad_default_impl_path.stderr b/substrate/frame/support/test/tests/derive_impl_ui/bad_default_impl_path.stderr
index 5cfbd8c886249..c91226ea9c310 100644
--- a/substrate/frame/support/test/tests/derive_impl_ui/bad_default_impl_path.stderr
+++ b/substrate/frame/support/test/tests/derive_impl_ui/bad_default_impl_path.stderr
@@ -1,7 +1,5 @@
-error: cannot find macro `__export_tokens_tt_tiger` in this scope
-  --> tests/derive_impl_ui/bad_default_impl_path.rs:59:1
+error: cannot find macro `Tiger` in this scope
+  --> tests/derive_impl_ui/bad_default_impl_path.rs:59:15
    |
 59 | #[derive_impl(Tiger as Animal)]
-   | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-   |
-   = note: this error originates in the macro `frame_support::macro_magic::forward_tokens` (in Nightly builds, run with -Z macro-backtrace for more info)
+   |               ^^^^^
diff --git a/substrate/frame/support/test/tests/derive_impl_ui/inject_runtime_type_fails_when_type_not_in_scope.stderr b/substrate/frame/support/test/tests/derive_impl_ui/inject_runtime_type_fails_when_type_not_in_scope.stderr
index 79b50a940b802..f3ac6b2328110 100644
--- a/substrate/frame/support/test/tests/derive_impl_ui/inject_runtime_type_fails_when_type_not_in_scope.stderr
+++ b/substrate/frame/support/test/tests/derive_impl_ui/inject_runtime_type_fails_when_type_not_in_scope.stderr
@@ -7,4 +7,4 @@ error[E0412]: cannot find type `RuntimeCall` in this scope
 35 | #[derive_impl(Pallet)] // Injects type RuntimeCall = RuntimeCall;
    | ---------------------- in this macro invocation
    |
-   = note: this error originates in the macro `__export_tokens_tt_pallet` which comes from the expansion of the macro `frame_support::macro_magic::forward_tokens` (in Nightly builds, run with -Z macro-backtrace for more info)
+   = note: this error originates in the macro `Pallet` which comes from the expansion of the macro `frame_support::macro_magic::forward_tokens_verbatim` (in Nightly builds, run with -Z macro-backtrace for more info)
diff --git a/substrate/frame/support/test/tests/derive_no_bound_ui/debug.stderr b/substrate/frame/support/test/tests/derive_no_bound_ui/debug.stderr
index d86292d71b7a1..3291f658f10e1 100644
--- a/substrate/frame/support/test/tests/derive_no_bound_ui/debug.stderr
+++ b/substrate/frame/support/test/tests/derive_no_bound_ui/debug.stderr
@@ -5,4 +5,4 @@ error[E0277]: `<T as Config>::C` doesn't implement `std::fmt::Debug`
    |     ^ `<T as Config>::C` cannot be formatted using `{:?}` because it doesn't implement `std::fmt::Debug`
    |
    = help: the trait `std::fmt::Debug` is not implemented for `<T as Config>::C`
-   = note: required for the cast from `<T as Config>::C` to the object type `dyn std::fmt::Debug`
+   = note: required for the cast from `&<T as Config>::C` to `&dyn std::fmt::Debug`
diff --git a/substrate/frame/support/test/tests/pallet.rs b/substrate/frame/support/test/tests/pallet.rs
index 1898246470c73..83ae5b9253cec 100644
--- a/substrate/frame/support/test/tests/pallet.rs
+++ b/substrate/frame/support/test/tests/pallet.rs
@@ -210,12 +210,13 @@ pub mod pallet {
 	{
 		/// Doc comment put in metadata
 		#[pallet::call_index(0)]
-		#[pallet::weight(Weight::from_parts(*_foo as u64, 0))]
+		#[pallet::weight(Weight::from_parts(*foo as u64, 0))]
 		pub fn foo(
 			origin: OriginFor<T>,
-			#[pallet::compact] _foo: u32,
+			#[pallet::compact] foo: u32,
 			_bar: u32,
 		) -> DispatchResultWithPostInfo {
+			let _ = foo;
 			let _ = T::AccountId::from(SomeType1); // Test for where clause
 			let _ = T::AccountId::from(SomeType3); // Test for where clause
 			let _ = origin;
diff --git a/substrate/frame/support/test/tests/pallet_instance.rs b/substrate/frame/support/test/tests/pallet_instance.rs
index 8d2d52d18852a..724734ec4fc9d 100644
--- a/substrate/frame/support/test/tests/pallet_instance.rs
+++ b/substrate/frame/support/test/tests/pallet_instance.rs
@@ -87,12 +87,13 @@ pub mod pallet {
 	impl<T: Config<I>, I: 'static> Pallet<T, I> {
 		/// Doc comment put in metadata
 		#[pallet::call_index(0)]
-		#[pallet::weight(Weight::from_parts(*_foo as u64, 0))]
+		#[pallet::weight(Weight::from_parts(*foo as u64, 0))]
 		pub fn foo(
 			origin: OriginFor<T>,
-			#[pallet::compact] _foo: u32,
+			#[pallet::compact] foo: u32,
 		) -> DispatchResultWithPostInfo {
 			let _ = origin;
+			let _ = foo;
 			Self::deposit_event(Event::Something(3));
 			Ok(().into())
 		}
diff --git a/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr b/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr
index c86930f8a64e3..08ea7c0bec3a5 100644
--- a/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr
+++ b/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr
@@ -19,7 +19,7 @@ error[E0277]: `<T as pallet::Config>::Bar` doesn't implement `std::fmt::Debug`
    |
    = help: the trait `std::fmt::Debug` is not implemented for `<T as pallet::Config>::Bar`
    = note: required for `&<T as pallet::Config>::Bar` to implement `std::fmt::Debug`
-   = note: required for the cast from `&<T as pallet::Config>::Bar` to the object type `dyn std::fmt::Debug`
+   = note: required for the cast from `&&<T as pallet::Config>::Bar` to `&dyn std::fmt::Debug`
 
 error[E0277]: the trait bound `<T as pallet::Config>::Bar: Clone` is not satisfied
   --> tests/pallet_ui/call_argument_invalid_bound.rs:38:36
diff --git a/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr b/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr
index 1b04f44c78feb..80316fcd24897 100644
--- a/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr
+++ b/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr
@@ -19,7 +19,7 @@ error[E0277]: `<T as pallet::Config>::Bar` doesn't implement `std::fmt::Debug`
    |
    = help: the trait `std::fmt::Debug` is not implemented for `<T as pallet::Config>::Bar`
    = note: required for `&<T as pallet::Config>::Bar` to implement `std::fmt::Debug`
-   = note: required for the cast from `&<T as pallet::Config>::Bar` to the object type `dyn std::fmt::Debug`
+   = note: required for the cast from `&&<T as pallet::Config>::Bar` to `&dyn std::fmt::Debug`
 
 error[E0277]: the trait bound `<T as pallet::Config>::Bar: Clone` is not satisfied
   --> tests/pallet_ui/call_argument_invalid_bound_2.rs:38:36
diff --git a/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr b/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr
index 7429bce050c28..d45b74bad8428 100644
--- a/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr
+++ b/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr
@@ -20,7 +20,7 @@ error[E0277]: `Bar` doesn't implement `std::fmt::Debug`
    = help: the trait `std::fmt::Debug` is not implemented for `Bar`
    = note: add `#[derive(Debug)]` to `Bar` or manually `impl std::fmt::Debug for Bar`
    = note: required for `&Bar` to implement `std::fmt::Debug`
-   = note: required for the cast from `&Bar` to the object type `dyn std::fmt::Debug`
+   = note: required for the cast from `&&Bar` to `&dyn std::fmt::Debug`
 help: consider annotating `Bar` with `#[derive(Debug)]`
    |
 34 +     #[derive(Debug)]
diff --git a/substrate/frame/support/test/tests/pallet_ui/call_weight_unchecked_warning.rs b/substrate/frame/support/test/tests/pallet_ui/call_weight_unchecked_warning.rs
new file mode 100644
index 0000000000000..8d93638f5a51e
--- /dev/null
+++ b/substrate/frame/support/test/tests/pallet_ui/call_weight_unchecked_warning.rs
@@ -0,0 +1,38 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#[frame_support::pallet]
+mod pallet {
+	use frame_support::pallet_prelude::DispatchResult;
+	use frame_system::pallet_prelude::OriginFor;
+
+	#[pallet::config]
+	pub trait Config: frame_system::Config {}
+
+	#[pallet::pallet]
+	pub struct Pallet<T>(core::marker::PhantomData<T>);
+
+	#[pallet::call]
+	impl<T: Config> Pallet<T> {
+		#[pallet::call_index(0)]
+		#[pallet::weight(*_unused)]
+		pub fn foo(_: OriginFor<T>, _unused: u64) -> DispatchResult { Ok(()) }
+	}
+}
+
+fn main() {
+}
diff --git a/substrate/frame/support/test/tests/pallet_ui/call_weight_unchecked_warning.stderr b/substrate/frame/support/test/tests/pallet_ui/call_weight_unchecked_warning.stderr
new file mode 100644
index 0000000000000..89fc1e0820f5e
--- /dev/null
+++ b/substrate/frame/support/test/tests/pallet_ui/call_weight_unchecked_warning.stderr
@@ -0,0 +1,12 @@
+error: use of deprecated constant `pallet::warnings::UncheckedWeightWitness_0::_w`:
+               It is deprecated to not check weight witness data.
+               Please instead ensure that all witness data for weight calculation is checked before usage.
+
+               For more info see:
+                   <https://github.com/paritytech/polkadot-sdk/pull/1818>
+  --> tests/pallet_ui/call_weight_unchecked_warning.rs:33:31
+   |
+33 |         pub fn foo(_: OriginFor<T>, _unused: u64) -> DispatchResult { Ok(()) }
+   |                                     ^^^^^^^
+   |
+   = note: `-D deprecated` implied by `-D warnings`
diff --git a/substrate/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.stderr b/substrate/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.stderr
index 74ee0e4aeba5c..531e8bdffeb0c 100644
--- a/substrate/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.stderr
+++ b/substrate/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.stderr
@@ -30,13 +30,13 @@ error[E0277]: the trait bound `Vec<u8>: MaxEncodedLen` is not satisfied
    |               ^^^^^^ the trait `MaxEncodedLen` is not implemented for `Vec<u8>`
    |
    = help: the following other types implement trait `MaxEncodedLen`:
-             ()
-             (TupleElement0, TupleElement1)
-             (TupleElement0, TupleElement1, TupleElement2)
-             (TupleElement0, TupleElement1, TupleElement2, TupleElement3)
-             (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4)
-             (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5)
-             (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6)
-             (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6, TupleElement7)
+             bool
+             i8
+             i16
+             i32
+             i64
+             i128
+             u8
+             u16
            and $N others
    = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageMyStorage<T>, Vec<u8>>` to implement `StorageInfoTrait`
diff --git a/substrate/frame/support/test/tests/pallet_ui/error_does_not_derive_pallet_error.stderr b/substrate/frame/support/test/tests/pallet_ui/error_does_not_derive_pallet_error.stderr
index cfa0d465990a2..ea1d0ed99cd39 100644
--- a/substrate/frame/support/test/tests/pallet_ui/error_does_not_derive_pallet_error.stderr
+++ b/substrate/frame/support/test/tests/pallet_ui/error_does_not_derive_pallet_error.stderr
@@ -5,13 +5,13 @@ error[E0277]: the trait bound `MyError: PalletError` is not satisfied
    | ^^^^^^^^^^^^^^^^^^^^^^^^ the trait `PalletError` is not implemented for `MyError`
    |
    = help: the following other types implement trait `PalletError`:
-             ()
-             (TupleElement0, TupleElement1)
-             (TupleElement0, TupleElement1, TupleElement2)
-             (TupleElement0, TupleElement1, TupleElement2, TupleElement3)
-             (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4)
-             (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5)
-             (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6)
-             (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6, TupleElement7)
+             bool
+             i8
+             i16
+             i32
+             i64
+             i128
+             u8
+             u16
            and $N others
    = note: this error originates in the derive macro `frame_support::PalletError` (in Nightly builds, run with -Z macro-backtrace for more info)
diff --git a/substrate/frame/support/test/tests/pallet_ui/event_field_not_member.stderr b/substrate/frame/support/test/tests/pallet_ui/event_field_not_member.stderr
index 4df6deafa0df6..fc4a33b721500 100644
--- a/substrate/frame/support/test/tests/pallet_ui/event_field_not_member.stderr
+++ b/substrate/frame/support/test/tests/pallet_ui/event_field_not_member.stderr
@@ -18,4 +18,4 @@ error[E0277]: `<T as pallet::Config>::Bar` doesn't implement `std::fmt::Debug`
    |
    = help: the trait `std::fmt::Debug` is not implemented for `<T as pallet::Config>::Bar`
    = note: required for `&<T as pallet::Config>::Bar` to implement `std::fmt::Debug`
-   = note: required for the cast from `&<T as pallet::Config>::Bar` to the object type `dyn std::fmt::Debug`
+   = note: required for the cast from `&&<T as pallet::Config>::Bar` to `&dyn std::fmt::Debug`
diff --git a/substrate/frame/support/test/tests/pallet_ui/inherent_check_inner_span.stderr b/substrate/frame/support/test/tests/pallet_ui/inherent_check_inner_span.stderr
index 3a26d1c049541..5ea3be470a068 100644
--- a/substrate/frame/support/test/tests/pallet_ui/inherent_check_inner_span.stderr
+++ b/substrate/frame/support/test/tests/pallet_ui/inherent_check_inner_span.stderr
@@ -4,8 +4,8 @@ error[E0046]: not all trait items implemented, missing: `Call`, `Error`, `INHERE
 36 |     impl<T: Config> ProvideInherent for Pallet<T> {}
    |     ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ missing `Call`, `Error`, `INHERENT_IDENTIFIER`, `create_inherent`, `is_inherent` in implementation
    |
-   = help: implement the missing item: `type Call = Type;`
-   = help: implement the missing item: `type Error = Type;`
+   = help: implement the missing item: `type Call = /* Type */;`
+   = help: implement the missing item: `type Error = /* Type */;`
    = help: implement the missing item: `const INHERENT_IDENTIFIER: [u8; 8] = value;`
    = help: implement the missing item: `fn create_inherent(_: &InherentData) -> std::option::Option<<Self as ProvideInherent>::Call> { todo!() }`
    = help: implement the missing item: `fn is_inherent(_: &<Self as ProvideInherent>::Call) -> bool { todo!() }`
diff --git a/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr b/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr
index e290b22a0eaa5..930af1d7fcb30 100644
--- a/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr
+++ b/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr
@@ -5,10 +5,10 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied
    |               ^^^^^^^^^^^^^^^^^^^^ the trait `WrapperTypeDecode` is not implemented for `Bar`
    |
    = help: the following other types implement trait `WrapperTypeDecode`:
-             Arc<T>
              Box<T>
-             Rc<T>
              frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes
+             Rc<T>
+             Arc<T>
    = note: required for `Bar` to implement `Decode`
    = note: required for `Bar` to implement `FullCodec`
    = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo<T>, Bar>` to implement `PartialStorageInfoTrait`
@@ -20,14 +20,14 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied
    |               ^^^^^^^^^^^^^^^^^^^^ the trait `EncodeLike` is not implemented for `Bar`
    |
    = help: the following other types implement trait `EncodeLike<T>`:
-             <&&T as EncodeLike<T>>
-             <&T as EncodeLike<T>>
-             <&T as EncodeLike>
-             <&[(K, V)] as EncodeLike<BTreeMap<LikeK, LikeV>>>
-             <&[(T,)] as EncodeLike<BTreeSet<LikeT>>>
-             <&[(T,)] as EncodeLike<BinaryHeap<LikeT>>>
-             <&[(T,)] as EncodeLike<LinkedList<LikeT>>>
-             <&[T] as EncodeLike<Vec<U>>>
+             <bool as EncodeLike>
+             <i8 as EncodeLike>
+             <i16 as EncodeLike>
+             <i32 as EncodeLike>
+             <i64 as EncodeLike>
+             <i128 as EncodeLike>
+             <u8 as EncodeLike>
+             <u16 as EncodeLike>
            and $N others
    = note: required for `Bar` to implement `FullEncode`
    = note: required for `Bar` to implement `FullCodec`
@@ -40,14 +40,14 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied
    |               ^^^^^^^^^^^^^^^^^^^^ the trait `WrapperTypeEncode` is not implemented for `Bar`
    |
    = help: the following other types implement trait `WrapperTypeEncode`:
-             &T
-             &mut T
-             Arc<T>
              Box<T>
+             bytes::bytes::Bytes
              Cow<'a, T>
+             parity_scale_codec::Ref<'a, T, U>
+             frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes
              Rc<T>
+             Arc<T>
              Vec<T>
-             bytes::bytes::Bytes
            and $N others
    = note: required for `Bar` to implement `Encode`
    = note: required for `Bar` to implement `FullEncode`
@@ -61,14 +61,14 @@ error[E0277]: the trait bound `Bar: TypeInfo` is not satisfied
    |               ^^^^^^^ the trait `TypeInfo` is not implemented for `Bar`
    |
    = help: the following other types implement trait `TypeInfo`:
-             &T
-             &mut T
-             ()
-             (A, B)
-             (A, B, C)
-             (A, B, C, D)
-             (A, B, C, D, E)
-             (A, B, C, D, E, F)
+             bool
+             char
+             i8
+             i16
+             i32
+             i64
+             i128
+             u8
            and $N others
    = note: required for `Bar` to implement `StaticTypeInfo`
    = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo<T>, Bar>` to implement `StorageEntryMetadataBuilder`
@@ -80,10 +80,10 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied
    |               ^^^^^^^ the trait `WrapperTypeDecode` is not implemented for `Bar`
    |
    = help: the following other types implement trait `WrapperTypeDecode`:
-             Arc<T>
              Box<T>
-             Rc<T>
              frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes
+             Rc<T>
+             Arc<T>
    = note: required for `Bar` to implement `Decode`
    = note: required for `Bar` to implement `FullCodec`
    = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo<T>, Bar>` to implement `StorageEntryMetadataBuilder`
@@ -95,14 +95,14 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied
    |               ^^^^^^^ the trait `EncodeLike` is not implemented for `Bar`
    |
    = help: the following other types implement trait `EncodeLike<T>`:
-             <&&T as EncodeLike<T>>
-             <&T as EncodeLike<T>>
-             <&T as EncodeLike>
-             <&[(K, V)] as EncodeLike<BTreeMap<LikeK, LikeV>>>
-             <&[(T,)] as EncodeLike<BTreeSet<LikeT>>>
-             <&[(T,)] as EncodeLike<BinaryHeap<LikeT>>>
-             <&[(T,)] as EncodeLike<LinkedList<LikeT>>>
-             <&[T] as EncodeLike<Vec<U>>>
+             <bool as EncodeLike>
+             <i8 as EncodeLike>
+             <i16 as EncodeLike>
+             <i32 as EncodeLike>
+             <i64 as EncodeLike>
+             <i128 as EncodeLike>
+             <u8 as EncodeLike>
+             <u16 as EncodeLike>
            and $N others
    = note: required for `Bar` to implement `FullEncode`
    = note: required for `Bar` to implement `FullCodec`
@@ -115,14 +115,14 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied
    |               ^^^^^^^ the trait `WrapperTypeEncode` is not implemented for `Bar`
    |
    = help: the following other types implement trait `WrapperTypeEncode`:
-             &T
-             &mut T
-             Arc<T>
              Box<T>
+             bytes::bytes::Bytes
              Cow<'a, T>
+             parity_scale_codec::Ref<'a, T, U>
+             frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes
              Rc<T>
+             Arc<T>
              Vec<T>
-             bytes::bytes::Bytes
            and $N others
    = note: required for `Bar` to implement `Encode`
    = note: required for `Bar` to implement `FullEncode`
diff --git a/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr b/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr
index 0e3a7c9f1cbfa..79798963c8b1a 100644
--- a/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr
+++ b/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr
@@ -5,10 +5,10 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied
    |               ^^^^^^^^^^^^^^^^^^^^ the trait `WrapperTypeDecode` is not implemented for `Bar`
    |
    = help: the following other types implement trait `WrapperTypeDecode`:
-             Arc<T>
              Box<T>
-             Rc<T>
              frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes
+             Rc<T>
+             Arc<T>
    = note: required for `Bar` to implement `Decode`
    = note: required for `Bar` to implement `FullCodec`
    = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo<T>, Bar>` to implement `PartialStorageInfoTrait`
@@ -20,14 +20,14 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied
    |               ^^^^^^^^^^^^^^^^^^^^ the trait `EncodeLike` is not implemented for `Bar`
    |
    = help: the following other types implement trait `EncodeLike<T>`:
-             <&&T as EncodeLike<T>>
-             <&T as EncodeLike<T>>
-             <&T as EncodeLike>
-             <&[(K, V)] as EncodeLike<BTreeMap<LikeK, LikeV>>>
-             <&[(T,)] as EncodeLike<BTreeSet<LikeT>>>
-             <&[(T,)] as EncodeLike<BinaryHeap<LikeT>>>
-             <&[(T,)] as EncodeLike<LinkedList<LikeT>>>
-             <&[T] as EncodeLike<Vec<U>>>
+             <bool as EncodeLike>
+             <i8 as EncodeLike>
+             <i16 as EncodeLike>
+             <i32 as EncodeLike>
+             <i64 as EncodeLike>
+             <i128 as EncodeLike>
+             <u8 as EncodeLike>
+             <u16 as EncodeLike>
            and $N others
    = note: required for `Bar` to implement `FullEncode`
    = note: required for `Bar` to implement `FullCodec`
@@ -40,14 +40,14 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied
    |               ^^^^^^^^^^^^^^^^^^^^ the trait `WrapperTypeEncode` is not implemented for `Bar`
    |
    = help: the following other types implement trait `WrapperTypeEncode`:
-             &T
-             &mut T
-             Arc<T>
              Box<T>
+             bytes::bytes::Bytes
              Cow<'a, T>
+             parity_scale_codec::Ref<'a, T, U>
+             frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes
              Rc<T>
+             Arc<T>
              Vec<T>
-             bytes::bytes::Bytes
            and $N others
    = note: required for `Bar` to implement `Encode`
    = note: required for `Bar` to implement `FullEncode`
@@ -61,14 +61,14 @@ error[E0277]: the trait bound `Bar: TypeInfo` is not satisfied
    |               ^^^^^^^ the trait `TypeInfo` is not implemented for `Bar`
    |
    = help: the following other types implement trait `TypeInfo`:
-             &T
-             &mut T
-             ()
-             (A, B)
-             (A, B, C)
-             (A, B, C, D)
-             (A, B, C, D, E)
-             (A, B, C, D, E, F)
+             bool
+             char
+             i8
+             i16
+             i32
+             i64
+             i128
+             u8
            and $N others
    = note: required for `Bar` to implement `StaticTypeInfo`
    = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo<T>, Bar>` to implement `StorageEntryMetadataBuilder`
@@ -80,10 +80,10 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied
    |               ^^^^^^^ the trait `WrapperTypeDecode` is not implemented for `Bar`
    |
    = help: the following other types implement trait `WrapperTypeDecode`:
-             Arc<T>
              Box<T>
-             Rc<T>
              frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes
+             Rc<T>
+             Arc<T>
    = note: required for `Bar` to implement `Decode`
    = note: required for `Bar` to implement `FullCodec`
    = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo<T>, Bar>` to implement `StorageEntryMetadataBuilder`
@@ -95,14 +95,14 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied
    |               ^^^^^^^ the trait `EncodeLike` is not implemented for `Bar`
    |
    = help: the following other types implement trait `EncodeLike<T>`:
-             <&&T as EncodeLike<T>>
-             <&T as EncodeLike<T>>
-             <&T as EncodeLike>
-             <&[(K, V)] as EncodeLike<BTreeMap<LikeK, LikeV>>>
-             <&[(T,)] as EncodeLike<BTreeSet<LikeT>>>
-             <&[(T,)] as EncodeLike<BinaryHeap<LikeT>>>
-             <&[(T,)] as EncodeLike<LinkedList<LikeT>>>
-             <&[T] as EncodeLike<Vec<U>>>
+             <bool as EncodeLike>
+             <i8 as EncodeLike>
+             <i16 as EncodeLike>
+             <i32 as EncodeLike>
+             <i64 as EncodeLike>
+             <i128 as EncodeLike>
+             <u8 as EncodeLike>
+             <u16 as EncodeLike>
            and $N others
    = note: required for `Bar` to implement `FullEncode`
    = note: required for `Bar` to implement `FullCodec`
@@ -115,14 +115,14 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied
    |               ^^^^^^^ the trait `WrapperTypeEncode` is not implemented for `Bar`
    |
    = help: the following other types implement trait `WrapperTypeEncode`:
-             &T
-             &mut T
-             Arc<T>
              Box<T>
+             bytes::bytes::Bytes
              Cow<'a, T>
+             parity_scale_codec::Ref<'a, T, U>
+             frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes
              Rc<T>
+             Arc<T>
              Vec<T>
-             bytes::bytes::Bytes
            and $N others
    = note: required for `Bar` to implement `Encode`
    = note: required for `Bar` to implement `FullEncode`
diff --git a/substrate/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr b/substrate/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr
index 9a31e4b6bdf46..e04de98800ec2 100644
--- a/substrate/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr
+++ b/substrate/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr
@@ -5,13 +5,13 @@ error[E0277]: the trait bound `Bar: MaxEncodedLen` is not satisfied
    |               ^^^^^^ the trait `MaxEncodedLen` is not implemented for `Bar`
    |
    = help: the following other types implement trait `MaxEncodedLen`:
-             ()
-             (TupleElement0, TupleElement1)
-             (TupleElement0, TupleElement1, TupleElement2)
-             (TupleElement0, TupleElement1, TupleElement2, TupleElement3)
-             (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4)
-             (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5)
-             (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6)
-             (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6, TupleElement7)
+             bool
+             i8
+             i16
+             i32
+             i64
+             i128
+             u8
+             u16
            and $N others
    = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo<T>, Bar>` to implement `StorageInfoTrait`
diff --git a/substrate/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr b/substrate/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr
index cdcd1b401f801..31fe3b5733896 100644
--- a/substrate/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr
+++ b/substrate/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr
@@ -5,14 +5,14 @@ error[E0277]: the trait bound `Bar: MaxEncodedLen` is not satisfied
    |               ^^^^^^ the trait `MaxEncodedLen` is not implemented for `Bar`
    |
    = help: the following other types implement trait `MaxEncodedLen`:
-             ()
-             (TupleElement0, TupleElement1)
-             (TupleElement0, TupleElement1, TupleElement2)
-             (TupleElement0, TupleElement1, TupleElement2, TupleElement3)
-             (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4)
-             (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5)
-             (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6)
-             (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6, TupleElement7)
+             bool
+             i8
+             i16
+             i32
+             i64
+             i128
+             u8
+             u16
            and $N others
-   = note: required for `Key<frame_support::Twox64Concat, Bar>` to implement `KeyGeneratorMaxEncodedLen`
-   = note: required for `frame_support::pallet_prelude::StorageNMap<_GeneratedPrefixForStorageFoo<T>, Key<frame_support::Twox64Concat, Bar>, u32>` to implement `StorageInfoTrait`
+   = note: required for `NMapKey<frame_support::Twox64Concat, Bar>` to implement `KeyGeneratorMaxEncodedLen`
+   = note: required for `frame_support::pallet_prelude::StorageNMap<_GeneratedPrefixForStorageFoo<T>, NMapKey<frame_support::Twox64Concat, Bar>, u32>` to implement `StorageInfoTrait`
diff --git a/substrate/frame/system/src/extensions/check_nonce.rs b/substrate/frame/system/src/extensions/check_nonce.rs
index 2939fd6534c09..7504a814aef13 100644
--- a/substrate/frame/system/src/extensions/check_nonce.rs
+++ b/substrate/frame/system/src/extensions/check_nonce.rs
@@ -20,7 +20,7 @@ use codec::{Decode, Encode};
 use frame_support::dispatch::DispatchInfo;
 use scale_info::TypeInfo;
 use sp_runtime::{
-	traits::{DispatchInfoOf, Dispatchable, One, SignedExtension},
+	traits::{DispatchInfoOf, Dispatchable, One, SignedExtension, Zero},
 	transaction_validity::{
 		InvalidTransaction, TransactionLongevity, TransactionValidity, TransactionValidityError,
 		ValidTransaction,
@@ -80,6 +80,10 @@ where
 		_len: usize,
 	) -> Result<(), TransactionValidityError> {
 		let mut account = crate::Account::<T>::get(who);
+		if account.providers.is_zero() && account.sufficients.is_zero() {
+			// Nonce storage not paid for
+			return Err(InvalidTransaction::Payment.into())
+		}
 		if self.0 != account.nonce {
 			return Err(if self.0 < account.nonce {
 				InvalidTransaction::Stale
@@ -100,8 +104,11 @@ where
 		_info: &DispatchInfoOf<Self::Call>,
 		_len: usize,
 	) -> TransactionValidity {
-		// check index
 		let account = crate::Account::<T>::get(who);
+		if account.providers.is_zero() && account.sufficients.is_zero() {
+			// Nonce storage not paid for
+			return InvalidTransaction::Payment.into()
+		}
 		if self.0 < account.nonce {
 			return InvalidTransaction::Stale.into()
 		}
@@ -137,7 +144,7 @@ mod tests {
 				crate::AccountInfo {
 					nonce: 1,
 					consumers: 0,
-					providers: 0,
+					providers: 1,
 					sufficients: 0,
 					data: 0,
 				},
@@ -164,4 +171,47 @@ mod tests {
 			);
 		})
 	}
+
+	#[test]
+	fn signed_ext_check_nonce_requires_provider() {
+		new_test_ext().execute_with(|| {
+			crate::Account::<Test>::insert(
+				2,
+				crate::AccountInfo {
+					nonce: 1,
+					consumers: 0,
+					providers: 1,
+					sufficients: 0,
+					data: 0,
+				},
+			);
+			crate::Account::<Test>::insert(
+				3,
+				crate::AccountInfo {
+					nonce: 1,
+					consumers: 0,
+					providers: 0,
+					sufficients: 1,
+					data: 0,
+				},
+			);
+			let info = DispatchInfo::default();
+			let len = 0_usize;
+			// Both providers and sufficients zero
+			assert_noop!(
+				CheckNonce::<Test>(1).validate(&1, CALL, &info, len),
+				InvalidTransaction::Payment
+			);
+			assert_noop!(
+				CheckNonce::<Test>(1).pre_dispatch(&1, CALL, &info, len),
+				InvalidTransaction::Payment
+			);
+			// Non-zero providers
+			assert_ok!(CheckNonce::<Test>(1).validate(&2, CALL, &info, len));
+			assert_ok!(CheckNonce::<Test>(1).pre_dispatch(&2, CALL, &info, len));
+			// Non-zero sufficients
+			assert_ok!(CheckNonce::<Test>(1).validate(&3, CALL, &info, len));
+			assert_ok!(CheckNonce::<Test>(1).pre_dispatch(&3, CALL, &info, len));
+		})
+	}
 }
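// Illustrative sketch (not part of this diff): the new guard in plain form. An
// account must be backed by at least one provider or sufficient reference,
// otherwise its nonce storage is not paid for and `CheckNonce` rejects the
// transaction with `InvalidTransaction::Payment` before any nonce comparison.
fn nonce_storage_paid_for(providers: u32, sufficients: u32) -> bool {
	providers > 0 || sufficients > 0
}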
diff --git a/substrate/frame/system/src/lib.rs b/substrate/frame/system/src/lib.rs
index 84b6dc031457d..897d3bd7ce91f 100644
--- a/substrate/frame/system/src/lib.rs
+++ b/substrate/frame/system/src/lib.rs
@@ -420,8 +420,9 @@ pub mod pallet {
 		///
 		/// Can be executed by every `origin`.
 		#[pallet::call_index(0)]
-		#[pallet::weight(T::SystemWeightInfo::remark(_remark.len() as u32))]
-		pub fn remark(_origin: OriginFor<T>, _remark: Vec<u8>) -> DispatchResultWithPostInfo {
+		#[pallet::weight(T::SystemWeightInfo::remark(remark.len() as u32))]
+		pub fn remark(_origin: OriginFor<T>, remark: Vec<u8>) -> DispatchResultWithPostInfo {
+			let _ = remark; // No need to check the weight witness.
 			Ok(().into())
 		}
 
@@ -495,16 +496,16 @@ pub mod pallet {
 		/// the prefix we are removing to accurately calculate the weight of this function.
 		#[pallet::call_index(6)]
 		#[pallet::weight((
-			T::SystemWeightInfo::kill_prefix(_subkeys.saturating_add(1)),
+			T::SystemWeightInfo::kill_prefix(subkeys.saturating_add(1)),
 			DispatchClass::Operational,
 		))]
 		pub fn kill_prefix(
 			origin: OriginFor<T>,
 			prefix: Key,
-			_subkeys: u32,
+			subkeys: u32,
 		) -> DispatchResultWithPostInfo {
 			ensure_root(origin)?;
-			let _ = storage::unhashed::clear_prefix(&prefix, None, None);
+			let _ = storage::unhashed::clear_prefix(&prefix, Some(subkeys), None);
 			Ok(().into())
 		}
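// Illustrative sketch (not part of this diff): `clear_prefix` is now bounded by
// the caller-supplied `subkeys` witness, so the charged weight matches the work
// performed. `MultiRemovalResults::unique` reports how many keys were actually
// removed, which is at most `subkeys`.
fn demo_bounded_clear(prefix: &[u8], subkeys: u32) -> u32 {
	frame_support::storage::unhashed::clear_prefix(prefix, Some(subkeys), None).unique
}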
 
diff --git a/substrate/frame/tips/src/tests.rs b/substrate/frame/tips/src/tests.rs
index 9cb90c3798018..8fe111afc26a4 100644
--- a/substrate/frame/tips/src/tests.rs
+++ b/substrate/frame/tips/src/tests.rs
@@ -29,7 +29,10 @@ use sp_storage::Storage;
 use frame_support::{
 	assert_noop, assert_ok, parameter_types,
 	storage::StoragePrefixedMap,
-	traits::{ConstU32, ConstU64, SortedMembers, StorageVersion},
+	traits::{
+		tokens::{PayFromAccount, UnityAssetBalanceConversion},
+		ConstU32, ConstU64, SortedMembers, StorageVersion,
+	},
 	PalletId,
 };
 
@@ -123,7 +126,10 @@ parameter_types! {
 	pub const Burn: Permill = Permill::from_percent(50);
 	pub const TreasuryPalletId: PalletId = PalletId(*b"py/trsry");
 	pub const TreasuryPalletId2: PalletId = PalletId(*b"py/trsr2");
+	pub TreasuryAccount: u128 = Treasury::account_id();
+	pub TreasuryInstance1Account: u128 = Treasury1::account_id();
 }
+
 impl pallet_treasury::Config for Test {
 	type PalletId = TreasuryPalletId;
 	type Currency = pallet_balances::Pallet<Test>;
@@ -141,6 +147,14 @@ impl pallet_treasury::Config for Test {
 	type SpendFunds = ();
 	type MaxApprovals = ConstU32<100>;
 	type SpendOrigin = frame_support::traits::NeverEnsureOrigin<u64>;
+	type AssetKind = ();
+	type Beneficiary = Self::AccountId;
+	type BeneficiaryLookup = IdentityLookup<Self::Beneficiary>;
+	type Paymaster = PayFromAccount<Balances, TreasuryAccount>;
+	type BalanceConverter = UnityAssetBalanceConversion;
+	type PayoutPeriod = ConstU64<10>;
+	#[cfg(feature = "runtime-benchmarks")]
+	type BenchmarkHelper = ();
 }
 
 impl pallet_treasury::Config<Instance1> for Test {
@@ -160,6 +174,14 @@ impl pallet_treasury::Config<Instance1> for Test {
 	type SpendFunds = ();
 	type MaxApprovals = ConstU32<100>;
 	type SpendOrigin = frame_support::traits::NeverEnsureOrigin<u64>;
+	type AssetKind = ();
+	type Beneficiary = Self::AccountId;
+	type BeneficiaryLookup = IdentityLookup<Self::Beneficiary>;
+	type Paymaster = PayFromAccount<Balances, TreasuryInstance1Account>;
+	type BalanceConverter = UnityAssetBalanceConversion;
+	type PayoutPeriod = ConstU64<10>;
+	#[cfg(feature = "runtime-benchmarks")]
+	type BenchmarkHelper = ();
 }
 
 parameter_types! {
@@ -489,7 +511,7 @@ fn test_last_reward_migration() {
 	s.top = data.into_iter().collect();
 
 	sp_io::TestExternalities::new(s).execute_with(|| {
-		let module = pallet_tips::Tips::<Test>::module_prefix();
+		let module = pallet_tips::Tips::<Test>::pallet_prefix();
 		let item = pallet_tips::Tips::<Test>::storage_prefix();
 		Tips::migrate_retract_tip_for_tip_new(module, item);
 
diff --git a/substrate/frame/treasury/Cargo.toml b/substrate/frame/treasury/Cargo.toml
index 785564cd9888d..f7f7a6ae89c56 100644
--- a/substrate/frame/treasury/Cargo.toml
+++ b/substrate/frame/treasury/Cargo.toml
@@ -17,6 +17,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features =
 	"derive",
 	"max-encoded-len",
 ] }
+docify = "0.2.0"
 impl-trait-for-tuples = "0.2.2"
 scale-info = { version = "2.5.0", default-features = false, features = ["derive"] }
 serde = { version = "1.0.188", features = ["derive"], optional = true }
@@ -26,11 +27,12 @@ frame-system = { path = "../system", default-features = false}
 pallet-balances = { path = "../balances", default-features = false}
 sp-runtime = { path = "../../primitives/runtime", default-features = false}
 sp-std = { path = "../../primitives/std", default-features = false}
+sp-core = { path = "../../primitives/core", default-features = false, optional = true}
 
 [dev-dependencies]
-sp-core = { path = "../../primitives/core" }
 sp-io = { path = "../../primitives/io" }
 pallet-utility = { path = "../utility" }
+sp-core = { path = "../../primitives/core", default-features = false }
 
 [features]
 default = [ "std" ]
@@ -43,12 +45,13 @@ std = [
 	"pallet-utility/std",
 	"scale-info/std",
 	"serde",
-	"sp-core/std",
+	"sp-core?/std",
 	"sp-io/std",
 	"sp-runtime/std",
 	"sp-std/std",
 ]
 runtime-benchmarks = [
+	"dep:sp-core",
 	"frame-benchmarking/runtime-benchmarks",
 	"frame-support/runtime-benchmarks",
 	"frame-system/runtime-benchmarks",
diff --git a/substrate/frame/treasury/src/benchmarking.rs b/substrate/frame/treasury/src/benchmarking.rs
index 24c290ddb665e..f5f73ea8ddabd 100644
--- a/substrate/frame/treasury/src/benchmarking.rs
+++ b/substrate/frame/treasury/src/benchmarking.rs
@@ -21,12 +21,41 @@
 
 use super::{Pallet as Treasury, *};
 
-use frame_benchmarking::v1::{account, benchmarks_instance_pallet, BenchmarkError};
+use frame_benchmarking::{
+	v1::{account, BenchmarkError},
+	v2::*,
+};
 use frame_support::{
 	ensure,
-	traits::{EnsureOrigin, OnInitialize, UnfilteredDispatchable},
+	traits::{
+		tokens::{ConversionFromAssetBalance, PaymentStatus},
+		EnsureOrigin, OnInitialize,
+	},
 };
 use frame_system::RawOrigin;
+use sp_core::crypto::FromEntropy;
+
+/// Trait describing factory functions for dispatchables' parameters.
+pub trait ArgumentsFactory<AssetKind, Beneficiary> {
+	/// Factory function for an asset kind.
+	fn create_asset_kind(seed: u32) -> AssetKind;
+	/// Factory function for a beneficiary.
+	fn create_beneficiary(seed: [u8; 32]) -> Beneficiary;
+}
+
+/// Implementation that expects the parameters to implement the [`FromEntropy`] trait.
+impl<AssetKind, Beneficiary> ArgumentsFactory<AssetKind, Beneficiary> for ()
+where
+	AssetKind: FromEntropy,
+	Beneficiary: FromEntropy,
+{
+	fn create_asset_kind(seed: u32) -> AssetKind {
+		AssetKind::from_entropy(&mut seed.encode().as_slice()).unwrap()
+	}
+	fn create_beneficiary(seed: [u8; 32]) -> Beneficiary {
+		Beneficiary::from_entropy(&mut seed.as_slice()).unwrap()
+	}
+}
 
 const SEED: u32 = 0;
 
@@ -66,81 +95,245 @@ fn assert_last_event<T: Config<I>, I: 'static>(generic_event: <T as Config<I>>::
 	frame_system::Pallet::<T>::assert_last_event(generic_event.into());
 }
 
-benchmarks_instance_pallet! {
+// Create the arguments for the `spend` dispatchable.
+fn create_spend_arguments<T: Config<I>, I: 'static>(
+	seed: u32,
+) -> (T::AssetKind, AssetBalanceOf<T, I>, T::Beneficiary, BeneficiaryLookupOf<T, I>) {
+	let asset_kind = T::BenchmarkHelper::create_asset_kind(seed);
+	let beneficiary = T::BenchmarkHelper::create_beneficiary([seed.try_into().unwrap(); 32]);
+	let beneficiary_lookup = T::BeneficiaryLookup::unlookup(beneficiary.clone());
+	(asset_kind, 100u32.into(), beneficiary, beneficiary_lookup)
+}
+
+#[instance_benchmarks]
+mod benchmarks {
+	use super::*;
+
 	// This benchmark is short-circuited if `SpendOrigin` cannot provide
 	// a successful origin, in which case `spend` is un-callable and can use weight=0.
-	spend {
+	#[benchmark]
+	fn spend_local() -> Result<(), BenchmarkError> {
 		let (_, value, beneficiary_lookup) = setup_proposal::<T, _>(SEED);
-		let origin = T::SpendOrigin::try_successful_origin();
+		let origin =
+			T::SpendOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?;
 		let beneficiary = T::Lookup::lookup(beneficiary_lookup.clone()).unwrap();
-		let call = Call::<T, I>::spend { amount: value, beneficiary: beneficiary_lookup };
-	}: {
-		if let Ok(origin) = origin.clone() {
-			call.dispatch_bypass_filter(origin)?;
-		}
-	}
-	verify {
-		if origin.is_ok() {
-			assert_last_event::<T, I>(Event::SpendApproved { proposal_index: 0, amount: value, beneficiary }.into())
-		}
+
+		#[extrinsic_call]
+		_(origin as T::RuntimeOrigin, value, beneficiary_lookup);
+
+		assert_last_event::<T, I>(
+			Event::SpendApproved { proposal_index: 0, amount: value, beneficiary }.into(),
+		);
+		Ok(())
 	}
 
-	propose_spend {
+	#[benchmark]
+	fn propose_spend() -> Result<(), BenchmarkError> {
 		let (caller, value, beneficiary_lookup) = setup_proposal::<T, _>(SEED);
 		// Whitelist caller account from further DB operations.
 		let caller_key = frame_system::Account::<T>::hashed_key_for(&caller);
 		frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into());
-	}: _(RawOrigin::Signed(caller), value, beneficiary_lookup)
 
-	reject_proposal {
+		#[extrinsic_call]
+		_(RawOrigin::Signed(caller), value, beneficiary_lookup);
+
+		Ok(())
+	}
+
+	#[benchmark]
+	fn reject_proposal() -> Result<(), BenchmarkError> {
 		let (caller, value, beneficiary_lookup) = setup_proposal::<T, _>(SEED);
 		#[allow(deprecated)]
 		Treasury::<T, _>::propose_spend(
 			RawOrigin::Signed(caller).into(),
 			value,
-			beneficiary_lookup
+			beneficiary_lookup,
 		)?;
 		let proposal_id = Treasury::<T, _>::proposal_count() - 1;
 		let reject_origin =
 			T::RejectOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?;
-	}: _<T::RuntimeOrigin>(reject_origin, proposal_id)
 
-	approve_proposal {
-		let p in 0 .. T::MaxApprovals::get() - 1;
+		#[extrinsic_call]
+		_(reject_origin as T::RuntimeOrigin, proposal_id);
+
+		Ok(())
+	}
+
+	#[benchmark]
+	fn approve_proposal(
+		p: Linear<0, { T::MaxApprovals::get() - 1 }>,
+	) -> Result<(), BenchmarkError> {
 		create_approved_proposals::<T, _>(p)?;
 		let (caller, value, beneficiary_lookup) = setup_proposal::<T, _>(SEED);
 		#[allow(deprecated)]
 		Treasury::<T, _>::propose_spend(
 			RawOrigin::Signed(caller).into(),
 			value,
-			beneficiary_lookup
+			beneficiary_lookup,
 		)?;
 		let proposal_id = Treasury::<T, _>::proposal_count() - 1;
 		let approve_origin =
 			T::ApproveOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?;
-	}: _<T::RuntimeOrigin>(approve_origin, proposal_id)
 
-	remove_approval {
+		#[extrinsic_call]
+		_(approve_origin as T::RuntimeOrigin, proposal_id);
+
+		Ok(())
+	}
+
+	#[benchmark]
+	fn remove_approval() -> Result<(), BenchmarkError> {
 		let (caller, value, beneficiary_lookup) = setup_proposal::<T, _>(SEED);
 		#[allow(deprecated)]
 		Treasury::<T, _>::propose_spend(
 			RawOrigin::Signed(caller).into(),
 			value,
-			beneficiary_lookup
+			beneficiary_lookup,
 		)?;
 		let proposal_id = Treasury::<T, _>::proposal_count() - 1;
 		#[allow(deprecated)]
 		Treasury::<T, I>::approve_proposal(RawOrigin::Root.into(), proposal_id)?;
 		let reject_origin =
 			T::RejectOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?;
-	}: _<T::RuntimeOrigin>(reject_origin, proposal_id)
 
-	on_initialize_proposals {
-		let p in 0 .. T::MaxApprovals::get();
+		#[extrinsic_call]
+		_(reject_origin as T::RuntimeOrigin, proposal_id);
+
+		Ok(())
+	}
+
+	#[benchmark]
+	fn on_initialize_proposals(
+		p: Linear<0, { T::MaxApprovals::get() - 1 }>,
+	) -> Result<(), BenchmarkError> {
 		setup_pot_account::<T, _>();
 		create_approved_proposals::<T, _>(p)?;
-	}: {
-		Treasury::<T, _>::on_initialize(frame_system::pallet_prelude::BlockNumberFor::<T>::zero());
+
+		#[block]
+		{
+			Treasury::<T, _>::on_initialize(0u32.into());
+		}
+
+		Ok(())
+	}
+
+	#[benchmark]
+	fn spend() -> Result<(), BenchmarkError> {
+		let origin =
+			T::SpendOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?;
+		let (asset_kind, amount, beneficiary, beneficiary_lookup) =
+			create_spend_arguments::<T, _>(SEED);
+		T::BalanceConverter::ensure_successful(asset_kind.clone());
+
+		#[extrinsic_call]
+		_(
+			origin as T::RuntimeOrigin,
+			Box::new(asset_kind.clone()),
+			amount,
+			Box::new(beneficiary_lookup),
+			None,
+		);
+
+		let valid_from = frame_system::Pallet::<T>::block_number();
+		let expire_at = valid_from.saturating_add(T::PayoutPeriod::get());
+		assert_last_event::<T, I>(
+			Event::AssetSpendApproved {
+				index: 0,
+				asset_kind,
+				amount,
+				beneficiary,
+				valid_from,
+				expire_at,
+			}
+			.into(),
+		);
+		Ok(())
+	}
+
+	#[benchmark]
+	fn payout() -> Result<(), BenchmarkError> {
+		let origin = T::SpendOrigin::try_successful_origin().map_err(|_| "No origin")?;
+		let (asset_kind, amount, beneficiary, beneficiary_lookup) =
+			create_spend_arguments::<T, _>(SEED);
+		T::BalanceConverter::ensure_successful(asset_kind.clone());
+		Treasury::<T, _>::spend(
+			origin,
+			Box::new(asset_kind.clone()),
+			amount,
+			Box::new(beneficiary_lookup),
+			None,
+		)?;
+		T::Paymaster::ensure_successful(&beneficiary, asset_kind, amount);
+		let caller: T::AccountId = account("caller", 0, SEED);
+
+		#[extrinsic_call]
+		_(RawOrigin::Signed(caller.clone()), 0u32);
+
+		let id = match Spends::<T, I>::get(0).unwrap().status {
+			PaymentState::Attempted { id, .. } => {
+				assert_ne!(T::Paymaster::check_payment(id), PaymentStatus::Failure);
+				id
+			},
+			_ => panic!("No payout attempt made"),
+		};
+		assert_last_event::<T, I>(Event::Paid { index: 0, payment_id: id }.into());
+		assert!(Treasury::<T, _>::payout(RawOrigin::Signed(caller).into(), 0u32).is_err());
+		Ok(())
+	}
+
+	#[benchmark]
+	fn check_status() -> Result<(), BenchmarkError> {
+		let origin = T::SpendOrigin::try_successful_origin().map_err(|_| "No origin")?;
+		let (asset_kind, amount, beneficiary, beneficiary_lookup) =
+			create_spend_arguments::<T, _>(SEED);
+		T::BalanceConverter::ensure_successful(asset_kind.clone());
+		Treasury::<T, _>::spend(
+			origin,
+			Box::new(asset_kind.clone()),
+			amount,
+			Box::new(beneficiary_lookup),
+			None,
+		)?;
+		T::Paymaster::ensure_successful(&beneficiary, asset_kind, amount);
+		let caller: T::AccountId = account("caller", 0, SEED);
+		Treasury::<T, _>::payout(RawOrigin::Signed(caller.clone()).into(), 0u32)?;
+		match Spends::<T, I>::get(0).unwrap().status {
+			PaymentState::Attempted { id, .. } => {
+				T::Paymaster::ensure_concluded(id);
+			},
+			_ => panic!("No payout attempt made"),
+		};
+
+		#[extrinsic_call]
+		_(RawOrigin::Signed(caller.clone()), 0u32);
+
+		if let Some(s) = Spends::<T, I>::get(0) {
+			assert!(!matches!(s.status, PaymentState::Attempted { .. }));
+		}
+		Ok(())
+	}
+
+	#[benchmark]
+	fn void_spend() -> Result<(), BenchmarkError> {
+		let origin = T::SpendOrigin::try_successful_origin().map_err(|_| "No origin")?;
+		let (asset_kind, amount, _, beneficiary_lookup) = create_spend_arguments::<T, _>(SEED);
+		T::BalanceConverter::ensure_successful(asset_kind.clone());
+		Treasury::<T, _>::spend(
+			origin,
+			Box::new(asset_kind.clone()),
+			amount,
+			Box::new(beneficiary_lookup),
+			None,
+		)?;
+		assert!(Spends::<T, I>::get(0).is_some());
+		let origin =
+			T::RejectOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?;
+
+		#[extrinsic_call]
+		_(origin as T::RuntimeOrigin, 0u32);
+
+		assert!(Spends::<T, I>::get(0).is_none());
+		Ok(())
 	}
 
 	impl_benchmark_test_suite!(Treasury, crate::tests::new_test_ext(), crate::tests::Test);
diff --git a/substrate/frame/treasury/src/lib.rs b/substrate/frame/treasury/src/lib.rs
index 730fae2a4e92c..b2b3a8801c156 100644
--- a/substrate/frame/treasury/src/lib.rs
+++ b/substrate/frame/treasury/src/lib.rs
@@ -15,46 +15,60 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+//! > Made with *Substrate*, for *Polkadot*.
+//!
+//! [![github]](https://github.com/paritytech/substrate/frame/treasury) -
+//! [![polkadot]](https://polkadot.network)
+//!
+//! [polkadot]: https://img.shields.io/badge/polkadot-E6007A?style=for-the-badge&logo=polkadot&logoColor=white
+//! [github]: https://img.shields.io/badge/github-8da0cb?style=for-the-badge&labelColor=555555&logo=github
+//!
 //! # Treasury Pallet
 //!
 //! The Treasury pallet provides a "pot" of funds that can be managed by stakeholders in the system
 //! and a structure for making spending proposals from this pot.
 //!
-//! - [`Config`]
-//! - [`Call`]
-//!
 //! ## Overview
 //!
 //! The Treasury Pallet itself provides the pot to store funds, and a means for stakeholders to
-//! propose, approve, and deny expenditures. The chain will need to provide a method (e.g.
-//! inflation, fees) for collecting funds.
+//! propose and claim expenditures (aka spends). The chain will need to provide a method to approve
+//! spends (e.g. public referendum) and a method for collecting funds (e.g. inflation, fees).
 //!
-//! By way of example, the Council could vote to fund the Treasury with a portion of the block
+//! By way of example, stakeholders could vote to fund the Treasury with a portion of the block
 //! reward and use the funds to pay developers.
 //!
-//!
 //! ### Terminology
 //!
 //! - **Proposal:** A suggestion to allocate funds from the pot to a beneficiary.
 //! - **Beneficiary:** An account who will receive the funds from a proposal iff the proposal is
 //!   approved.
-//! - **Deposit:** Funds that a proposer must lock when making a proposal. The deposit will be
-//!   returned or slashed if the proposal is approved or rejected respectively.
 //! - **Pot:** Unspent funds accumulated by the treasury pallet.
+//! - **Spend:** An approved proposal for transferring a specific amount of funds to a designated
+//!   beneficiary.
 //!
-//! ## Interface
+//! ### Example
 //!
-//! ### Dispatchable Functions
+//! 1. Multiple local spends approved by spend origins and received by a beneficiary.
+#![doc = docify::embed!("src/tests.rs", spend_local_origin_works)]
 //!
-//! General spending/proposal protocol:
-//! - `propose_spend` - Make a spending proposal and stake the required deposit.
-//! - `reject_proposal` - Reject a proposal, slashing the deposit.
-//! - `approve_proposal` - Accept the proposal, returning the deposit.
-//! - `remove_approval` - Remove an approval, the deposit will no longer be returned.
+//! 2. Approve a spend of some asset kind and claim it.
+#![doc = docify::embed!("src/tests.rs", spend_payout_works)]
 //!
-//! ## GenesisConfig
+//! ## Pallet API
 //!
-//! The Treasury pallet depends on the [`GenesisConfig`].
+//! See the [`pallet`] module for more information about the interfaces this pallet exposes,
+//! including its configuration trait, dispatchables, storage items, events and errors.
+//!
+//! ## Low Level / Implementation Details
+//!
+//! Spends can be initiated using either the `spend_local` or `spend` dispatchable. The
+//! `spend_local` dispatchable enables the creation of spends using the native currency of the
+//! chain, utilizing the funds stored in the pot. These spends are automatically paid out every
+//! [`pallet::Config::SpendPeriod`]. On the other hand, the `spend` dispatchable allows spending of
+//! any asset kind managed by the treasury, with payment facilitated by a designated
+//! [`pallet::Config::Paymaster`]. To claim these spends, the `payout` dispatchable should be called
+//! within some temporal bounds, starting from the moment they become valid and within one
+//! [`pallet::Config::PayoutPeriod`].
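To make the lifecycle concrete, here is a minimal sketch of the multi-asset flow, assuming the mock runtime wired up in `tests.rs` later in this diff (`u32` asset kinds, `u128` beneficiaries, `TestPay` as paymaster, and an origin for account `10` capped at `5` native units); `get_payment_id` and `set_status` are helpers from that file:

```rust
new_test_ext().execute_with(|| {
	System::set_block_number(1);
	// Approve a spend of 2 units of asset kind 1 for beneficiary 6,
	// claimable immediately (`valid_from: None`).
	assert_ok!(Treasury::spend(RuntimeOrigin::signed(10), Box::new(1), 2, Box::new(6), None));
	// Any signed origin may trigger the payout while the spend is valid.
	assert_ok!(Treasury::payout(RuntimeOrigin::signed(1), 0));
	// Once the paymaster reports a final status, `check_status` removes the
	// spend from storage (refunding the fee on success or expiry).
	let payment_id = get_payment_id(0).expect("no payment attempt");
	set_status(payment_id, PaymentStatus::Success);
	assert_ok!(Treasury::check_status(RuntimeOrigin::signed(1), 0));
});
```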
 
 #![cfg_attr(not(feature = "std"), no_std)]
 
@@ -62,6 +76,8 @@ mod benchmarking;
 #[cfg(test)]
 mod tests;
 pub mod weights;
+#[cfg(feature = "runtime-benchmarks")]
+pub use benchmarking::ArgumentsFactory;
 
 use codec::{Decode, Encode, MaxEncodedLen};
 use scale_info::TypeInfo;
@@ -75,7 +91,7 @@ use sp_std::{collections::btree_map::BTreeMap, prelude::*};
 use frame_support::{
 	print,
 	traits::{
-		Currency, ExistenceRequirement::KeepAlive, Get, Imbalance, OnUnbalanced,
+		tokens::Pay, Currency, ExistenceRequirement::KeepAlive, Get, Imbalance, OnUnbalanced,
 		ReservableCurrency, WithdrawReasons,
 	},
 	weights::Weight,
@@ -87,6 +103,7 @@ pub use weights::WeightInfo;
 
 pub type BalanceOf<T, I = ()> =
 	<<T as Config<I>>::Currency as Currency<<T as frame_system::Config>::AccountId>>::Balance;
+pub type AssetBalanceOf<T, I> = <<T as Config<I>>::Paymaster as Pay>::Balance;
 pub type PositiveImbalanceOf<T, I = ()> = <<T as Config<I>>::Currency as Currency<
 	<T as frame_system::Config>::AccountId,
 >>::PositiveImbalance;
@@ -94,6 +111,7 @@ pub type NegativeImbalanceOf<T, I = ()> = <<T as Config<I>>::Currency as Currenc
 	<T as frame_system::Config>::AccountId,
 >>::NegativeImbalance;
 type AccountIdLookupOf<T> = <<T as frame_system::Config>::Lookup as StaticLookup>::Source;
+type BeneficiaryLookupOf<T, I> = <<T as Config<I>>::BeneficiaryLookup as StaticLookup>::Source;
 
 /// A trait to allow the Treasury Pallet to spend its funds for other purposes.
 /// There is an expectation that the implementer of this trait will correctly manage
@@ -133,10 +151,47 @@ pub struct Proposal<AccountId, Balance> {
 	bond: Balance,
 }
 
+/// The state of the payment claim.
+#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))]
+#[derive(Encode, Decode, Clone, PartialEq, Eq, MaxEncodedLen, RuntimeDebug, TypeInfo)]
+pub enum PaymentState<Id> {
+	/// Pending claim.
+	Pending,
+	/// Payment attempted with a payment identifier.
+	Attempted { id: Id },
+	/// Payment failed.
+	Failed,
+}
+
+/// Info regarding an approved treasury spend.
+#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))]
+#[derive(Encode, Decode, Clone, PartialEq, Eq, MaxEncodedLen, RuntimeDebug, TypeInfo)]
+pub struct SpendStatus<AssetKind, AssetBalance, Beneficiary, BlockNumber, PaymentId> {
+	/// The kind of asset to be spent.
+	asset_kind: AssetKind,
+	/// The asset amount of the spend.
+	amount: AssetBalance,
+	/// The beneficiary of the spend.
+	beneficiary: Beneficiary,
+	/// The block number from which the spend can be claimed.
+	valid_from: BlockNumber,
+	/// The block number by which the spend has to be claimed.
+	expire_at: BlockNumber,
+	/// The status of the payout/claim.
+	status: PaymentState<PaymentId>,
+}
+
+/// Index of an approved treasury spend.
+pub type SpendIndex = u32;
+
 #[frame_support::pallet]
 pub mod pallet {
 	use super::*;
-	use frame_support::{dispatch_context::with_context, pallet_prelude::*};
+	use frame_support::{
+		dispatch_context::with_context,
+		pallet_prelude::*,
+		traits::tokens::{ConversionFromAssetBalance, PaymentStatus},
+	};
 	use frame_system::pallet_prelude::*;
 
 	#[pallet::pallet]
@@ -201,9 +256,38 @@ pub mod pallet {
 		type MaxApprovals: Get<u32>;
 
 		/// The origin required for approving spends from the treasury outside of the proposal
-		/// process. The `Success` value is the maximum amount that this origin is allowed to
-		/// spend at a time.
+		/// process. The `Success` value is the maximum amount in a native asset that this origin
+		/// is allowed to spend at a time.
 		type SpendOrigin: EnsureOrigin<Self::RuntimeOrigin, Success = BalanceOf<Self, I>>;
+
+		/// Type parameter representing the asset kinds to be spent from the treasury.
+		type AssetKind: Parameter + MaxEncodedLen;
+
+		/// Type parameter used to identify the beneficiaries eligible to receive treasury spends.
+		type Beneficiary: Parameter + MaxEncodedLen;
+
+		/// Converting trait to take a source type and convert to [`Self::Beneficiary`].
+		type BeneficiaryLookup: StaticLookup<Target = Self::Beneficiary>;
+
+	/// Type for processing spends of [`Self::AssetKind`] in favor of [`Self::Beneficiary`].
+		type Paymaster: Pay<Beneficiary = Self::Beneficiary, AssetKind = Self::AssetKind>;
+
+	/// Type for converting the balance of an [`Self::AssetKind`] to the balance of the native
+		/// asset, solely for the purpose of asserting the result against the maximum allowed spend
+		/// amount of the [`Self::SpendOrigin`].
+		type BalanceConverter: ConversionFromAssetBalance<
+			<Self::Paymaster as Pay>::Balance,
+			Self::AssetKind,
+			BalanceOf<Self, I>,
+		>;
+
+		/// The period during which an approved treasury spend has to be claimed.
+		#[pallet::constant]
+		type PayoutPeriod: Get<BlockNumberFor<Self>>;
+
+		/// Helper type for benchmarks.
+		#[cfg(feature = "runtime-benchmarks")]
+		type BenchmarkHelper: ArgumentsFactory<Self::AssetKind, Self::Beneficiary>;
 	}
 
 	/// Number of proposals that have been made.
@@ -233,6 +317,27 @@ pub mod pallet {
 	pub type Approvals<T: Config<I>, I: 'static = ()> =
 		StorageValue<_, BoundedVec<ProposalIndex, T::MaxApprovals>, ValueQuery>;
 
+	/// The count of spends that have been made.
+	#[pallet::storage]
+	pub(crate) type SpendCount<T, I = ()> = StorageValue<_, SpendIndex, ValueQuery>;
+
+	/// Spends that have been approved and are being processed.
+	// Hasher: Twox is safe since `SpendIndex` is an internal count-based index.
+	#[pallet::storage]
+	pub type Spends<T: Config<I>, I: 'static = ()> = StorageMap<
+		_,
+		Twox64Concat,
+		SpendIndex,
+		SpendStatus<
+			T::AssetKind,
+			AssetBalanceOf<T, I>,
+			T::Beneficiary,
+			BlockNumberFor<T>,
+			<T::Paymaster as Pay>::Id,
+		>,
+		OptionQuery,
+	>;
+
 	#[pallet::genesis_config]
 	#[derive(frame_support::DefaultNoBound)]
 	pub struct GenesisConfig<T: Config<I>, I: 'static = ()> {
@@ -277,6 +382,24 @@ pub mod pallet {
 		},
 		/// The inactive funds of the pallet have been updated.
 		UpdatedInactive { reactivated: BalanceOf<T, I>, deactivated: BalanceOf<T, I> },
+		/// A new asset spend proposal has been approved.
+		AssetSpendApproved {
+			index: SpendIndex,
+			asset_kind: T::AssetKind,
+			amount: AssetBalanceOf<T, I>,
+			beneficiary: T::Beneficiary,
+			valid_from: BlockNumberFor<T>,
+			expire_at: BlockNumberFor<T>,
+		},
+		/// An approved spend was voided.
+		AssetSpendVoided { index: SpendIndex },
+		/// A payment happened.
+		Paid { index: SpendIndex, payment_id: <T::Paymaster as Pay>::Id },
+		/// A payment failed and can be retried.
+		PaymentFailed { index: SpendIndex, payment_id: <T::Paymaster as Pay>::Id },
+		/// A spend was processed and removed from the storage. It might have been successfully
+		/// paid or it may have expired.
+		SpendProcessed { index: SpendIndex },
 	}
 
 	/// Error for the treasury pallet.
@@ -284,7 +407,7 @@ pub mod pallet {
 	pub enum Error<T, I = ()> {
 		/// Proposer's balance is too low.
 		InsufficientProposersBalance,
-		/// No proposal or bounty at that index.
+		/// No proposal, bounty or spend at that index.
 		InvalidIndex,
 		/// Too many approvals in the queue.
 		TooManyApprovals,
@@ -293,6 +416,20 @@ pub mod pallet {
 		InsufficientPermission,
 		/// Proposal has not been approved.
 		ProposalNotApproved,
+		/// The balance of the asset kind is not convertible to the balance of the native asset.
+		FailedToConvertBalance,
+		/// The spend has expired and cannot be claimed.
+		SpendExpired,
+		/// The spend is not yet eligible for payout.
+		EarlyPayout,
+		/// The payment has already been attempted.
+		AlreadyAttempted,
+		/// There was some issue with the mechanism of payment.
+		PayoutError,
+		/// The payout was not yet attempted/claimed.
+		NotAttempted,
+		/// The payment has neither failed nor succeeded yet.
+		Inconclusive,
 	}
 
 	#[pallet::hooks]
@@ -328,12 +465,22 @@ pub mod pallet {
 
 	#[pallet::call]
 	impl<T: Config<I>, I: 'static> Pallet<T, I> {
-		/// Put forward a suggestion for spending. A deposit proportional to the value
-		/// is reserved and slashed if the proposal is rejected. It is returned once the
-		/// proposal is awarded.
+		/// Put forward a suggestion for spending.
 		///
-		/// ## Complexity
+		/// ## Dispatch Origin
+		///
+		/// Must be signed.
+		///
+		/// ## Details
+		/// A deposit proportional to the value is reserved and slashed if the proposal is rejected.
+		/// It is returned once the proposal is awarded.
+		///
+		/// ### Complexity
 		/// - O(1)
+		///
+		/// ## Events
+		///
+		/// Emits [`Event::Proposed`] if successful.
 		#[pallet::call_index(0)]
 		#[pallet::weight(T::WeightInfo::propose_spend())]
 		#[allow(deprecated)]
@@ -360,12 +507,21 @@ pub mod pallet {
 			Ok(())
 		}
 
-		/// Reject a proposed spend. The original deposit will be slashed.
+		/// Reject a proposed spend.
 		///
-		/// May only be called from `T::RejectOrigin`.
+		/// ## Dispatch Origin
 		///
-		/// ## Complexity
+		/// Must be [`Config::RejectOrigin`].
+		///
+		/// ## Details
+		/// The original deposit will be slashed.
+		///
+		/// ### Complexity
 		/// - O(1)
+		///
+		/// ## Events
+		///
+		/// Emits [`Event::Rejected`] if successful.
 		#[pallet::call_index(1)]
 		#[pallet::weight((T::WeightInfo::reject_proposal(), DispatchClass::Operational))]
 		#[allow(deprecated)]
@@ -391,13 +547,23 @@ pub mod pallet {
 			Ok(())
 		}
 
-		/// Approve a proposal. At a later time, the proposal will be allocated to the beneficiary
-		/// and the original deposit will be returned.
+		/// Approve a proposal.
 		///
-		/// May only be called from `T::ApproveOrigin`.
+		/// ## Dispatch Origin
 		///
-		/// ## Complexity
+		/// Must be [`Config::ApproveOrigin`].
+		///
+		/// ## Details
+		///
+		/// At a later time, the proposal will be allocated to the beneficiary and the original
+		/// deposit will be returned.
+		///
+		/// ### Complexity
 		///  - O(1).
+		///
+		/// ## Events
+		///
+		/// No events are emitted from this dispatch.
 		#[pallet::call_index(2)]
 		#[pallet::weight((T::WeightInfo::approve_proposal(T::MaxApprovals::get()), DispatchClass::Operational))]
 		#[allow(deprecated)]
@@ -418,15 +584,24 @@ pub mod pallet {
 
 		/// Propose and approve a spend of treasury funds.
 		///
-		/// - `origin`: Must be `SpendOrigin` with the `Success` value being at least `amount`.
-		/// - `amount`: The amount to be transferred from the treasury to the `beneficiary`.
-		/// - `beneficiary`: The destination account for the transfer.
+		/// ## Dispatch Origin
 		///
+		/// Must be [`Config::SpendOrigin`] with the `Success` value being at least `amount`.
+		///
+		/// ### Details
 		/// NOTE: For record-keeping purposes, the proposer is deemed to be equivalent to the
 		/// beneficiary.
+		///
+		/// ### Parameters
+		/// - `amount`: The amount to be transferred from the treasury to the `beneficiary`.
+		/// - `beneficiary`: The destination account for the transfer.
+		///
+		/// ## Events
+		///
+		/// Emits [`Event::SpendApproved`] if successful.
 		#[pallet::call_index(3)]
-		#[pallet::weight(T::WeightInfo::spend())]
-		pub fn spend(
+		#[pallet::weight(T::WeightInfo::spend_local())]
+		pub fn spend_local(
 			origin: OriginFor<T>,
 			#[pallet::compact] amount: BalanceOf<T, I>,
 			beneficiary: AccountIdLookupOf<T>,
@@ -472,18 +647,26 @@ pub mod pallet {
 		}
 
 		/// Force a previously approved proposal to be removed from the approval queue.
+		///
+		/// ## Dispatch Origin
+		///
+		/// Must be [`Config::RejectOrigin`].
+		///
+		/// ## Details
+		///
 		/// The original deposit will no longer be returned.
 		///
-		/// May only be called from `T::RejectOrigin`.
+		/// ### Parameters
 		/// - `proposal_id`: The index of a proposal
 		///
-		/// ## Complexity
+		/// ### Complexity
 		/// - O(A) where `A` is the number of approvals
 		///
-		/// Errors:
-		/// - `ProposalNotApproved`: The `proposal_id` supplied was not found in the approval queue,
-		/// i.e., the proposal has not been approved. This could also mean the proposal does not
-		/// exist altogether, thus there is no way it would have been approved in the first place.
+		/// ### Errors
+		/// - [`Error::ProposalNotApproved`]: The `proposal_id` supplied was not found in the
+		///   approval queue, i.e., the proposal has not been approved. This could also mean the
+		///   proposal does not exist altogether, thus there is no way it would have been approved
+		///   in the first place.
 		#[pallet::call_index(4)]
 		#[pallet::weight((T::WeightInfo::remove_approval(), DispatchClass::Operational))]
 		pub fn remove_approval(
@@ -503,6 +686,229 @@ pub mod pallet {
 
 			Ok(())
 		}
+
+		/// Propose and approve a spend of treasury funds.
+		///
+		/// ## Dispatch Origin
+		///
+		/// Must be [`Config::SpendOrigin`] with the `Success` value being at least
+		/// `amount` of `asset_kind` in the native asset. The amount of `asset_kind` is converted
+		/// for assertion using the [`Config::BalanceConverter`].
+		///
+		/// ## Details
+		///
+		/// Create an approved spend for transferring a specific `amount` of `asset_kind` to a
+		/// designated beneficiary. The spend must be claimed using the `payout` dispatchable within
+		/// the [`Config::PayoutPeriod`].
+		///
+		/// ### Parameters
+		/// - `asset_kind`: An indicator of the specific asset class to be spent.
+		/// - `amount`: The amount to be transferred from the treasury to the `beneficiary`.
+		/// - `beneficiary`: The beneficiary of the spend.
+		/// - `valid_from`: The block number from which the spend can be claimed. It can refer to
+		///   the past if the resulting spend has not yet expired according to the
+		///   [`Config::PayoutPeriod`]. If `None`, the spend can be claimed immediately after
+		///   approval.
+		///
+		/// ## Events
+		///
+		/// Emits [`Event::AssetSpendApproved`] if successful.
+		#[pallet::call_index(5)]
+		#[pallet::weight(T::WeightInfo::spend())]
+		pub fn spend(
+			origin: OriginFor<T>,
+			asset_kind: Box<T::AssetKind>,
+			#[pallet::compact] amount: AssetBalanceOf<T, I>,
+			beneficiary: Box<BeneficiaryLookupOf<T, I>>,
+			valid_from: Option<BlockNumberFor<T>>,
+		) -> DispatchResult {
+			let max_amount = T::SpendOrigin::ensure_origin(origin)?;
+			let beneficiary = T::BeneficiaryLookup::lookup(*beneficiary)?;
+
+			let now = frame_system::Pallet::<T>::block_number();
+			let valid_from = valid_from.unwrap_or(now);
+			let expire_at = valid_from.saturating_add(T::PayoutPeriod::get());
+			ensure!(expire_at > now, Error::<T, I>::SpendExpired);
+
+			let native_amount =
+				T::BalanceConverter::from_asset_balance(amount, *asset_kind.clone())
+					.map_err(|_| Error::<T, I>::FailedToConvertBalance)?;
+
+			ensure!(native_amount <= max_amount, Error::<T, I>::InsufficientPermission);
+
+			with_context::<SpendContext<BalanceOf<T, I>>, _>(|v| {
+				let context = v.or_default();
+				// We group based on `max_amount`, to distinguish between different kind of
+				// origins. (assumes that all origins have different `max_amount`)
+				//
+				// Worst case is that we reject some "valid" request.
+				let spend = context.spend_in_context.entry(max_amount).or_default();
+
+				// Ensure that we don't overflow nor use more than `max_amount`
+				if spend.checked_add(&native_amount).map(|s| s > max_amount).unwrap_or(true) {
+					Err(Error::<T, I>::InsufficientPermission)
+				} else {
+					*spend = spend.saturating_add(native_amount);
+					Ok(())
+				}
+			})
+			.unwrap_or(Ok(()))?;
+
+			let index = SpendCount::<T, I>::get();
+			Spends::<T, I>::insert(
+				index,
+				SpendStatus {
+					asset_kind: *asset_kind.clone(),
+					amount,
+					beneficiary: beneficiary.clone(),
+					valid_from,
+					expire_at,
+					status: PaymentState::Pending,
+				},
+			);
+			SpendCount::<T, I>::put(index + 1);
+
+			Self::deposit_event(Event::AssetSpendApproved {
+				index,
+				asset_kind: *asset_kind,
+				amount,
+				beneficiary,
+				valid_from,
+				expire_at,
+			});
+			Ok(())
+		}
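Because the running total is grouped per `max_amount` inside the dispatch context, an origin cannot exceed its cap by splitting a spend across a `utility::batch_all`. A sketch, mirroring the batching tests added later in this diff (account `10` is capped at `5` native units in the mock runtime):

```rust
new_test_ext().execute_with(|| {
	// 2 + 2 = 4 <= 5: the whole batch is accepted.
	assert_ok!(RuntimeCall::from(UtilityCall::batch_all {
		calls: vec![
			RuntimeCall::from(TreasuryCall::spend_local { amount: 2, beneficiary: 100 }),
			RuntimeCall::from(TreasuryCall::spend_local { amount: 2, beneficiary: 101 }),
		]
	})
	.dispatch(RuntimeOrigin::signed(10)));
	// A batch totalling 2 + 4 = 6 > 5 would instead fail with
	// `InsufficientPermission`, rolling back the entire `batch_all`.
});
```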
+
+		/// Claim a spend.
+		///
+		/// ## Dispatch Origin
+		///
+		/// Must be signed.
+		///
+		/// ## Details
+		///
+		/// Spends must be claimed within some temporal bounds. A spend may be claimed within one
+		/// [`Config::PayoutPeriod`] from the `valid_from` block.
+		/// In case of a payout failure, the spend status must be updated with the `check_status`
+		/// dispatchable before retrying with the current function.
+		///
+		/// ### Parameters
+		/// - `index`: The spend index.
+		///
+		/// ## Events
+		///
+		/// Emits [`Event::Paid`] if successful.
+		#[pallet::call_index(6)]
+		#[pallet::weight(T::WeightInfo::payout())]
+		pub fn payout(origin: OriginFor<T>, index: SpendIndex) -> DispatchResult {
+			ensure_signed(origin)?;
+			let mut spend = Spends::<T, I>::get(index).ok_or(Error::<T, I>::InvalidIndex)?;
+			let now = frame_system::Pallet::<T>::block_number();
+			ensure!(now >= spend.valid_from, Error::<T, I>::EarlyPayout);
+			ensure!(spend.expire_at > now, Error::<T, I>::SpendExpired);
+			ensure!(
+				matches!(spend.status, PaymentState::Pending | PaymentState::Failed),
+				Error::<T, I>::AlreadyAttempted
+			);
+
+			let id = T::Paymaster::pay(&spend.beneficiary, spend.asset_kind.clone(), spend.amount)
+				.map_err(|_| Error::<T, I>::PayoutError)?;
+
+			spend.status = PaymentState::Attempted { id };
+			Spends::<T, I>::insert(index, spend);
+
+			Self::deposit_event(Event::<T, I>::Paid { index, payment_id: id });
+
+			Ok(())
+		}
+
+		/// Check the status of the spend and remove it from the storage if processed.
+		///
+		/// ## Dispatch Origin
+		///
+		/// Must be signed.
+		///
+		/// ## Details
+		///
+		/// The status check is a prerequisite for retrying a failed payout.
+		/// If a spend has either succeeded or expired, it is removed from the storage by this
+		/// function. In such instances, transaction fees are refunded.
+		///
+		/// ### Parameters
+		/// - `index`: The spend index.
+		///
+		/// ## Events
+		///
+		/// Emits [`Event::PaymentFailed`] if the spend payout has failed.
+		/// Emits [`Event::SpendProcessed`] if the spend payout has succeeded.
+		#[pallet::call_index(7)]
+		#[pallet::weight(T::WeightInfo::check_status())]
+		pub fn check_status(origin: OriginFor<T>, index: SpendIndex) -> DispatchResultWithPostInfo {
+			use PaymentState as State;
+			use PaymentStatus as Status;
+
+			ensure_signed(origin)?;
+			let mut spend = Spends::<T, I>::get(index).ok_or(Error::<T, I>::InvalidIndex)?;
+			let now = frame_system::Pallet::<T>::block_number();
+
+			if now > spend.expire_at && !matches!(spend.status, State::Attempted { .. }) {
+				// spend has expired and no further status update is expected.
+				Spends::<T, I>::remove(index);
+				Self::deposit_event(Event::<T, I>::SpendProcessed { index });
+				return Ok(Pays::No.into())
+			}
+
+			let payment_id = match spend.status {
+				State::Attempted { id } => id,
+				_ => return Err(Error::<T, I>::NotAttempted.into()),
+			};
+
+			match T::Paymaster::check_payment(payment_id) {
+				Status::Failure => {
+					spend.status = PaymentState::Failed;
+					Spends::<T, I>::insert(index, spend);
+					Self::deposit_event(Event::<T, I>::PaymentFailed { index, payment_id });
+				},
+				Status::Success | Status::Unknown => {
+					Spends::<T, I>::remove(index);
+					Self::deposit_event(Event::<T, I>::SpendProcessed { index });
+					return Ok(Pays::No.into())
+				},
+				Status::InProgress => return Err(Error::<T, I>::Inconclusive.into()),
+			}
+			Ok(Pays::Yes.into())
+		}
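Note the interplay with `payout`: a failed payment cannot simply be re-dispatched; `check_status` must first record the failure. A sketch of the retry flow, mirroring `payout_retry_works` later in this diff (`get_payment_id` and `set_status` are the test helpers from `tests.rs`):

```rust
assert_ok!(Treasury::payout(RuntimeOrigin::signed(1), 0));
let payment_id = get_payment_id(0).expect("no payment attempt");
set_status(payment_id, PaymentStatus::Failure);
// A second `payout` is rejected while the spend is still `Attempted`...
assert_noop!(Treasury::payout(RuntimeOrigin::signed(1), 0), Error::<Test, _>::AlreadyAttempted);
// ...but once `check_status` has recorded the failure, it can be retried.
assert_ok!(Treasury::check_status(RuntimeOrigin::signed(1), 0));
assert_ok!(Treasury::payout(RuntimeOrigin::signed(1), 0));
```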
+
+		/// Void a previously approved spend.
+		///
+		/// ## Dispatch Origin
+		///
+		/// Must be [`Config::RejectOrigin`].
+		///
+		/// ## Details
+		///
+		/// Voiding a spend is only possible if its payout has not been attempted yet.
+		///
+		/// ### Parameters
+		/// - `index`: The spend index.
+		///
+		/// ## Events
+		///
+		/// Emits [`Event::AssetSpendVoided`] if successful.
+		#[pallet::call_index(8)]
+		#[pallet::weight(T::WeightInfo::void_spend())]
+		pub fn void_spend(origin: OriginFor<T>, index: SpendIndex) -> DispatchResult {
+			T::RejectOrigin::ensure_origin(origin)?;
+			let spend = Spends::<T, I>::get(index).ok_or(Error::<T, I>::InvalidIndex)?;
+			ensure!(
+				matches!(spend.status, PaymentState::Pending | PaymentState::Failed),
+				Error::<T, I>::AlreadyAttempted
+			);
+
+			Spends::<T, I>::remove(index);
+			Self::deposit_event(Event::<T, I>::AssetSpendVoided { index });
+			Ok(())
+		}
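Voiding in a sketch, per `void_spend_works` later in this diff: only a spend whose payout has not yet been attempted can be voided:

```rust
// Approve a spend that only becomes claimable at block 10, then void it
// before any payout attempt.
assert_ok!(Treasury::spend(RuntimeOrigin::signed(10), Box::new(1), 2, Box::new(6), Some(10)));
assert_ok!(Treasury::void_spend(RuntimeOrigin::root(), 0));
assert_eq!(Spends::<Test, _>::get(0), None);
```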
 	}
 }
 
diff --git a/substrate/frame/treasury/src/tests.rs b/substrate/frame/treasury/src/tests.rs
index ba45d5f6ff16f..4bb00547d9f28 100644
--- a/substrate/frame/treasury/src/tests.rs
+++ b/substrate/frame/treasury/src/tests.rs
@@ -19,6 +19,7 @@
 
 #![cfg(test)]
 
+use core::{cell::RefCell, marker::PhantomData};
 use sp_core::H256;
 use sp_runtime::{
 	traits::{BadOrigin, BlakeTwo256, Dispatchable, IdentityLookup},
@@ -26,8 +27,13 @@ use sp_runtime::{
 };
 
 use frame_support::{
-	assert_err_ignore_postinfo, assert_noop, assert_ok, parameter_types,
-	traits::{ConstU32, ConstU64, OnInitialize},
+	assert_err_ignore_postinfo, assert_noop, assert_ok,
+	pallet_prelude::Pays,
+	parameter_types,
+	traits::{
+		tokens::{ConversionFromAssetBalance, PaymentStatus},
+		ConstU32, ConstU64, OnInitialize,
+	},
 	PalletId,
 };
 
@@ -96,10 +102,64 @@ impl pallet_utility::Config for Test {
 	type WeightInfo = ();
 }
 
+thread_local! {
+	pub static PAID: RefCell<BTreeMap<(u128, u32), u64>> = RefCell::new(BTreeMap::new());
+	pub static STATUS: RefCell<BTreeMap<u64, PaymentStatus>> = RefCell::new(BTreeMap::new());
+	pub static LAST_ID: RefCell<u64> = RefCell::new(0u64);
+}
+
+/// paid balance for a given account and asset ids
+fn paid(who: u128, asset_id: u32) -> u64 {
+	PAID.with(|p| p.borrow().get(&(who, asset_id)).cloned().unwrap_or(0))
+}
+
+/// reduce paid balance for a given account and asset ids
+fn unpay(who: u128, asset_id: u32, amount: u64) {
+	PAID.with(|p| p.borrow_mut().entry((who, asset_id)).or_default().saturating_reduce(amount))
+}
+
+/// Set the status for a given payment id.
+fn set_status(id: u64, s: PaymentStatus) {
+	STATUS.with(|m| m.borrow_mut().insert(id, s));
+}
+
+pub struct TestPay;
+impl Pay for TestPay {
+	type Beneficiary = u128;
+	type Balance = u64;
+	type Id = u64;
+	type AssetKind = u32;
+	type Error = ();
+
+	fn pay(
+		who: &Self::Beneficiary,
+		asset_kind: Self::AssetKind,
+		amount: Self::Balance,
+	) -> Result<Self::Id, Self::Error> {
+		PAID.with(|paid| *paid.borrow_mut().entry((*who, asset_kind)).or_default() += amount);
+		Ok(LAST_ID.with(|lid| {
+			let x = *lid.borrow();
+			lid.replace(x + 1);
+			x
+		}))
+	}
+	fn check_payment(id: Self::Id) -> PaymentStatus {
+		STATUS.with(|s| s.borrow().get(&id).cloned().unwrap_or(PaymentStatus::Unknown))
+	}
+	#[cfg(feature = "runtime-benchmarks")]
+	fn ensure_successful(_: &Self::Beneficiary, _: Self::AssetKind, _: Self::Balance) {}
+	#[cfg(feature = "runtime-benchmarks")]
+	fn ensure_concluded(id: Self::Id) {
+		set_status(id, PaymentStatus::Failure)
+	}
+}
+
 parameter_types! {
 	pub const ProposalBond: Permill = Permill::from_percent(5);
 	pub const Burn: Permill = Permill::from_percent(50);
 	pub const TreasuryPalletId: PalletId = PalletId(*b"py/trsry");
+	pub TreasuryAccount: u128 = Treasury::account_id();
+	pub const SpendPayoutPeriod: u64 = 5;
 }
 pub struct TestSpendOrigin;
 impl frame_support::traits::EnsureOrigin<RuntimeOrigin> for TestSpendOrigin {
@@ -120,6 +180,16 @@ impl frame_support::traits::EnsureOrigin<RuntimeOrigin> for TestSpendOrigin {
 	}
 }
 
+pub struct MulBy<N>(PhantomData<N>);
+impl<N: Get<u64>> ConversionFromAssetBalance<u64, u32, u64> for MulBy<N> {
+	type Error = ();
+	fn from_asset_balance(balance: u64, _asset_id: u32) -> Result<u64, Self::Error> {
+		balance.checked_mul(N::get()).ok_or(())
+	}
+	#[cfg(feature = "runtime-benchmarks")]
+	fn ensure_successful(_: u32) {}
+}
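With this converter, the cap check effectively doubles every asset amount: per `spend_origin_works` below, account `10` may spend up to `5` native units, so an asset `amount` of `2` (converted to `4`) passes while `3` (converted to `6`) is rejected:

```rust
// Sketch: `MulBy<ConstU64<2>>` is wired in as `BalanceConverter` just below.
assert_ok!(Treasury::spend(RuntimeOrigin::signed(10), Box::new(1), 2, Box::new(6), None));
assert_noop!(
	Treasury::spend(RuntimeOrigin::signed(10), Box::new(1), 3, Box::new(6), None),
	Error::<Test, _>::InsufficientPermission
);
```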
+
 impl Config for Test {
 	type PalletId = TreasuryPalletId;
 	type Currency = pallet_balances::Pallet<Test>;
@@ -137,6 +207,14 @@ impl Config for Test {
 	type SpendFunds = ();
 	type MaxApprovals = ConstU32<100>;
 	type SpendOrigin = TestSpendOrigin;
+	type AssetKind = u32;
+	type Beneficiary = u128;
+	type BeneficiaryLookup = IdentityLookup<Self::Beneficiary>;
+	type Paymaster = TestPay;
+	type BalanceConverter = MulBy<ConstU64<2>>;
+	type PayoutPeriod = SpendPayoutPeriod;
+	#[cfg(feature = "runtime-benchmarks")]
+	type BenchmarkHelper = ();
 }
 
 pub fn new_test_ext() -> sp_io::TestExternalities {
@@ -151,6 +229,14 @@ pub fn new_test_ext() -> sp_io::TestExternalities {
 	t.into()
 }
 
+fn get_payment_id(i: SpendIndex) -> Option<u64> {
+	let spend = Spends::<Test, _>::get(i).expect("no spend");
+	match spend.status {
+		PaymentState::Attempted { id } => Some(id),
+		_ => None,
+	}
+}
+
 #[test]
 fn genesis_config_works() {
 	new_test_ext().execute_with(|| {
@@ -160,46 +246,49 @@ fn genesis_config_works() {
 }
 
 #[test]
-fn spend_origin_permissioning_works() {
+fn spend_local_origin_permissioning_works() {
 	new_test_ext().execute_with(|| {
-		assert_noop!(Treasury::spend(RuntimeOrigin::signed(1), 1, 1), BadOrigin);
+		assert_noop!(Treasury::spend_local(RuntimeOrigin::signed(1), 1, 1), BadOrigin);
 		assert_noop!(
-			Treasury::spend(RuntimeOrigin::signed(10), 6, 1),
+			Treasury::spend_local(RuntimeOrigin::signed(10), 6, 1),
 			Error::<Test>::InsufficientPermission
 		);
 		assert_noop!(
-			Treasury::spend(RuntimeOrigin::signed(11), 11, 1),
+			Treasury::spend_local(RuntimeOrigin::signed(11), 11, 1),
 			Error::<Test>::InsufficientPermission
 		);
 		assert_noop!(
-			Treasury::spend(RuntimeOrigin::signed(12), 21, 1),
+			Treasury::spend_local(RuntimeOrigin::signed(12), 21, 1),
 			Error::<Test>::InsufficientPermission
 		);
 		assert_noop!(
-			Treasury::spend(RuntimeOrigin::signed(13), 51, 1),
+			Treasury::spend_local(RuntimeOrigin::signed(13), 51, 1),
 			Error::<Test>::InsufficientPermission
 		);
 	});
 }
 
+#[docify::export]
 #[test]
-fn spend_origin_works() {
+fn spend_local_origin_works() {
 	new_test_ext().execute_with(|| {
 		// Fund the treasury pot so there is something to spend.
 		Balances::make_free_balance_be(&Treasury::account_id(), 101);
-		assert_ok!(Treasury::spend(RuntimeOrigin::signed(10), 5, 6));
-		assert_ok!(Treasury::spend(RuntimeOrigin::signed(10), 5, 6));
-		assert_ok!(Treasury::spend(RuntimeOrigin::signed(10), 5, 6));
-		assert_ok!(Treasury::spend(RuntimeOrigin::signed(10), 5, 6));
-		assert_ok!(Treasury::spend(RuntimeOrigin::signed(11), 10, 6));
-		assert_ok!(Treasury::spend(RuntimeOrigin::signed(12), 20, 6));
-		assert_ok!(Treasury::spend(RuntimeOrigin::signed(13), 50, 6));
-
+		// approve spend of some amount to beneficiary `6`.
+		assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(10), 5, 6));
+		assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(10), 5, 6));
+		assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(10), 5, 6));
+		assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(10), 5, 6));
+		assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(11), 10, 6));
+		assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(12), 20, 6));
+		assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(13), 50, 6));
+		// free balance of `6` is zero, spend period has not passed.
 		<Treasury as OnInitialize<u64>>::on_initialize(1);
 		assert_eq!(Balances::free_balance(6), 0);
-
+		// free balance of `6` is `100`, spend period has passed.
 		<Treasury as OnInitialize<u64>>::on_initialize(2);
 		assert_eq!(Balances::free_balance(6), 100);
+		// `100` spent, `1` burned.
 		assert_eq!(Treasury::pot(), 0);
 	});
 }
@@ -578,14 +667,49 @@ fn remove_already_removed_approval_fails() {
 	});
 }
 
+#[test]
+fn spending_local_in_batch_respects_max_total() {
+	new_test_ext().execute_with(|| {
+		// Respect the `max_total` for the given origin.
+		assert_ok!(RuntimeCall::from(UtilityCall::batch_all {
+			calls: vec![
+				RuntimeCall::from(TreasuryCall::spend_local { amount: 2, beneficiary: 100 }),
+				RuntimeCall::from(TreasuryCall::spend_local { amount: 2, beneficiary: 101 })
+			]
+		})
+		.dispatch(RuntimeOrigin::signed(10)));
+
+		assert_err_ignore_postinfo!(
+			RuntimeCall::from(UtilityCall::batch_all {
+				calls: vec![
+					RuntimeCall::from(TreasuryCall::spend_local { amount: 2, beneficiary: 100 }),
+					RuntimeCall::from(TreasuryCall::spend_local { amount: 4, beneficiary: 101 })
+				]
+			})
+			.dispatch(RuntimeOrigin::signed(10)),
+			Error::<Test, _>::InsufficientPermission
+		);
+	})
+}
+
 #[test]
 fn spending_in_batch_respects_max_total() {
 	new_test_ext().execute_with(|| {
 		// Respect the `max_total` for the given origin.
 		assert_ok!(RuntimeCall::from(UtilityCall::batch_all {
 			calls: vec![
-				RuntimeCall::from(TreasuryCall::spend { amount: 2, beneficiary: 100 }),
-				RuntimeCall::from(TreasuryCall::spend { amount: 2, beneficiary: 101 })
+				RuntimeCall::from(TreasuryCall::spend {
+					asset_kind: Box::new(1),
+					amount: 1,
+					beneficiary: Box::new(100),
+					valid_from: None,
+				}),
+				RuntimeCall::from(TreasuryCall::spend {
+					asset_kind: Box::new(1),
+					amount: 1,
+					beneficiary: Box::new(101),
+					valid_from: None,
+				})
 			]
 		})
 		.dispatch(RuntimeOrigin::signed(10)));
@@ -593,8 +717,18 @@ fn spending_in_batch_respects_max_total() {
 		assert_err_ignore_postinfo!(
 			RuntimeCall::from(UtilityCall::batch_all {
 				calls: vec![
-					RuntimeCall::from(TreasuryCall::spend { amount: 2, beneficiary: 100 }),
-					RuntimeCall::from(TreasuryCall::spend { amount: 4, beneficiary: 101 })
+					RuntimeCall::from(TreasuryCall::spend {
+						asset_kind: Box::new(1),
+						amount: 2,
+						beneficiary: Box::new(100),
+						valid_from: None,
+					}),
+					RuntimeCall::from(TreasuryCall::spend {
+						asset_kind: Box::new(1),
+						amount: 2,
+						beneficiary: Box::new(101),
+						valid_from: None,
+					})
 				]
 			})
 			.dispatch(RuntimeOrigin::signed(10)),
@@ -602,3 +736,251 @@ fn spending_in_batch_respects_max_total() {
 		);
 	})
 }
+
+#[test]
+fn spend_origin_works() {
+	new_test_ext().execute_with(|| {
+		assert_ok!(Treasury::spend(RuntimeOrigin::signed(10), Box::new(1), 1, Box::new(6), None));
+		assert_ok!(Treasury::spend(RuntimeOrigin::signed(10), Box::new(1), 2, Box::new(6), None));
+		assert_noop!(
+			Treasury::spend(RuntimeOrigin::signed(10), Box::new(1), 3, Box::new(6), None),
+			Error::<Test, _>::InsufficientPermission
+		);
+		assert_ok!(Treasury::spend(RuntimeOrigin::signed(11), Box::new(1), 5, Box::new(6), None));
+		assert_noop!(
+			Treasury::spend(RuntimeOrigin::signed(11), Box::new(1), 6, Box::new(6), None),
+			Error::<Test, _>::InsufficientPermission
+		);
+		assert_ok!(Treasury::spend(RuntimeOrigin::signed(12), Box::new(1), 10, Box::new(6), None));
+		assert_noop!(
+			Treasury::spend(RuntimeOrigin::signed(12), Box::new(1), 11, Box::new(6), None),
+			Error::<Test, _>::InsufficientPermission
+		);
+
+		assert_eq!(SpendCount::<Test, _>::get(), 4);
+		assert_eq!(Spends::<Test, _>::iter().count(), 4);
+	});
+}
+
+#[test]
+fn spend_works() {
+	new_test_ext().execute_with(|| {
+		System::set_block_number(1);
+		assert_ok!(Treasury::spend(RuntimeOrigin::signed(10), Box::new(1), 2, Box::new(6), None));
+
+		assert_eq!(SpendCount::<Test, _>::get(), 1);
+		assert_eq!(
+			Spends::<Test, _>::get(0).unwrap(),
+			SpendStatus {
+				asset_kind: 1,
+				amount: 2,
+				beneficiary: 6,
+				valid_from: 1,
+				expire_at: 6,
+				status: PaymentState::Pending,
+			}
+		);
+		System::assert_last_event(
+			Event::<Test, _>::AssetSpendApproved {
+				index: 0,
+				asset_kind: 1,
+				amount: 2,
+				beneficiary: 6,
+				valid_from: 1,
+				expire_at: 6,
+			}
+			.into(),
+		);
+	});
+}
+
+#[test]
+fn spend_expires() {
+	new_test_ext().execute_with(|| {
+		assert_eq!(<Test as Config>::PayoutPeriod::get(), 5);
+
+		// spend `0` expires 5 blocks after its creation.
+		System::set_block_number(1);
+		assert_ok!(Treasury::spend(RuntimeOrigin::signed(10), Box::new(1), 2, Box::new(6), None));
+		System::set_block_number(6);
+		assert_noop!(Treasury::payout(RuntimeOrigin::signed(1), 0), Error::<Test, _>::SpendExpired);
+
+		// a spend cannot be approved since it has already expired.
+		assert_noop!(
+			Treasury::spend(RuntimeOrigin::signed(10), Box::new(1), 2, Box::new(6), Some(0)),
+			Error::<Test, _>::SpendExpired
+		);
+	});
+}
+
+#[docify::export]
+#[test]
+fn spend_payout_works() {
+	new_test_ext().execute_with(|| {
+		System::set_block_number(1);
+		// approve a spend of `2` coins of asset `1` to beneficiary `6`, valid from now.
+		assert_ok!(Treasury::spend(RuntimeOrigin::signed(10), Box::new(1), 2, Box::new(6), None));
+		// payout the spend.
+		assert_ok!(Treasury::payout(RuntimeOrigin::signed(1), 0));
+		// beneficiary received `2` coins of asset `1`.
+		assert_eq!(paid(6, 1), 2);
+		assert_eq!(SpendCount::<Test, _>::get(), 1);
+		let payment_id = get_payment_id(0).expect("no payment attempt");
+		System::assert_last_event(Event::<Test, _>::Paid { index: 0, payment_id }.into());
+		set_status(payment_id, PaymentStatus::Success);
+		// the payment succeeded.
+		assert_ok!(Treasury::check_status(RuntimeOrigin::signed(1), 0));
+		System::assert_last_event(Event::<Test, _>::SpendProcessed { index: 0 }.into());
+		// cannot payout the same spend twice.
+		assert_noop!(Treasury::payout(RuntimeOrigin::signed(1), 0), Error::<Test, _>::InvalidIndex);
+	});
+}
+
+#[test]
+fn payout_retry_works() {
+	new_test_ext().execute_with(|| {
+		System::set_block_number(1);
+		assert_ok!(Treasury::spend(RuntimeOrigin::signed(10), Box::new(1), 2, Box::new(6), None));
+		assert_ok!(Treasury::payout(RuntimeOrigin::signed(1), 0));
+		assert_eq!(paid(6, 1), 2);
+		let payment_id = get_payment_id(0).expect("no payment attempt");
+		// the spend payment failed
+		set_status(payment_id, PaymentStatus::Failure);
+		unpay(6, 1, 2);
+		// cannot payout a spend in the attempted state
+		assert_noop!(
+			Treasury::payout(RuntimeOrigin::signed(1), 0),
+			Error::<Test, _>::AlreadyAttempted
+		);
+		// check status and update it to retry the payout again
+		assert_ok!(Treasury::check_status(RuntimeOrigin::signed(1), 0));
+		System::assert_last_event(Event::<Test, _>::PaymentFailed { index: 0, payment_id }.into());
+		// the payout can be retried now
+		assert_ok!(Treasury::payout(RuntimeOrigin::signed(1), 0));
+		assert_eq!(paid(6, 1), 2);
+	});
+}
+
+#[test]
+fn spend_valid_from_works() {
+	new_test_ext().execute_with(|| {
+		assert_eq!(<Test as Config>::PayoutPeriod::get(), 5);
+		System::set_block_number(1);
+
+		// spend valid from block `2`.
+		assert_ok!(Treasury::spend(
+			RuntimeOrigin::signed(10),
+			Box::new(1),
+			2,
+			Box::new(6),
+			Some(2)
+		));
+		assert_noop!(Treasury::payout(RuntimeOrigin::signed(1), 0), Error::<Test, _>::EarlyPayout);
+		System::set_block_number(2);
+		assert_ok!(Treasury::payout(RuntimeOrigin::signed(1), 0));
+
+		System::set_block_number(5);
+		// spend approved even if `valid_from` is in the past, since the payout period has not passed.
+		assert_ok!(Treasury::spend(
+			RuntimeOrigin::signed(10),
+			Box::new(1),
+			2,
+			Box::new(6),
+			Some(4)
+		));
+		// spend paid.
+		assert_ok!(Treasury::payout(RuntimeOrigin::signed(1), 1));
+	});
+}
+
+#[test]
+fn void_spend_works() {
+	new_test_ext().execute_with(|| {
+		System::set_block_number(1);
+		// spend cannot be voided if already attempted.
+		assert_ok!(Treasury::spend(
+			RuntimeOrigin::signed(10),
+			Box::new(1),
+			2,
+			Box::new(6),
+			Some(1)
+		));
+		assert_ok!(Treasury::payout(RuntimeOrigin::signed(1), 0));
+		assert_noop!(
+			Treasury::void_spend(RuntimeOrigin::root(), 0),
+			Error::<Test, _>::AlreadyAttempted
+		);
+
+		// void spend.
+		assert_ok!(Treasury::spend(
+			RuntimeOrigin::signed(10),
+			Box::new(1),
+			2,
+			Box::new(6),
+			Some(10)
+		));
+		assert_ok!(Treasury::void_spend(RuntimeOrigin::root(), 1));
+		assert_eq!(Spends::<Test, _>::get(1), None);
+	});
+}
+
+#[test]
+fn check_status_works() {
+	new_test_ext().execute_with(|| {
+		assert_eq!(<Test as Config>::PayoutPeriod::get(), 5);
+		System::set_block_number(1);
+
+		// spend `0` expired and can be removed.
+		assert_ok!(Treasury::spend(RuntimeOrigin::signed(10), Box::new(1), 2, Box::new(6), None));
+		System::set_block_number(7);
+		let info = Treasury::check_status(RuntimeOrigin::signed(1), 0).unwrap();
+		assert_eq!(info.pays_fee, Pays::No);
+		System::assert_last_event(Event::<Test, _>::SpendProcessed { index: 0 }.into());
+
+		// spend `1` payment failed and expired, hence it can be removed.
+		assert_ok!(Treasury::spend(RuntimeOrigin::signed(10), Box::new(1), 2, Box::new(6), None));
+		assert_noop!(
+			Treasury::check_status(RuntimeOrigin::signed(1), 1),
+			Error::<Test, _>::NotAttempted
+		);
+		assert_ok!(Treasury::payout(RuntimeOrigin::signed(1), 1));
+		let payment_id = get_payment_id(1).expect("no payment attempt");
+		set_status(payment_id, PaymentStatus::Failure);
+		// spend expired.
+		System::set_block_number(13);
+		let info = Treasury::check_status(RuntimeOrigin::signed(1), 1).unwrap();
+		assert_eq!(info.pays_fee, Pays::Yes);
+		System::assert_last_event(Event::<Test, _>::PaymentFailed { index: 1, payment_id }.into());
+		let info = Treasury::check_status(RuntimeOrigin::signed(1), 1).unwrap();
+		assert_eq!(info.pays_fee, Pays::No);
+		System::assert_last_event(Event::<Test, _>::SpendProcessed { index: 1 }.into());
+
+		// spend `2` payment succeeded.
+		assert_ok!(Treasury::spend(RuntimeOrigin::signed(10), Box::new(1), 2, Box::new(6), None));
+		assert_ok!(Treasury::payout(RuntimeOrigin::signed(1), 2));
+		let payment_id = get_payment_id(2).expect("no payment attempt");
+		set_status(payment_id, PaymentStatus::Success);
+		let info = Treasury::check_status(RuntimeOrigin::signed(1), 2).unwrap();
+		assert_eq!(info.pays_fee, Pays::No);
+		System::assert_last_event(Event::<Test, _>::SpendProcessed { index: 2 }.into());
+
+		// spend `3` payment in progress.
+		assert_ok!(Treasury::spend(RuntimeOrigin::signed(10), Box::new(1), 2, Box::new(6), None));
+		assert_ok!(Treasury::payout(RuntimeOrigin::signed(1), 3));
+		let payment_id = get_payment_id(3).expect("no payment attempt");
+		set_status(payment_id, PaymentStatus::InProgress);
+		assert_noop!(
+			Treasury::check_status(RuntimeOrigin::signed(1), 3),
+			Error::<Test, _>::Inconclusive
+		);
+
+		// spend `4` removed since the payment status is unknown.
+		assert_ok!(Treasury::spend(RuntimeOrigin::signed(10), Box::new(1), 2, Box::new(6), None));
+		assert_ok!(Treasury::payout(RuntimeOrigin::signed(1), 4));
+		let payment_id = get_payment_id(4).expect("no payment attempt");
+		set_status(payment_id, PaymentStatus::Unknown);
+		let info = Treasury::check_status(RuntimeOrigin::signed(1), 4).unwrap();
+		assert_eq!(info.pays_fee, Pays::No);
+		System::assert_last_event(Event::<Test, _>::SpendProcessed { index: 4 }.into());
+	});
+}
diff --git a/substrate/frame/treasury/src/weights.rs b/substrate/frame/treasury/src/weights.rs
index 8f1418f76d969..030e18980eb54 100644
--- a/substrate/frame/treasury/src/weights.rs
+++ b/substrate/frame/treasury/src/weights.rs
@@ -18,28 +18,23 @@
 //! Autogenerated weights for pallet_treasury
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2023-07-07, STEPS: `20`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
-//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024
+//! HOSTNAME: `cob`, CPU: `<UNKNOWN>`
+//! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024
 
 // Executed Command:
-// ./target/production/substrate
+// ./target/debug/substrate
 // benchmark
 // pallet
 // --chain=dev
-// --steps=50
-// --repeat=20
-// --pallet=pallet_treasury
-// --no-storage-info
-// --no-median-slopes
-// --no-min-squares
+// --steps=20
+// --repeat=2
+// --pallet=pallet-treasury
 // --extrinsic=*
-// --execution=wasm
 // --wasm-execution=compiled
 // --heap-pages=4096
-// --output=./frame/treasury/src/weights.rs
-// --header=./HEADER-APACHE2
+// --output=./frame/treasury/src/._weights.rs
 // --template=./.maintain/frame-weight-template.hbs
 
 #![cfg_attr(rustfmt, rustfmt_skip)]
@@ -52,12 +47,16 @@ use core::marker::PhantomData;
 
 /// Weight functions needed for pallet_treasury.
 pub trait WeightInfo {
-	fn spend() -> Weight;
+	fn spend_local() -> Weight;
 	fn propose_spend() -> Weight;
 	fn reject_proposal() -> Weight;
 	fn approve_proposal(p: u32, ) -> Weight;
 	fn remove_approval() -> Weight;
 	fn on_initialize_proposals(p: u32, ) -> Weight;
+	fn spend() -> Weight;
+	fn payout() -> Weight;
+	fn check_status() -> Weight;
+	fn void_spend() -> Weight;
 }
 
 /// Weights for pallet_treasury using the Substrate node and recommended hardware.
@@ -69,12 +68,12 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	/// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen)
 	/// Storage: Treasury Proposals (r:0 w:1)
 	/// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen)
-	fn spend() -> Weight {
+	fn spend_local() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `76`
 		//  Estimated: `1887`
-		// Minimum execution time: 15_057_000 picoseconds.
-		Weight::from_parts(15_803_000, 1887)
+		// Minimum execution time: 179_000_000 picoseconds.
+		Weight::from_parts(190_000_000, 1887)
 			.saturating_add(T::DbWeight::get().reads(2_u64))
 			.saturating_add(T::DbWeight::get().writes(3_u64))
 	}
@@ -86,8 +85,8 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `177`
 		//  Estimated: `1489`
-		// Minimum execution time: 28_923_000 picoseconds.
-		Weight::from_parts(29_495_000, 1489)
+		// Minimum execution time: 349_000_000 picoseconds.
+		Weight::from_parts(398_000_000, 1489)
 			.saturating_add(T::DbWeight::get().reads(1_u64))
 			.saturating_add(T::DbWeight::get().writes(2_u64))
 	}
@@ -99,8 +98,8 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `335`
 		//  Estimated: `3593`
-		// Minimum execution time: 30_539_000 picoseconds.
-		Weight::from_parts(30_986_000, 3593)
+		// Minimum execution time: 367_000_000 picoseconds.
+		Weight::from_parts(388_000_000, 3593)
 			.saturating_add(T::DbWeight::get().reads(2_u64))
 			.saturating_add(T::DbWeight::get().writes(2_u64))
 	}
@@ -111,12 +110,12 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	/// The range of component `p` is `[0, 99]`.
 	fn approve_proposal(p: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `504 + p * (8 ±0)`
+		//  Measured:  `483 + p * (9 ±0)`
 		//  Estimated: `3573`
-		// Minimum execution time: 9_320_000 picoseconds.
-		Weight::from_parts(12_606_599, 3573)
-			// Standard Error: 1_302
-			.saturating_add(Weight::from_parts(71_054, 0).saturating_mul(p.into()))
+		// Minimum execution time: 111_000_000 picoseconds.
+		Weight::from_parts(108_813_243, 3573)
+			// Standard Error: 147_887
+			.saturating_add(Weight::from_parts(683_216, 0).saturating_mul(p.into()))
 			.saturating_add(T::DbWeight::get().reads(2_u64))
 			.saturating_add(T::DbWeight::get().writes(1_u64))
 	}
@@ -126,8 +125,8 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `161`
 		//  Estimated: `1887`
-		// Minimum execution time: 7_231_000 picoseconds.
-		Weight::from_parts(7_459_000, 1887)
+		// Minimum execution time: 71_000_000 picoseconds.
+		Weight::from_parts(78_000_000, 1887)
 			.saturating_add(T::DbWeight::get().reads(1_u64))
 			.saturating_add(T::DbWeight::get().writes(1_u64))
 	}
@@ -135,27 +134,81 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	/// Proof: Treasury Deactivated (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen)
 	/// Storage: Treasury Approvals (r:1 w:1)
 	/// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen)
-	/// Storage: Treasury Proposals (r:100 w:100)
+	/// Storage: Treasury Proposals (r:99 w:99)
 	/// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen)
-	/// Storage: System Account (r:200 w:200)
+	/// Storage: System Account (r:198 w:198)
 	/// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen)
 	/// Storage: Bounties BountyApprovals (r:1 w:1)
 	/// Proof: Bounties BountyApprovals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen)
-	/// The range of component `p` is `[0, 100]`.
+	/// The range of component `p` is `[0, 99]`.
 	fn on_initialize_proposals(p: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `421 + p * (251 ±0)`
+		//  Measured:  `427 + p * (251 ±0)`
 		//  Estimated: `1887 + p * (5206 ±0)`
-		// Minimum execution time: 44_769_000 picoseconds.
-		Weight::from_parts(57_915_572, 1887)
-			// Standard Error: 59_484
-			.saturating_add(Weight::from_parts(42_343_732, 0).saturating_mul(p.into()))
+		// Minimum execution time: 614_000_000 picoseconds.
+		Weight::from_parts(498_501_558, 1887)
+			// Standard Error: 1_070_260
+			.saturating_add(Weight::from_parts(599_011_690, 0).saturating_mul(p.into()))
 			.saturating_add(T::DbWeight::get().reads(3_u64))
 			.saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(p.into())))
 			.saturating_add(T::DbWeight::get().writes(3_u64))
 			.saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(p.into())))
 			.saturating_add(Weight::from_parts(0, 5206).saturating_mul(p.into()))
 	}
+	/// Storage: AssetRate ConversionRateToNative (r:1 w:0)
+	/// Proof: AssetRate ConversionRateToNative (max_values: None, max_size: Some(36), added: 2511, mode: MaxEncodedLen)
+	/// Storage: Treasury SpendCount (r:1 w:1)
+	/// Proof: Treasury SpendCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
+	/// Storage: Treasury Spends (r:0 w:1)
+	/// Proof: Treasury Spends (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen)
+	fn spend() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `140`
+		//  Estimated: `3501`
+		// Minimum execution time: 214_000_000 picoseconds.
+		Weight::from_parts(216_000_000, 3501)
+			.saturating_add(T::DbWeight::get().reads(2_u64))
+			.saturating_add(T::DbWeight::get().writes(2_u64))
+	}
+	/// Storage: Treasury Spends (r:1 w:1)
+	/// Proof: Treasury Spends (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen)
+	/// Storage: Assets Asset (r:1 w:1)
+	/// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen)
+	/// Storage: Assets Account (r:2 w:2)
+	/// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen)
+	/// Storage: System Account (r:1 w:1)
+	/// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen)
+	fn payout() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `705`
+		//  Estimated: `6208`
+		// Minimum execution time: 760_000_000 picoseconds.
+		Weight::from_parts(822_000_000, 6208)
+			.saturating_add(T::DbWeight::get().reads(5_u64))
+			.saturating_add(T::DbWeight::get().writes(5_u64))
+	}
+	/// Storage: Treasury Spends (r:1 w:1)
+	/// Proof: Treasury Spends (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen)
+	fn check_status() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `194`
+		//  Estimated: `3534`
+		// Minimum execution time: 153_000_000 picoseconds.
+		Weight::from_parts(160_000_000, 3534)
+			.saturating_add(T::DbWeight::get().reads(1_u64))
+			.saturating_add(T::DbWeight::get().writes(1_u64))
+	}
+	/// Storage: Treasury Spends (r:1 w:1)
+	/// Proof: Treasury Spends (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen)
+	fn void_spend() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `194`
+		//  Estimated: `3534`
+		// Minimum execution time: 147_000_000 picoseconds.
+		Weight::from_parts(181_000_000, 3534)
+			.saturating_add(T::DbWeight::get().reads(1_u64))
+			.saturating_add(T::DbWeight::get().writes(1_u64))
+	}
 }
 
 // For backwards compatibility and tests
@@ -166,12 +219,12 @@ impl WeightInfo for () {
 	/// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen)
 	/// Storage: Treasury Proposals (r:0 w:1)
 	/// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen)
-	fn spend() -> Weight {
+	fn spend_local() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `76`
 		//  Estimated: `1887`
-		// Minimum execution time: 15_057_000 picoseconds.
-		Weight::from_parts(15_803_000, 1887)
+		// Minimum execution time: 179_000_000 picoseconds.
+		Weight::from_parts(190_000_000, 1887)
 			.saturating_add(RocksDbWeight::get().reads(2_u64))
 			.saturating_add(RocksDbWeight::get().writes(3_u64))
 	}
@@ -183,8 +236,8 @@ impl WeightInfo for () {
 		// Proof Size summary in bytes:
 		//  Measured:  `177`
 		//  Estimated: `1489`
-		// Minimum execution time: 28_923_000 picoseconds.
-		Weight::from_parts(29_495_000, 1489)
+		// Minimum execution time: 349_000_000 picoseconds.
+		Weight::from_parts(398_000_000, 1489)
 			.saturating_add(RocksDbWeight::get().reads(1_u64))
 			.saturating_add(RocksDbWeight::get().writes(2_u64))
 	}
@@ -196,8 +249,8 @@ impl WeightInfo for () {
 		// Proof Size summary in bytes:
 		//  Measured:  `335`
 		//  Estimated: `3593`
-		// Minimum execution time: 30_539_000 picoseconds.
-		Weight::from_parts(30_986_000, 3593)
+		// Minimum execution time: 367_000_000 picoseconds.
+		Weight::from_parts(388_000_000, 3593)
 			.saturating_add(RocksDbWeight::get().reads(2_u64))
 			.saturating_add(RocksDbWeight::get().writes(2_u64))
 	}
@@ -208,12 +261,12 @@ impl WeightInfo for () {
 	/// The range of component `p` is `[0, 99]`.
 	fn approve_proposal(p: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `504 + p * (8 ±0)`
+		//  Measured:  `483 + p * (9 ±0)`
 		//  Estimated: `3573`
-		// Minimum execution time: 9_320_000 picoseconds.
-		Weight::from_parts(12_606_599, 3573)
-			// Standard Error: 1_302
-			.saturating_add(Weight::from_parts(71_054, 0).saturating_mul(p.into()))
+		// Minimum execution time: 111_000_000 picoseconds.
+		Weight::from_parts(108_813_243, 3573)
+			// Standard Error: 147_887
+			.saturating_add(Weight::from_parts(683_216, 0).saturating_mul(p.into()))
 			.saturating_add(RocksDbWeight::get().reads(2_u64))
 			.saturating_add(RocksDbWeight::get().writes(1_u64))
 	}
@@ -223,8 +276,8 @@ impl WeightInfo for () {
 		// Proof Size summary in bytes:
 		//  Measured:  `161`
 		//  Estimated: `1887`
-		// Minimum execution time: 7_231_000 picoseconds.
-		Weight::from_parts(7_459_000, 1887)
+		// Minimum execution time: 71_000_000 picoseconds.
+		Weight::from_parts(78_000_000, 1887)
 			.saturating_add(RocksDbWeight::get().reads(1_u64))
 			.saturating_add(RocksDbWeight::get().writes(1_u64))
 	}
@@ -232,25 +285,79 @@ impl WeightInfo for () {
 	/// Proof: Treasury Deactivated (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen)
 	/// Storage: Treasury Approvals (r:1 w:1)
 	/// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen)
-	/// Storage: Treasury Proposals (r:100 w:100)
+	/// Storage: Treasury Proposals (r:99 w:99)
 	/// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen)
-	/// Storage: System Account (r:200 w:200)
+	/// Storage: System Account (r:198 w:198)
 	/// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen)
 	/// Storage: Bounties BountyApprovals (r:1 w:1)
 	/// Proof: Bounties BountyApprovals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen)
-	/// The range of component `p` is `[0, 100]`.
+	/// The range of component `p` is `[0, 99]`.
 	fn on_initialize_proposals(p: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `421 + p * (251 ±0)`
+		//  Measured:  `427 + p * (251 ±0)`
 		//  Estimated: `1887 + p * (5206 ±0)`
-		// Minimum execution time: 44_769_000 picoseconds.
-		Weight::from_parts(57_915_572, 1887)
-			// Standard Error: 59_484
-			.saturating_add(Weight::from_parts(42_343_732, 0).saturating_mul(p.into()))
+		// Minimum execution time: 614_000_000 picoseconds.
+		Weight::from_parts(498_501_558, 1887)
+			// Standard Error: 1_070_260
+			.saturating_add(Weight::from_parts(599_011_690, 0).saturating_mul(p.into()))
 			.saturating_add(RocksDbWeight::get().reads(3_u64))
 			.saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(p.into())))
 			.saturating_add(RocksDbWeight::get().writes(3_u64))
 			.saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(p.into())))
 			.saturating_add(Weight::from_parts(0, 5206).saturating_mul(p.into()))
 	}
+	/// Storage: AssetRate ConversionRateToNative (r:1 w:0)
+	/// Proof: AssetRate ConversionRateToNative (max_values: None, max_size: Some(36), added: 2511, mode: MaxEncodedLen)
+	/// Storage: Treasury SpendCount (r:1 w:1)
+	/// Proof: Treasury SpendCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
+	/// Storage: Treasury Spends (r:0 w:1)
+	/// Proof: Treasury Spends (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen)
+	fn spend() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `140`
+		//  Estimated: `3501`
+		// Minimum execution time: 214_000_000 picoseconds.
+		Weight::from_parts(216_000_000, 3501)
+			.saturating_add(RocksDbWeight::get().reads(2_u64))
+			.saturating_add(RocksDbWeight::get().writes(2_u64))
+	}
+	/// Storage: Treasury Spends (r:1 w:1)
+	/// Proof: Treasury Spends (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen)
+	/// Storage: Assets Asset (r:1 w:1)
+	/// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen)
+	/// Storage: Assets Account (r:2 w:2)
+	/// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen)
+	/// Storage: System Account (r:1 w:1)
+	/// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen)
+	fn payout() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `705`
+		//  Estimated: `6208`
+		// Minimum execution time: 760_000_000 picoseconds.
+		Weight::from_parts(822_000_000, 6208)
+			.saturating_add(RocksDbWeight::get().reads(5_u64))
+			.saturating_add(RocksDbWeight::get().writes(5_u64))
+	}
+	/// Storage: Treasury Spends (r:1 w:1)
+	/// Proof: Treasury Spends (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen)
+	fn check_status() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `194`
+		//  Estimated: `3534`
+		// Minimum execution time: 153_000_000 picoseconds.
+		Weight::from_parts(160_000_000, 3534)
+			.saturating_add(RocksDbWeight::get().reads(1_u64))
+			.saturating_add(RocksDbWeight::get().writes(1_u64))
+	}
+	/// Storage: Treasury Spends (r:1 w:1)
+	/// Proof: Treasury Spends (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen)
+	fn void_spend() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `194`
+		//  Estimated: `3534`
+		// Minimum execution time: 147_000_000 picoseconds.
+		Weight::from_parts(181_000_000, 3534)
+			.saturating_add(RocksDbWeight::get().reads(1_u64))
+			.saturating_add(RocksDbWeight::get().writes(1_u64))
+	}
 }
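
A free-standing sketch of the arithmetic these generated functions perform: a benchmarked base weight (ref_time, proof_size) plus saturating per-read and per-write database costs. The constants mirror `spend_local` above; the `RuntimeDbWeight` values are illustrative, not benchmarked.

```rust
use frame_support::weights::{RuntimeDbWeight, Weight};

// Mirrors `spend_local` above: base weight plus saturating DB costs.
fn spend_local_weight(db: RuntimeDbWeight) -> Weight {
	Weight::from_parts(190_000_000, 1887)
		.saturating_add(db.reads(2))
		.saturating_add(db.writes(3))
}

fn main() {
	// Illustrative per-operation costs, not real benchmark output.
	let db = RuntimeDbWeight { read: 25_000_000, write: 100_000_000 };
	let w = spend_local_weight(db);
	assert_eq!(w.ref_time(), 190_000_000 + 2 * 25_000_000 + 3 * 100_000_000);
}
```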
diff --git a/substrate/frame/tx-pause/src/lib.rs b/substrate/frame/tx-pause/src/lib.rs
index f8abf678e5a7e..a3be0f5017270 100644
--- a/substrate/frame/tx-pause/src/lib.rs
+++ b/substrate/frame/tx-pause/src/lib.rs
@@ -205,7 +205,7 @@ impl<T: Config> Pallet<T> {
 	/// Ensure that this call can be paused.
 	pub fn ensure_can_pause(full_name: &RuntimeCallNameOf<T>) -> Result<(), Error<T>> {
 		// SAFETY: The `TxPause` pallet can never pause itself.
-		if full_name.0.as_ref() == <Self as PalletInfoAccess>::name().as_bytes().to_vec() {
+		if full_name.0.as_slice() == <Self as PalletInfoAccess>::name().as_bytes() {
 			return Err(Error::<T>::Unpausable)
 		}
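
The rewritten comparison checks byte slices directly instead of materialising a `Vec<u8>` on every call; a minimal free-standing illustration with placeholder values:

```rust
// New form: slice-to-slice comparison, no allocation.
let pallet_name = "TxPause";
let stored: &[u8] = b"TxPause";
assert!(stored == pallet_name.as_bytes());
// Old form: `.to_vec()` copied the name into a fresh Vec<u8> first.
assert!(stored == pallet_name.as_bytes().to_vec());
```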
 
diff --git a/substrate/frame/utility/src/lib.rs b/substrate/frame/utility/src/lib.rs
index af212a31eb971..7f963e3637d6f 100644
--- a/substrate/frame/utility/src/lib.rs
+++ b/substrate/frame/utility/src/lib.rs
@@ -479,13 +479,15 @@ pub mod pallet {
 		///
 		/// The dispatch origin for this call must be _Root_.
 		#[pallet::call_index(5)]
-		#[pallet::weight((*_weight, call.get_dispatch_info().class))]
+		#[pallet::weight((*weight, call.get_dispatch_info().class))]
 		pub fn with_weight(
 			origin: OriginFor<T>,
 			call: Box<<T as Config>::RuntimeCall>,
-			_weight: Weight,
+			weight: Weight,
 		) -> DispatchResult {
 			ensure_root(origin)?;
+			let _ = weight; // Explicitly don't check the weight witness.
+
 			let res = call.dispatch_bypass_filter(frame_system::RawOrigin::Root.into());
 			res.map(|_| ()).map_err(|e| e.error)
 		}
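
A hypothetical test sketch of the renamed parameter in use, assuming the usual mock-runtime scaffolding (`new_test_ext`, `Utility`, `RuntimeCall`, and `RuntimeOrigin` are mock items, not defined in this diff):

```rust
use frame_support::{assert_ok, weights::Weight};

#[test]
fn with_weight_dispatches_for_root() {
	new_test_ext().execute_with(|| {
		let call = Box::new(RuntimeCall::System(frame_system::Call::remark {
			remark: vec![0u8; 32],
		}));
		// Only Root may dispatch; the supplied weight overrides the inner
		// call's own annotation for fee and block-fullness accounting.
		assert_ok!(Utility::with_weight(
			RuntimeOrigin::root(),
			call,
			Weight::from_parts(1_000_000, 0),
		));
	});
}
```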
diff --git a/substrate/primitives/api/proc-macro/Cargo.toml b/substrate/primitives/api/proc-macro/Cargo.toml
index de5ddcf9dac09..25c87b5d0a4df 100644
--- a/substrate/primitives/api/proc-macro/Cargo.toml
+++ b/substrate/primitives/api/proc-macro/Cargo.toml
@@ -17,7 +17,7 @@ proc-macro = true
 
 [dependencies]
 quote = "1.0.28"
-syn = { version = "2.0.37", features = ["full", "fold", "extra-traits", "visit"] }
+syn = { version = "2.0.38", features = ["full", "fold", "extra-traits", "visit"] }
 proc-macro2 = "1.0.56"
 blake2 = { version = "0.10.4", default-features = false }
 proc-macro-crate = "1.1.3"
diff --git a/substrate/primitives/core/hashing/proc-macro/Cargo.toml b/substrate/primitives/core/hashing/proc-macro/Cargo.toml
index 64b46ab9c19ef..187b5559b931c 100644
--- a/substrate/primitives/core/hashing/proc-macro/Cargo.toml
+++ b/substrate/primitives/core/hashing/proc-macro/Cargo.toml
@@ -17,5 +17,5 @@ proc-macro = true
 
 [dependencies]
 quote = "1.0.28"
-syn = { version = "2.0.37", features = ["full", "parsing"] }
+syn = { version = "2.0.38", features = ["full", "parsing"] }
 sp-core-hashing = { path = "..", default-features = false}
diff --git a/substrate/primitives/core/src/crypto.rs b/substrate/primitives/core/src/crypto.rs
index 8c7d98f00cd89..e1bfb80046f74 100644
--- a/substrate/primitives/core/src/crypto.rs
+++ b/substrate/primitives/core/src/crypto.rs
@@ -630,6 +630,13 @@ impl sp_std::str::FromStr for AccountId32 {
 	}
 }
 
+/// Creates an [`AccountId32`] from the input, which should contain at least 32 bytes.
+impl FromEntropy for AccountId32 {
+	fn from_entropy(input: &mut impl codec::Input) -> Result<Self, codec::Error> {
+		Ok(AccountId32::new(FromEntropy::from_entropy(input)?))
+	}
+}
+
 #[cfg(feature = "std")]
 pub use self::dummy::*;
 
@@ -1154,6 +1161,8 @@ pub mod key_types {
 	pub const STAKING: KeyTypeId = KeyTypeId(*b"stak");
 	/// A key type for signing statements
 	pub const STATEMENT: KeyTypeId = KeyTypeId(*b"stmt");
+	/// Key type for Mixnet module, used to sign key-exchange public keys. Identified as `mixn`.
+	pub const MIXNET: KeyTypeId = KeyTypeId(*b"mixn");
 	/// A key type ID useful for tests.
 	pub const DUMMY: KeyTypeId = KeyTypeId(*b"dumy");
 }
@@ -1171,6 +1180,13 @@ impl FromEntropy for bool {
 	}
 }
 
+/// Create the unit type for any given input.
+impl FromEntropy for () {
+	fn from_entropy(_: &mut impl codec::Input) -> Result<Self, codec::Error> {
+		Ok(())
+	}
+}
+
 macro_rules! impl_from_entropy {
 	($type:ty , $( $others:tt )*) => {
 		impl_from_entropy!($type);
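
Taken together, the two impls above admit a small demonstration; a minimal sketch, noting that `codec` here is `parity-scale-codec` and that `&[u8]` implements `codec::Input`:

```rust
use sp_core::crypto::{AccountId32, FromEntropy};

// 32 bytes of entropy yield an AccountId32; `()` consumes no input at all.
fn demo() -> Result<(), codec::Error> {
	let mut input: &[u8] = &[7u8; 32];
	let account = AccountId32::from_entropy(&mut input)?;
	assert_eq!(account, AccountId32::new([7u8; 32]));
	assert_eq!(input.len(), 0); // all 32 bytes were consumed
	let _unit: () = FromEntropy::from_entropy(&mut input)?; // reads nothing
	Ok(())
}
```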
diff --git a/substrate/primitives/debug-derive/Cargo.toml b/substrate/primitives/debug-derive/Cargo.toml
index 9d3930ac25720..c97c8a0a3991e 100644
--- a/substrate/primitives/debug-derive/Cargo.toml
+++ b/substrate/primitives/debug-derive/Cargo.toml
@@ -18,7 +18,7 @@ proc-macro = true
 
 [dependencies]
 quote = "1.0.28"
-syn = "2.0.37"
+syn = "2.0.38"
 proc-macro2 = "1.0.56"
 
 [features]
diff --git a/substrate/primitives/mixnet/Cargo.toml b/substrate/primitives/mixnet/Cargo.toml
new file mode 100644
index 0000000000000..3e2dcc7ec5c4c
--- /dev/null
+++ b/substrate/primitives/mixnet/Cargo.toml
@@ -0,0 +1,30 @@
+[package]
+description = "Substrate mixnet types and runtime interface"
+name = "sp-mixnet"
+version = "0.1.0-dev"
+license = "Apache-2.0"
+authors = ["Parity Technologies <admin@parity.io>"]
+edition = "2021"
+homepage = "https://substrate.io"
+repository = "https://github.com/paritytech/substrate/"
+readme = "README.md"
+
+[package.metadata.docs.rs]
+targets = ["x86_64-unknown-linux-gnu"]
+
+[dependencies]
+codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
+scale-info = { version = "2.5.0", default-features = false, features = ["derive"] }
+sp-api = { default-features = false, path = "../api" }
+sp-application-crypto = { default-features = false, path = "../application-crypto" }
+sp-std = { default-features = false, path = "../std" }
+
+[features]
+default = [ "std" ]
+std = [
+	"codec/std",
+	"scale-info/std",
+	"sp-api/std",
+	"sp-application-crypto/std",
+	"sp-std/std",
+]
diff --git a/substrate/primitives/mixnet/README.md b/substrate/primitives/mixnet/README.md
new file mode 100644
index 0000000000000..47c109f6b57c6
--- /dev/null
+++ b/substrate/primitives/mixnet/README.md
@@ -0,0 +1,3 @@
+Substrate mixnet types and runtime interface.
+
+License: Apache-2.0
diff --git a/substrate/primitives/mixnet/src/lib.rs b/substrate/primitives/mixnet/src/lib.rs
new file mode 100644
index 0000000000000..58b8a10f0cd8d
--- /dev/null
+++ b/substrate/primitives/mixnet/src/lib.rs
@@ -0,0 +1,24 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Substrate mixnet types and runtime interface.
+
+#![warn(missing_docs)]
+#![cfg_attr(not(feature = "std"), no_std)]
+
+pub mod runtime_api;
+pub mod types;
diff --git a/substrate/primitives/mixnet/src/runtime_api.rs b/substrate/primitives/mixnet/src/runtime_api.rs
new file mode 100644
index 0000000000000..28ab40e633787
--- /dev/null
+++ b/substrate/primitives/mixnet/src/runtime_api.rs
@@ -0,0 +1,52 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Runtime API for querying mixnet configuration and registering mixnodes.
+
+use super::types::{Mixnode, MixnodesErr, SessionIndex, SessionStatus};
+use sp_std::vec::Vec;
+
+sp_api::decl_runtime_apis! {
+	/// API to query the mixnet session status and mixnode sets, and to register mixnodes.
+	pub trait MixnetApi {
+		/// Get the index and phase of the current session.
+		fn session_status() -> SessionStatus;
+
+		/// Get the mixnode set for the previous session.
+		fn prev_mixnodes() -> Result<Vec<Mixnode>, MixnodesErr>;
+
+		/// Get the mixnode set for the current session.
+		fn current_mixnodes() -> Result<Vec<Mixnode>, MixnodesErr>;
+
+		/// Try to register a mixnode for the next session.
+		///
+		/// If a registration extrinsic is submitted, `true` is returned. The caller should avoid
+		/// calling `maybe_register` again for a few blocks, to give the submitted extrinsic a
+		/// chance to get included.
+		///
+		/// With the above exception, `maybe_register` is designed to be called every block. Most
+		/// of the time it will not do anything, for example:
+		///
+		/// - If it is not an appropriate time to submit a registration extrinsic.
+		/// - If the local node has already registered a mixnode for the next session.
+		/// - If the local node is not permitted to register a mixnode for the next session.
+		///
+		/// `session_index` should match `session_status().current_index`; if it does not, `false`
+		/// is returned immediately.
+		fn maybe_register(session_index: SessionIndex, mixnode: Mixnode) -> bool;
+	}
+}
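
A hedged sketch of a hypothetical node-side caller following the contract above (`try_register` is an illustrative name, not part of this diff; the generated API follows the usual `sp_api` convention where each method takes a block hash):

```rust
use sp_api::ProvideRuntimeApi;
use sp_mixnet::{runtime_api::MixnetApi, types::Mixnode};

// Passes the *current* session index, as documented above, so the runtime
// can cheaply reject stale registration attempts.
fn try_register<Block, Client>(client: &Client, at: Block::Hash, mixnode: Mixnode) -> bool
where
	Block: sp_api::BlockT,
	Client: ProvideRuntimeApi<Block>,
	Client::Api: MixnetApi<Block>,
{
	let api = client.runtime_api();
	let Ok(status) = api.session_status(at) else { return false };
	api.maybe_register(at, status.current_index, mixnode).unwrap_or(false)
}
```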
diff --git a/substrate/primitives/mixnet/src/types.rs b/substrate/primitives/mixnet/src/types.rs
new file mode 100644
index 0000000000000..fc214f94d1cbf
--- /dev/null
+++ b/substrate/primitives/mixnet/src/types.rs
@@ -0,0 +1,100 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Mixnet types used by both host and runtime.
+
+use codec::{Decode, Encode};
+use scale_info::TypeInfo;
+use sp_std::vec::Vec;
+
+mod app {
+	use sp_application_crypto::{app_crypto, key_types::MIXNET, sr25519};
+	app_crypto!(sr25519, MIXNET);
+}
+
+/// Authority public session key, used to verify registration signatures.
+pub type AuthorityId = app::Public;
+/// Authority signature, attached to mixnode registrations.
+pub type AuthoritySignature = app::Signature;
+
+/// Absolute session index.
+pub type SessionIndex = u32;
+
+/// Each session should progress through these phases in order.
+#[derive(Decode, Encode, TypeInfo, PartialEq, Eq)]
+pub enum SessionPhase {
+	/// Generate cover traffic to the current session's mixnode set.
+	CoverToCurrent,
+	/// Build requests using the current session's mixnode set.
+	RequestsToCurrent,
+	/// Only send cover (and forwarded) traffic to the previous session's mixnode set.
+	CoverToPrev,
+	/// Disconnect the previous session's mixnode set.
+	DisconnectFromPrev,
+}
+
+/// The index and phase of the current session.
+#[derive(Decode, Encode, TypeInfo)]
+pub struct SessionStatus {
+	/// Index of the current session.
+	pub current_index: SessionIndex,
+	/// Current session phase.
+	pub phase: SessionPhase,
+}
+
+/// Size in bytes of a [`KxPublic`].
+pub const KX_PUBLIC_SIZE: usize = 32;
+
+/// X25519 public key, used in key exchange between message senders and mixnodes. Mixnode public
+/// keys are published on-chain and change every session. Message senders generate a new key for
+/// every message they send.
+pub type KxPublic = [u8; KX_PUBLIC_SIZE];
+
+/// Ed25519 public key of a libp2p peer.
+pub type PeerId = [u8; 32];
+
+/// Information published on-chain for each mixnode every session.
+#[derive(Decode, Encode, TypeInfo)]
+pub struct Mixnode {
+	/// Key-exchange public key for the mixnode.
+	pub kx_public: KxPublic,
+	/// libp2p peer ID of the mixnode.
+	pub peer_id: PeerId,
+	/// External addresses for the mixnode, in multiaddr format, UTF-8 encoded.
+	pub external_addresses: Vec<Vec<u8>>,
+}
+
+/// Error querying the runtime for a session's mixnode set.
+#[derive(Decode, Encode, TypeInfo)]
+pub enum MixnodesErr {
+	/// Insufficient mixnodes were registered for the session.
+	InsufficientRegistrations {
+		/// The number of mixnodes that were registered for the session.
+		num: u32,
+		/// The minimum number of mixnodes that must be registered for the mixnet to operate.
+		min: u32,
+	},
+}
+
+impl sp_std::fmt::Display for MixnodesErr {
+	fn fmt(&self, fmt: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result {
+		match self {
+			MixnodesErr::InsufficientRegistrations { num, min } =>
+				write!(fmt, "{num} mixnode(s) registered; {min} is the minimum"),
+		}
+	}
+}
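
A minimal sketch exercising these types, with placeholder values and a std build assumed (so `Display` provides `to_string`):

```rust
use sp_mixnet::types::{Mixnode, MixnodesErr, KX_PUBLIC_SIZE};

// Placeholder values only; a real mixnode publishes its actual X25519 key,
// libp2p peer ID, and multiaddrs.
let mixnode = Mixnode {
	kx_public: [0u8; KX_PUBLIC_SIZE],
	peer_id: [0u8; 32],
	external_addresses: vec![b"/ip4/192.0.2.1/tcp/30333".to_vec()],
};
assert_eq!(mixnode.external_addresses.len(), 1);

// The Display impl renders the documented message:
let err = MixnodesErr::InsufficientRegistrations { num: 2, min: 3 };
assert_eq!(err.to_string(), "2 mixnode(s) registered; 3 is the minimum");
```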
diff --git a/substrate/primitives/npos-elections/fuzzer/Cargo.toml b/substrate/primitives/npos-elections/fuzzer/Cargo.toml
index eeb9deebb71e9..5e75f926f87ca 100644
--- a/substrate/primitives/npos-elections/fuzzer/Cargo.toml
+++ b/substrate/primitives/npos-elections/fuzzer/Cargo.toml
@@ -14,7 +14,7 @@ publish = false
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-clap = { version = "4.4.4", features = ["derive"] }
+clap = { version = "4.4.6", features = ["derive"] }
 honggfuzz = "0.5"
 rand = { version = "0.8", features = ["std", "small_rng"] }
 sp-npos-elections = { path = ".." }
diff --git a/substrate/primitives/npos-elections/src/lib.rs b/substrate/primitives/npos-elections/src/lib.rs
index 0afe1ec5bb692..62ae050211482 100644
--- a/substrate/primitives/npos-elections/src/lib.rs
+++ b/substrate/primitives/npos-elections/src/lib.rs
@@ -19,7 +19,7 @@
 //! - [`seq_phragmen`]: Implements the Phragmén Sequential Method. An un-ranked, relatively fast
 //!   election method that ensures PJR, but does not provide a constant factor approximation of the
 //!   maximin problem.
-//! - [`phragmms`](phragmms::phragmms): Implements a hybrid approach inspired by Phragmén which is
+//! - [`phragmms`](phragmms::phragmms()): Implements a hybrid approach inspired by Phragmén which is
 //!   executed faster but it can achieve a constant factor approximation of the maximin problem,
 //!   similar to that of the MMS algorithm.
 //! - [`balance`](balancing::balance): Implements the star balancing algorithm. This iterative
diff --git a/substrate/primitives/runtime-interface/proc-macro/Cargo.toml b/substrate/primitives/runtime-interface/proc-macro/Cargo.toml
index 5569e31c93602..fbc49785ae970 100644
--- a/substrate/primitives/runtime-interface/proc-macro/Cargo.toml
+++ b/substrate/primitives/runtime-interface/proc-macro/Cargo.toml
@@ -20,4 +20,4 @@ Inflector = "0.11.4"
 proc-macro-crate = "1.1.3"
 proc-macro2 = "1.0.56"
 quote = "1.0.28"
-syn = { version = "2.0.37", features = ["full", "visit", "fold", "extra-traits"] }
+syn = { version = "2.0.38", features = ["full", "visit", "fold", "extra-traits"] }
diff --git a/substrate/primitives/runtime-interface/tests/ui/no_feature_gated_method.stderr b/substrate/primitives/runtime-interface/tests/ui/no_feature_gated_method.stderr
index 23e671f6ce3f3..10012ede793de 100644
--- a/substrate/primitives/runtime-interface/tests/ui/no_feature_gated_method.stderr
+++ b/substrate/primitives/runtime-interface/tests/ui/no_feature_gated_method.stderr
@@ -3,3 +3,15 @@ error[E0425]: cannot find function `bar` in module `test`
    |
 33 |     test::bar();
    |           ^^^ not found in `test`
+   |
+note: found an item that was configured out
+  --> tests/ui/no_feature_gated_method.rs:25:5
+   |
+25 |     fn bar() {}
+   |        ^^^
+   = note: the item is gated behind the `bar-feature` feature
+note: found an item that was configured out
+  --> tests/ui/no_feature_gated_method.rs:25:5
+   |
+25 |     fn bar() {}
+   |        ^^^
diff --git a/substrate/primitives/version/proc-macro/Cargo.toml b/substrate/primitives/version/proc-macro/Cargo.toml
index cc28b8f176b88..7fce559e3ed63 100644
--- a/substrate/primitives/version/proc-macro/Cargo.toml
+++ b/substrate/primitives/version/proc-macro/Cargo.toml
@@ -19,7 +19,7 @@ proc-macro = true
 codec = { package = "parity-scale-codec", version = "3.6.1", features = [ "derive" ] }
 proc-macro2 = "1.0.56"
 quote = "1.0.28"
-syn = { version = "2.0.37", features = ["full", "fold", "extra-traits", "visit"] }
+syn = { version = "2.0.38", features = ["full", "fold", "extra-traits", "visit"] }
 
 [dev-dependencies]
 sp-version = { path = ".." }
diff --git a/substrate/scripts/ci/deny.toml b/substrate/scripts/ci/deny.toml
index 5297d07143c22..ca059e384a358 100644
--- a/substrate/scripts/ci/deny.toml
+++ b/substrate/scripts/ci/deny.toml
@@ -68,6 +68,7 @@ exceptions = [
     { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-executor-wasmtime" },
     { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-informant" },
     { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-keystore" },
+    { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-mixnet" },
     { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network" },
     { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network-bitswap" },
     { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network-common" },
diff --git a/substrate/scripts/ci/node-template-release/Cargo.toml b/substrate/scripts/ci/node-template-release/Cargo.toml
index c0e0275872467..73ffce8645b86 100644
--- a/substrate/scripts/ci/node-template-release/Cargo.toml
+++ b/substrate/scripts/ci/node-template-release/Cargo.toml
@@ -11,7 +11,7 @@ publish = false
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-clap = { version = "4.4.4", features = ["derive"] }
+clap = { version = "4.4.6", features = ["derive"] }
 flate2 = "1.0"
 fs_extra = "1.3"
 glob = "0.3"
diff --git a/substrate/utils/frame/benchmarking-cli/Cargo.toml b/substrate/utils/frame/benchmarking-cli/Cargo.toml
index 9ba22e24faacd..e32fe47b72971 100644
--- a/substrate/utils/frame/benchmarking-cli/Cargo.toml
+++ b/substrate/utils/frame/benchmarking-cli/Cargo.toml
@@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 array-bytes = "6.1"
 chrono = "0.4"
-clap = { version = "4.4.4", features = ["derive"] }
+clap = { version = "4.4.6", features = ["derive"] }
 codec = { package = "parity-scale-codec", version = "3.6.1" }
 comfy-table = { version = "7.0.1", default-features = false }
 handlebars = "4.2.2"
diff --git a/substrate/utils/frame/benchmarking-cli/src/pallet/writer.rs b/substrate/utils/frame/benchmarking-cli/src/pallet/writer.rs
index 69c95d13c0985..9493a693bbed3 100644
--- a/substrate/utils/frame/benchmarking-cli/src/pallet/writer.rs
+++ b/substrate/utils/frame/benchmarking-cli/src/pallet/writer.rs
@@ -779,6 +779,7 @@ fn worst_case_pov(
 
 /// A simple match statement which outputs the log 16 of some value.
 fn easy_log_16(i: u32) -> u32 {
+	#[allow(clippy::redundant_guards)]
 	match i {
 		i if i == 0 => 0,
 		i if i <= 16 => 1,
diff --git a/substrate/utils/frame/frame-utilities-cli/Cargo.toml b/substrate/utils/frame/frame-utilities-cli/Cargo.toml
index 5a3365dc90038..24c04f47391e8 100644
--- a/substrate/utils/frame/frame-utilities-cli/Cargo.toml
+++ b/substrate/utils/frame/frame-utilities-cli/Cargo.toml
@@ -11,7 +11,7 @@ documentation = "https://docs.rs/substrate-frame-cli"
 readme = "README.md"
 
 [dependencies]
-clap = { version = "4.4.4", features = ["derive"] }
+clap = { version = "4.4.6", features = ["derive"] }
 frame-support = { path = "../../../frame/support" }
 frame-system = { path = "../../../frame/system" }
 sc-cli = { path = "../../../client/cli" }
diff --git a/substrate/utils/frame/generate-bags/node-runtime/Cargo.toml b/substrate/utils/frame/generate-bags/node-runtime/Cargo.toml
index e1490aa363ca7..13e6113835623 100644
--- a/substrate/utils/frame/generate-bags/node-runtime/Cargo.toml
+++ b/substrate/utils/frame/generate-bags/node-runtime/Cargo.toml
@@ -14,4 +14,4 @@ kitchensink-runtime = { path = "../../../../bin/node/runtime" }
 generate-bags = { path = ".." }
 
 # third-party
-clap = { version = "4.4.4", features = ["derive"] }
+clap = { version = "4.4.6", features = ["derive"] }
diff --git a/substrate/utils/frame/remote-externalities/Cargo.toml b/substrate/utils/frame/remote-externalities/Cargo.toml
index ad6ab006da1dc..7067aed238aca 100644
--- a/substrate/utils/frame/remote-externalities/Cargo.toml
+++ b/substrate/utils/frame/remote-externalities/Cargo.toml
@@ -23,7 +23,6 @@ sp-runtime = { path = "../../../primitives/runtime" }
 tokio = { version = "1.22.0", features = ["macros", "rt-multi-thread"] }
 substrate-rpc-client = { path = "../rpc/client" }
 futures = "0.3"
-async-recursion = "1.0.4"
 indicatif = "0.17.3"
 spinners = "4.1.0"
 tokio-retry = "0.3.0"
diff --git a/substrate/utils/frame/remote-externalities/src/lib.rs b/substrate/utils/frame/remote-externalities/src/lib.rs
index 072ea6ef5e597..71e9320ebeeb2 100644
--- a/substrate/utils/frame/remote-externalities/src/lib.rs
+++ b/substrate/utils/frame/remote-externalities/src/lib.rs
@@ -20,7 +20,6 @@
 //! An equivalent of `sp_io::TestExternalities` that can load its state from a remote substrate
 //! based chain, or a local state snapshot file.
 
-use async_recursion::async_recursion;
 use codec::{Compact, Decode, Encode};
 use indicatif::{ProgressBar, ProgressStyle};
 use jsonrpsee::{
@@ -44,7 +43,7 @@ use sp_runtime::{
 use sp_state_machine::TestExternalities;
 use spinners::{Spinner, Spinners};
 use std::{
-	cmp::max,
+	cmp::{max, min},
 	fs,
 	ops::{Deref, DerefMut},
 	path::{Path, PathBuf},
@@ -353,10 +352,11 @@ where
 	const PARALLEL_REQUESTS: usize = 4;
 	const BATCH_SIZE_INCREASE_FACTOR: f32 = 1.10;
 	const BATCH_SIZE_DECREASE_FACTOR: f32 = 0.50;
-	const INITIAL_BATCH_SIZE: usize = 5000;
+	const REQUEST_DURATION_TARGET: Duration = Duration::from_secs(15);
+	const INITIAL_BATCH_SIZE: usize = 10;
 	// nodes by default will not return more than 1000 keys per request
 	const DEFAULT_KEY_DOWNLOAD_PAGE: u32 = 1000;
-	const KEYS_PAGE_MAX_RETRIES: usize = 12;
+	const MAX_RETRIES: usize = 12;
 	const KEYS_PAGE_RETRY_INTERVAL: Duration = Duration::from_secs(5);
 
 	async fn rpc_get_storage(
@@ -411,8 +411,8 @@ where
 		let keys = loop {
 			// This loop can hit the node with very rapid requests, occasionally causing it to
 			// error out in CI (https://github.com/paritytech/substrate/issues/14129), so we retry.
-			let retry_strategy = FixedInterval::new(Self::KEYS_PAGE_RETRY_INTERVAL)
-				.take(Self::KEYS_PAGE_MAX_RETRIES);
+			let retry_strategy =
+				FixedInterval::new(Self::KEYS_PAGE_RETRY_INTERVAL).take(Self::MAX_RETRIES);
 			let get_page_closure =
 				|| self.get_keys_single_page(Some(prefix.clone()), last_key.clone(), at);
 			let page = Retry::spawn(retry_strategy, get_page_closure).await?;
@@ -448,8 +448,6 @@ where
 	///
 	/// * `client` - An `Arc` wrapped `HttpClient` used for making the requests.
 	/// * `payloads` - A vector of tuples containing a JSONRPC method name and `ArrayParams`
-	/// * `batch_size` - The initial batch size to use for the request. The batch size will be
-	///   adjusted dynamically in case of failure.
 	///
 	/// # Returns
 	///
@@ -485,80 +483,107 @@ where
 	///     }
 	/// }
 	/// ```
-	#[async_recursion]
 	async fn get_storage_data_dynamic_batch_size(
 		client: &HttpClient,
 		payloads: Vec<(String, ArrayParams)>,
-		batch_size: usize,
 		bar: &ProgressBar,
 	) -> Result<Vec<Option<StorageData>>, String> {
-		// All payloads have been processed
-		if payloads.is_empty() {
-			return Ok(vec![])
-		};
-
-		log::debug!(
-			target: LOG_TARGET,
-			"Remaining payloads: {} Batch request size: {}",
-			payloads.len(),
-			batch_size,
-		);
+		let mut all_data: Vec<Option<StorageData>> = vec![];
+		let mut start_index = 0;
+		let mut retries = 0usize;
+		let mut batch_size = Self::INITIAL_BATCH_SIZE;
+		let total_payloads = payloads.len();
+
+		while start_index < total_payloads {
+			log::debug!(
+				target: LOG_TARGET,
+				"Remaining payloads: {} Batch request size: {}",
+				total_payloads - start_index,
+				batch_size,
+			);
 
-		// Payloads to attempt to process this batch
-		let page = payloads.iter().take(batch_size).cloned().collect::<Vec<_>>();
+			let end_index = usize::min(start_index + batch_size, total_payloads);
+			let page = &payloads[start_index..end_index];
 
-		// Build the batch request
-		let mut batch = BatchRequestBuilder::new();
-		for (method, params) in page.iter() {
-			batch
-				.insert(method, params.clone())
-				.map_err(|_| "Invalid batch method and/or params")?
-		}
-		let batch_response = match client.batch_request::<Option<StorageData>>(batch).await {
-			Ok(batch_response) => batch_response,
-			Err(e) => {
-				if batch_size < 2 {
-					return Err(e.to_string())
-				}
+			// Build the batch request
+			let mut batch = BatchRequestBuilder::new();
+			for (method, params) in page.iter() {
+				batch
+					.insert(method, params.clone())
+					.map_err(|_| "Invalid batch method and/or params")?;
+			}
 
-				log::debug!(
-					target: LOG_TARGET,
-					"Batch request failed, trying again with smaller batch size. {}",
-					e.to_string()
-				);
+			let request_started = Instant::now();
+			let batch_response = match client.batch_request::<Option<StorageData>>(batch).await {
+				Ok(batch_response) => {
+					retries = 0;
+					batch_response
+				},
+				Err(e) => {
+					if retries > Self::MAX_RETRIES {
+						return Err(e.to_string())
+					}
+
+					retries += 1;
+					let failure_log = format!(
+						"Batch request failed ({}/{} retries). Error: {}",
+						retries,
+						Self::MAX_RETRIES,
+						e.to_string()
+					);
+					// After 2 consecutive failures something is very wrong; log a warning
+					// and reset the batch size down to 1.
+					if retries >= 2 {
+						log::warn!("{}", failure_log);
+						batch_size = 1;
+					} else {
+						log::debug!("{}", failure_log);
+						// Decrease batch size by DECREASE_FACTOR
+						batch_size =
+							(batch_size as f32 * Self::BATCH_SIZE_DECREASE_FACTOR) as usize;
+					}
+					continue
+				},
+			};
 
-				return Self::get_storage_data_dynamic_batch_size(
-					client,
-					payloads,
-					max(1, (batch_size as f32 * Self::BATCH_SIZE_DECREASE_FACTOR) as usize),
-					bar,
+			let request_duration = request_started.elapsed();
+			batch_size = if request_duration > Self::REQUEST_DURATION_TARGET {
+				// Decrease batch size
+				max(1, (batch_size as f32 * Self::BATCH_SIZE_DECREASE_FACTOR) as usize)
+			} else {
+				// Increase batch size, but not more than the remaining total payloads to process
+				min(
+					total_payloads - start_index,
+					max(
+						batch_size + 1,
+						(batch_size as f32 * Self::BATCH_SIZE_INCREASE_FACTOR) as usize,
+					),
 				)
-				.await
-			},
-		};
+			};
+
+			log::debug!(
+				target: LOG_TARGET,
+				"Request duration: {:?} Target duration: {:?} Last batch size: {} Next batch size: {}",
+				request_duration,
+				Self::REQUEST_DURATION_TARGET,
+				end_index - start_index,
+				batch_size
+			);
 
-		// Collect the data from this batch
-		let mut data: Vec<Option<StorageData>> = vec![];
-		let batch_response_len = batch_response.len();
-		for item in batch_response.into_iter() {
-			match item {
-				Ok(x) => data.push(x),
-				Err(e) => return Err(e.message().to_string()),
+			let batch_response_len = batch_response.len();
+			for item in batch_response.into_iter() {
+				match item {
+					Ok(x) => all_data.push(x),
+					Err(e) => return Err(e.message().to_string()),
+				}
 			}
+			bar.inc(batch_response_len as u64);
+
+			// Update the start index for the next iteration
+			start_index = end_index;
 		}
-		bar.inc(batch_response_len as u64);
 
-		// Return this data joined with the remaining keys
-		let remaining_payloads = payloads.iter().skip(batch_size).cloned().collect::<Vec<_>>();
-		let mut rest = Self::get_storage_data_dynamic_batch_size(
-			client,
-			remaining_payloads,
-			max(batch_size + 1, (batch_size as f32 * Self::BATCH_SIZE_INCREASE_FACTOR) as usize),
-			bar,
-		)
-		.await?;
-		data.append(&mut rest);
-		Ok(data)
+		Ok(all_data)
 	}
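
Distilled, the adaptive sizing above is a small feedback loop; a free-standing sketch with the constants inlined (the function name is hypothetical, and the error path — retry counting, reset to 1 after repeated failures — is omitted):

```rust
use std::time::Duration;

// Halve the batch when a request overshoots the 15s target; otherwise grow it
// by ~10% (at least +1), clamped to the number of payloads still outstanding.
fn next_batch_size(current: usize, remaining: usize, took: Duration) -> usize {
	const TARGET: Duration = Duration::from_secs(15);
	if took > TARGET {
		((current as f32 * 0.50) as usize).max(1)
	} else {
		remaining.min(((current as f32 * 1.10) as usize).max(current + 1))
	}
}
```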
 
 	/// Synonym of `getPairs` that uses paged queries to first get the keys, and then
@@ -605,12 +630,7 @@ where
 		);
 		let payloads_chunked = payloads.chunks((&payloads.len() / Self::PARALLEL_REQUESTS).max(1));
 		let requests = payloads_chunked.map(|payload_chunk| {
-			Self::get_storage_data_dynamic_batch_size(
-				&client,
-				payload_chunk.to_vec(),
-				Self::INITIAL_BATCH_SIZE,
-				&bar,
-			)
+			Self::get_storage_data_dynamic_batch_size(&client, payload_chunk.to_vec(), &bar)
 		});
 		// Execute the requests and move the Result outside.
 		let storage_data_result: Result<Vec<_>, _> =
@@ -683,20 +703,14 @@ where
 			.collect::<Vec<_>>();
 
 		let bar = ProgressBar::new(payloads.len() as u64);
-		let storage_data = match Self::get_storage_data_dynamic_batch_size(
-			client,
-			payloads,
-			Self::INITIAL_BATCH_SIZE,
-			&bar,
-		)
-		.await
-		{
-			Ok(storage_data) => storage_data,
-			Err(e) => {
-				log::error!(target: LOG_TARGET, "batch processing failed: {:?}", e);
-				return Err("batch processing failed")
-			},
-		};
+		let storage_data =
+			match Self::get_storage_data_dynamic_batch_size(client, payloads, &bar).await {
+				Ok(storage_data) => storage_data,
+				Err(e) => {
+					log::error!(target: LOG_TARGET, "batch processing failed: {:?}", e);
+					return Err("batch processing failed")
+				},
+			};
 
 		assert_eq!(child_keys_len, storage_data.len());
 
diff --git a/substrate/utils/frame/try-runtime/cli/Cargo.toml b/substrate/utils/frame/try-runtime/cli/Cargo.toml
index 3f693ca6c82d8..65380a22ce6e0 100644
--- a/substrate/utils/frame/try-runtime/cli/Cargo.toml
+++ b/substrate/utils/frame/try-runtime/cli/Cargo.toml
@@ -35,7 +35,7 @@ frame-try-runtime = { path = "../../../../frame/try-runtime", optional = true}
 substrate-rpc-client = { path = "../../rpc/client" }
 
 async-trait = "0.1.57"
-clap = { version = "4.4.4", features = ["derive"] }
+clap = { version = "4.4.6", features = ["derive"] }
 hex = { version = "0.4.3", default-features = false }
 log = "0.4.17"
 parity-scale-codec = "3.6.1"