diff --git a/.cicd/base-images.yml b/.cicd/base-images.yml new file mode 100644 index 00000000000..045f51e1643 --- /dev/null +++ b/.cicd/base-images.yml @@ -0,0 +1,121 @@ +steps: + - wait + + - label: ":aws: Amazon_Linux 2 - Base Image Pinned" + command: + - "./.cicd/generate-base-images.sh" + env: + FORCE_BASE_IMAGE: true + IMAGE_TAG: amazon_linux-2-pinned + PLATFORM_TYPE: pinned + agents: + queue: "automation-eks-eos-builder-fleet" + timeout: 180 + + - label: ":centos: CentOS 7.7 - Base Image Pinned" + command: + - "./.cicd/generate-base-images.sh" + env: + FORCE_BASE_IMAGE: true + IMAGE_TAG: centos-7.7-pinned + PLATFORM_TYPE: pinned + agents: + queue: "automation-eks-eos-builder-fleet" + timeout: 180 + + - label: ":darwin: macOS 10.14 - Base Image Pinned" + command: + - "git clone git@github.com:EOSIO/eos.git eos && cd eos && git checkout -f $BUILDKITE_BRANCH" + - "cd eos && ./.cicd/platforms/pinned/macos-10.14-pinned.sh" + plugins: + - EOSIO/anka#v0.6.0: + debug: true + vm-name: "10.14.6_6C_14G_40G" + no-volume: true + always-pull: true + wait-network: true + vm-registry-tag: "clean::cicd::git-ssh::nas::brew::buildkite-agent" + failover-registries: + - "registry_1" + - "registry_2" + inherit-environment-vars: true + - EOSIO/skip-checkout#v0.1.1: + cd: ~ + agents: "queue=mac-anka-node-fleet" + timeout: 180 + + - label: ":ubuntu: Ubuntu 16.04 - Base Image Pinned" + command: + - "./.cicd/generate-base-images.sh" + env: + FORCE_BASE_IMAGE: true + IMAGE_TAG: ubuntu-16.04-pinned + PLATFORM_TYPE: pinned + agents: + queue: "automation-eks-eos-builder-fleet" + timeout: 180 + + - label: ":ubuntu: Ubuntu 18.04 - Base Image Pinned" + command: + - "./.cicd/generate-base-images.sh" + env: + FORCE_BASE_IMAGE: true + IMAGE_TAG: ubuntu-18.04-pinned + PLATFORM_TYPE: pinned + agents: + queue: "automation-eks-eos-builder-fleet" + timeout: 180 + + - label: ":aws: Amazon_Linux 2 - Base Image Unpinned" + command: + - "./.cicd/generate-base-images.sh" + env: + FORCE_BASE_IMAGE: true + IMAGE_TAG: amazon_linux-2-unpinned + PLATFORM_TYPE: unpinned + agents: + queue: "automation-eks-eos-builder-fleet" + timeout: 180 + + - label: ":centos: CentOS 7.7 - Base Image Unpinned" + command: + - "./.cicd/generate-base-images.sh" + env: + FORCE_BASE_IMAGE: true + IMAGE_TAG: centos-7.7-unpinned + PLATFORM_TYPE: unpinned + agents: + queue: "automation-eks-eos-builder-fleet" + timeout: 180 + + - label: ":darwin: macOS 10.14 - Base Image Unpinned" + command: + - "git clone git@github.com:EOSIO/eos.git eos && cd eos && git checkout -f $BUILDKITE_BRANCH" + - "cd eos && ./.cicd/platforms/unpinned/macos-10.14-unpinned.sh" + plugins: + - EOSIO/anka#v0.6.0: + debug: true + vm-name: "10.14.6_6C_14G_40G" + no-volume: true + always-pull: true + wait-network: true + vm-registry-tag: "clean::cicd::git-ssh::nas::brew::buildkite-agent" + failover-registries: + - "registry_1" + - "registry_2" + inherit-environment-vars: true + - EOSIO/skip-checkout#v0.1.1: + cd: ~ + agents: "queue=mac-anka-node-fleet" + timeout: 180 + + - label: ":ubuntu: Ubuntu 18.04 - Base Image Unpinned" + command: + - "./.cicd/generate-base-images.sh" + env: + FORCE_BASE_IMAGE: true + IMAGE_TAG: ubuntu-18.04-unpinned + PLATFORM_TYPE: unpinned + agents: + queue: "automation-eks-eos-builder-fleet" + timeout: 180 \ No newline at end of file diff --git a/.cicd/generate-base-images.sh b/.cicd/generate-base-images.sh index d05a4a25b99..d4d52233f0b 100755 --- a/.cicd/generate-base-images.sh +++ b/.cicd/generate-base-images.sh @@ -8,9 +8,14 @@ ORG_REPO=$(echo $FULL_TAG | cut 
-d: -f1) TAG=$(echo $FULL_TAG | cut -d: -f2) EXISTS=$(curl -s -H "Authorization: Bearer $(curl -sSL "https://auth.docker.io/token?service=registry.docker.io&scope=repository:${ORG_REPO}:pull" | jq --raw-output .token)" "https://registry.hub.docker.com/v2/${ORG_REPO}/manifests/$TAG") # build, if necessary -if [[ $EXISTS =~ '404 page not found' || $EXISTS =~ 'manifest unknown' || $FORCE_BASE_IMAGE == 'true' ]]; then # if we cannot pull the image, we build and push it first - docker build -t $FULL_TAG -f $CICD_DIR/platforms/$PLATFORM_TYPE/$IMAGE_TAG.dockerfile . - docker push $FULL_TAG +if [[ $EXISTS =~ '404 page not found' || $EXISTS =~ 'manifest unknown' || $FORCE_BASE_IMAGE == true ]]; then # if we cannot pull the image, we build and push it first + docker build --no-cache -t $FULL_TAG -f $CICD_DIR/platforms/$PLATFORM_TYPE/$IMAGE_TAG.dockerfile . + if [[ $FORCE_BASE_IMAGE != true ]]; then + docker push $FULL_TAG + else + echo "Base image creation successful. Not pushing..." + exit 0 + fi else echo "$FULL_TAG already exists." fi \ No newline at end of file diff --git a/.cicd/submodule-regression-check.sh b/.cicd/submodule-regression-check.sh index 80999067204..9392ebb43b1 100755 --- a/.cicd/submodule-regression-check.sh +++ b/.cicd/submodule-regression-check.sh @@ -10,7 +10,7 @@ if [[ $BUILDKITE == true ]]; then else [[ -z $GITHUB_BASE_REF ]] && echo "Cannot find \$GITHUB_BASE_REF, so we have nothing to compare submodules to. Skipping submodule regression check." && exit 0 BASE_BRANCH=$GITHUB_BASE_REF - CURRENT_BRANCH=$GITHUB_SHA + CURRENT_BRANCH="refs/remotes/pull/$PR_NUMBER/merge" fi echo "getting submodule info for $CURRENT_BRANCH" @@ -25,12 +25,6 @@ while read -r a b; do BASE_MAP[$a]=$b done < <(git submodule --quiet foreach --recursive 'echo $path `git log -1 --format=%ct`') -# We need to switch back to the PR ref/head so we can git log properly -if [[ $BUILDKITE != true ]]; then - echo "git fetch origin +$GITHUB_REF:" - git fetch origin +${GITHUB_REF}: 1> /dev/null -fi - echo "switching back to $CURRENT_BRANCH..." echo "git checkout -qf $CURRENT_BRANCH" git checkout -qf $CURRENT_BRANCH 1> /dev/null diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 19fc006353c..20c04810660 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -1,25 +1,22 @@ name: Pull Request on: [pull_request] -jobs: - start-job: - name: Start Job - runs-on: ubuntu-latest - steps: - - name: Start Job. - run: echo "PR created. Builds will be triggered here for forked PRs or Buildkite for internal PRs." - +env: + PR_NUMBER: ${{ toJson(github.event.number) }} +jobs: submodule_regression_check: if: github.event.pull_request.base.repo.id != github.event.pull_request.head.repo.id name: Submodule Regression Check runs-on: ubuntu-latest - needs: start-job steps: - name: Checkout - uses: actions/checkout@50fbc622fc4ef5163becd7fab6573eac35f8462e - with: - submodules: recursive + run: | + git clone https://github.com/${GITHUB_REPOSITORY} .
+ git fetch -v --prune origin +refs/pull/${PR_NUMBER}/merge:refs/remotes/pull/${PR_NUMBER}/merge + git checkout --force --progress refs/remotes/pull/${PR_NUMBER}/merge + git submodule sync --recursive + git submodule update --init --force --recursive - name: Submodule Regression Check run: ./.cicd/submodule-regression-check.sh @@ -28,12 +25,14 @@ jobs: if: github.event.pull_request.base.repo.id != github.event.pull_request.head.repo.id name: Amazon_Linux 2 | Build runs-on: ubuntu-latest - needs: start-job steps: - name: Checkout - uses: actions/checkout@50fbc622fc4ef5163becd7fab6573eac35f8462e - with: - submodules: recursive + run: | + git clone https://github.com/${GITHUB_REPOSITORY} . + git fetch -v --prune origin +refs/pull/${PR_NUMBER}/merge:refs/remotes/pull/${PR_NUMBER}/merge + git checkout --force --progress refs/remotes/pull/${PR_NUMBER}/merge + git submodule sync --recursive + git submodule update --init --force --recursive - name: Build run: | ./.cicd/build.sh @@ -52,9 +51,12 @@ jobs: needs: amazon_linux-2-build steps: - name: Checkout - uses: actions/checkout@50fbc622fc4ef5163becd7fab6573eac35f8462e - with: - submodules: recursive + run: | + git clone https://github.com/${GITHUB_REPOSITORY} . + git fetch -v --prune origin +refs/pull/${PR_NUMBER}/merge:refs/remotes/pull/${PR_NUMBER}/merge + git checkout --force --progress refs/remotes/pull/${PR_NUMBER}/merge + git submodule sync --recursive + git submodule update --init --force --recursive - name: Download Build Artifact uses: actions/download-artifact@v1 with: @@ -72,9 +74,12 @@ jobs: needs: amazon_linux-2-build steps: - name: Checkout - uses: actions/checkout@50fbc622fc4ef5163becd7fab6573eac35f8462e - with: - submodules: recursive + run: | + git clone https://github.com/${GITHUB_REPOSITORY} . + git fetch -v --prune origin +refs/pull/${PR_NUMBER}/merge:refs/remotes/pull/${PR_NUMBER}/merge + git checkout --force --progress refs/remotes/pull/${PR_NUMBER}/merge + git submodule sync --recursive + git submodule update --init --force --recursive - name: Download Build Artifact uses: actions/download-artifact@v1 with: @@ -92,9 +97,12 @@ jobs: needs: amazon_linux-2-build steps: - name: Checkout - uses: actions/checkout@50fbc622fc4ef5163becd7fab6573eac35f8462e - with: - submodules: recursive + run: | + git clone https://github.com/${GITHUB_REPOSITORY} . + git fetch -v --prune origin +refs/pull/${PR_NUMBER}/merge:refs/remotes/pull/${PR_NUMBER}/merge + git checkout --force --progress refs/remotes/pull/${PR_NUMBER}/merge + git submodule sync --recursive + git submodule update --init --force --recursive - name: Download Build Artifact uses: actions/download-artifact@v1 with: @@ -112,12 +120,14 @@ jobs: if: github.event.pull_request.base.repo.id != github.event.pull_request.head.repo.id name: CentOS 7.7 | Build runs-on: ubuntu-latest - needs: start-job steps: - name: Checkout - uses: actions/checkout@50fbc622fc4ef5163becd7fab6573eac35f8462e - with: - submodules: recursive + run: | + git clone https://github.com/${GITHUB_REPOSITORY} . 
+ git fetch -v --prune origin +refs/pull/${PR_NUMBER}/merge:refs/remotes/pull/${PR_NUMBER}/merge + git checkout --force --progress refs/remotes/pull/${PR_NUMBER}/merge + git submodule sync --recursive + git submodule update --init --force --recursive - name: Build run: | ./.cicd/build.sh @@ -136,9 +146,12 @@ jobs: needs: centos-77-build steps: - name: Checkout - uses: actions/checkout@50fbc622fc4ef5163becd7fab6573eac35f8462e - with: - submodules: recursive + run: | + git clone https://github.com/${GITHUB_REPOSITORY} . + git fetch -v --prune origin +refs/pull/${PR_NUMBER}/merge:refs/remotes/pull/${PR_NUMBER}/merge + git checkout --force --progress refs/remotes/pull/${PR_NUMBER}/merge + git submodule sync --recursive + git submodule update --init --force --recursive - name: Download Build Artifact uses: actions/download-artifact@v1 with: @@ -156,9 +169,12 @@ jobs: needs: centos-77-build steps: - name: Checkout - uses: actions/checkout@50fbc622fc4ef5163becd7fab6573eac35f8462e - with: - submodules: recursive + run: | + git clone https://github.com/${GITHUB_REPOSITORY} . + git fetch -v --prune origin +refs/pull/${PR_NUMBER}/merge:refs/remotes/pull/${PR_NUMBER}/merge + git checkout --force --progress refs/remotes/pull/${PR_NUMBER}/merge + git submodule sync --recursive + git submodule update --init --force --recursive - name: Download Build Artifact uses: actions/download-artifact@v1 with: @@ -176,9 +192,12 @@ jobs: needs: centos-77-build steps: - name: Checkout - uses: actions/checkout@50fbc622fc4ef5163becd7fab6573eac35f8462e - with: - submodules: recursive + run: | + git clone https://github.com/${GITHUB_REPOSITORY} . + git fetch -v --prune origin +refs/pull/${PR_NUMBER}/merge:refs/remotes/pull/${PR_NUMBER}/merge + git checkout --force --progress refs/remotes/pull/${PR_NUMBER}/merge + git submodule sync --recursive + git submodule update --init --force --recursive - name: Download Build Artifact uses: actions/download-artifact@v1 with: @@ -196,12 +215,14 @@ jobs: if: github.event.pull_request.base.repo.id != github.event.pull_request.head.repo.id name: Ubuntu 16.04 | Build runs-on: ubuntu-latest - needs: start-job steps: - name: Checkout - uses: actions/checkout@50fbc622fc4ef5163becd7fab6573eac35f8462e - with: - submodules: recursive + run: | + git clone https://github.com/${GITHUB_REPOSITORY} . + git fetch -v --prune origin +refs/pull/${PR_NUMBER}/merge:refs/remotes/pull/${PR_NUMBER}/merge + git checkout --force --progress refs/remotes/pull/${PR_NUMBER}/merge + git submodule sync --recursive + git submodule update --init --force --recursive - name: Build run: | ./.cicd/build.sh @@ -220,9 +241,12 @@ jobs: needs: ubuntu-1604-build steps: - name: Checkout - uses: actions/checkout@50fbc622fc4ef5163becd7fab6573eac35f8462e - with: - submodules: recursive + run: | + git clone https://github.com/${GITHUB_REPOSITORY} . + git fetch -v --prune origin +refs/pull/${PR_NUMBER}/merge:refs/remotes/pull/${PR_NUMBER}/merge + git checkout --force --progress refs/remotes/pull/${PR_NUMBER}/merge + git submodule sync --recursive + git submodule update --init --force --recursive - name: Download Build Artifact uses: actions/download-artifact@v1 with: @@ -240,9 +264,12 @@ jobs: needs: ubuntu-1604-build steps: - name: Checkout - uses: actions/checkout@50fbc622fc4ef5163becd7fab6573eac35f8462e - with: - submodules: recursive + run: | + git clone https://github.com/${GITHUB_REPOSITORY} . 
+ git fetch -v --prune origin +refs/pull/${PR_NUMBER}/merge:refs/remotes/pull/${PR_NUMBER}/merge + git checkout --force --progress refs/remotes/pull/${PR_NUMBER}/merge + git submodule sync --recursive + git submodule update --init --force --recursive - name: Download Build Artifact uses: actions/download-artifact@v1 with: @@ -260,9 +287,12 @@ jobs: needs: ubuntu-1604-build steps: - name: Checkout - uses: actions/checkout@50fbc622fc4ef5163becd7fab6573eac35f8462e - with: - submodules: recursive + run: | + git clone https://github.com/${GITHUB_REPOSITORY} . + git fetch -v --prune origin +refs/pull/${PR_NUMBER}/merge:refs/remotes/pull/${PR_NUMBER}/merge + git checkout --force --progress refs/remotes/pull/${PR_NUMBER}/merge + git submodule sync --recursive + git submodule update --init --force --recursive - name: Download Build Artifact uses: actions/download-artifact@v1 with: @@ -280,12 +310,14 @@ jobs: if: github.event.pull_request.base.repo.id != github.event.pull_request.head.repo.id name: Ubuntu 18.04 | Build runs-on: ubuntu-latest - needs: start-job steps: - name: Checkout - uses: actions/checkout@50fbc622fc4ef5163becd7fab6573eac35f8462e - with: - submodules: recursive + run: | + git clone https://github.com/${GITHUB_REPOSITORY} . + git fetch -v --prune origin +refs/pull/${PR_NUMBER}/merge:refs/remotes/pull/${PR_NUMBER}/merge + git checkout --force --progress refs/remotes/pull/${PR_NUMBER}/merge + git submodule sync --recursive + git submodule update --init --force --recursive - name: Build run: | ./.cicd/build.sh @@ -304,9 +336,12 @@ jobs: needs: ubuntu-1804-build steps: - name: Checkout - uses: actions/checkout@50fbc622fc4ef5163becd7fab6573eac35f8462e - with: - submodules: recursive + run: | + git clone https://github.com/${GITHUB_REPOSITORY} . + git fetch -v --prune origin +refs/pull/${PR_NUMBER}/merge:refs/remotes/pull/${PR_NUMBER}/merge + git checkout --force --progress refs/remotes/pull/${PR_NUMBER}/merge + git submodule sync --recursive + git submodule update --init --force --recursive - name: Download Build Artifact uses: actions/download-artifact@v1 with: @@ -324,9 +359,12 @@ jobs: needs: ubuntu-1804-build steps: - name: Checkout - uses: actions/checkout@50fbc622fc4ef5163becd7fab6573eac35f8462e - with: - submodules: recursive + run: | + git clone https://github.com/${GITHUB_REPOSITORY} . + git fetch -v --prune origin +refs/pull/${PR_NUMBER}/merge:refs/remotes/pull/${PR_NUMBER}/merge + git checkout --force --progress refs/remotes/pull/${PR_NUMBER}/merge + git submodule sync --recursive + git submodule update --init --force --recursive - name: Download Build Artifact uses: actions/download-artifact@v1 with: @@ -344,9 +382,12 @@ jobs: needs: ubuntu-1804-build steps: - name: Checkout - uses: actions/checkout@50fbc622fc4ef5163becd7fab6573eac35f8462e - with: - submodules: recursive + run: | + git clone https://github.com/${GITHUB_REPOSITORY} . 
+ git fetch -v --prune origin +refs/pull/${PR_NUMBER}/merge:refs/remotes/pull/${PR_NUMBER}/merge + git checkout --force --progress refs/remotes/pull/${PR_NUMBER}/merge + git submodule sync --recursive + git submodule update --init --force --recursive - name: Download Build Artifact uses: actions/download-artifact@v1 with: @@ -364,12 +405,14 @@ jobs: if: github.event.pull_request.base.repo.id != github.event.pull_request.head.repo.id name: MacOS 10.15 | Build runs-on: macos-latest - needs: start-job steps: - name: Checkout - uses: actions/checkout@50fbc622fc4ef5163becd7fab6573eac35f8462e - with: - submodules: recursive + run: | + git clone https://github.com/${GITHUB_REPOSITORY} . + git fetch -v --prune origin +refs/pull/${PR_NUMBER}/merge:refs/remotes/pull/${PR_NUMBER}/merge + git checkout --force --progress refs/remotes/pull/${PR_NUMBER}/merge + git submodule sync --recursive + git submodule update --init --force --recursive - name: Build run: | ./.cicd/platforms/unpinned/macos-10.14-unpinned.sh @@ -386,9 +429,12 @@ jobs: needs: macos-1015-build steps: - name: Checkout - uses: actions/checkout@50fbc622fc4ef5163becd7fab6573eac35f8462e - with: - submodules: recursive + run: | + git clone https://github.com/${GITHUB_REPOSITORY} . + git fetch -v --prune origin +refs/pull/${PR_NUMBER}/merge:refs/remotes/pull/${PR_NUMBER}/merge + git checkout --force --progress refs/remotes/pull/${PR_NUMBER}/merge + git submodule sync --recursive + git submodule update --init --force --recursive - name: Download Build Artifact uses: actions/download-artifact@v1 with: @@ -404,9 +450,12 @@ jobs: needs: macos-1015-build steps: - name: Checkout - uses: actions/checkout@50fbc622fc4ef5163becd7fab6573eac35f8462e - with: - submodules: recursive + run: | + git clone https://github.com/${GITHUB_REPOSITORY} . + git fetch -v --prune origin +refs/pull/${PR_NUMBER}/merge:refs/remotes/pull/${PR_NUMBER}/merge + git checkout --force --progress refs/remotes/pull/${PR_NUMBER}/merge + git submodule sync --recursive + git submodule update --init --force --recursive - name: Download Build Artifact uses: actions/download-artifact@v1 with: @@ -422,9 +471,12 @@ jobs: needs: macos-1015-build steps: - name: Checkout - uses: actions/checkout@50fbc622fc4ef5163becd7fab6573eac35f8462e - with: - submodules: recursive + run: | + git clone https://github.com/${GITHUB_REPOSITORY} . 
+ git fetch -v --prune origin +refs/pull/${PR_NUMBER}/merge:refs/remotes/pull/${PR_NUMBER}/merge + git checkout --force --progress refs/remotes/pull/${PR_NUMBER}/merge + git submodule sync --recursive + git submodule update --init --force --recursive - name: Download Build Artifact uses: actions/download-artifact@v1 with: diff --git a/.gitignore b/.gitignore index 6dd6c1ca492..4c6f4f1a0f8 100644 --- a/.gitignore +++ b/.gitignore @@ -85,3 +85,5 @@ var/lib/node_* .idea/ *.iws .DS_Store + +!*.swagger.* diff --git a/CMakeLists.txt b/CMakeLists.txt index a3c0709b636..57894e10c8b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -25,7 +25,7 @@ set( CXX_STANDARD_REQUIRED ON) set(VERSION_MAJOR 2) set(VERSION_MINOR 0) -set(VERSION_PATCH 3) +set(VERSION_PATCH 4) #set(VERSION_SUFFIX rc3) if(VERSION_SUFFIX) @@ -80,9 +80,8 @@ if(CMAKE_SIZEOF_VOID_P EQUAL 8 AND NOT WIN32) endif() if(CMAKE_SIZEOF_VOID_P EQUAL 8 AND NOT WIN32) - list(APPEND EOSIO_WASM_RUNTIMES eos-vm) if(CMAKE_SYSTEM_PROCESSOR STREQUAL x86_64) - list(APPEND EOSIO_WASM_RUNTIMES eos-vm-jit) + list(APPEND EOSIO_WASM_RUNTIMES eos-vm eos-vm-jit) endif() endif() diff --git a/README.md b/README.md index a71d2b2b160..0e445ca5360 100644 --- a/README.md +++ b/README.md @@ -74,13 +74,13 @@ $ brew remove eosio #### Ubuntu 18.04 Package Install ```sh -$ wget https://github.com/eosio/eos/releases/download/v2.0.3/eosio_2.0.3-1-ubuntu-18.04_amd64.deb -$ sudo apt install ./eosio_2.0.3-1-ubuntu-18.04_amd64.deb +$ wget https://github.com/eosio/eos/releases/download/v2.0.4/eosio_2.0.4-1-ubuntu-18.04_amd64.deb +$ sudo apt install ./eosio_2.0.4-1-ubuntu-18.04_amd64.deb ``` #### Ubuntu 16.04 Package Install ```sh -$ wget https://github.com/eosio/eos/releases/download/v2.0.3/eosio_2.0.3-1-ubuntu-16.04_amd64.deb -$ sudo apt install ./eosio_2.0.3-1-ubuntu-16.04_amd64.deb +$ wget https://github.com/eosio/eos/releases/download/v2.0.4/eosio_2.0.4-1-ubuntu-16.04_amd64.deb +$ sudo apt install ./eosio_2.0.4-1-ubuntu-16.04_amd64.deb ``` #### Ubuntu Package Uninstall ```sh @@ -91,8 +91,8 @@ $ sudo apt remove eosio #### RPM Package Install ```sh -$ wget https://github.com/eosio/eos/releases/download/v2.0.3/eosio-2.0.3-1.el7.x86_64.rpm -$ sudo yum install ./eosio-2.0.3-1.el7.x86_64.rpm +$ wget https://github.com/eosio/eos/releases/download/v2.0.4/eosio-2.0.4-1.el7.x86_64.rpm +$ sudo yum install ./eosio-2.0.4-1.el7.x86_64.rpm ``` #### RPM Package Uninstall ```sh @@ -126,7 +126,7 @@ To uninstall the EOSIO built/installed binaries and dependencies, run: ## Getting Started -Instructions detailing the process of getting the software, building it, running a simple test network that produces blocks, account creation and uploading a sample contract to the blockchain can be found in the [Getting Started](https://developers.eos.io/eosio-home/docs) walkthrough. +Instructions detailing the process of getting the software, building it, running a simple test network that produces blocks, account creation and uploading a sample contract to the blockchain can be found in the [Getting Started](https://developers.eos.io/welcome/latest/getting-started) walkthrough. 
## Contributing diff --git a/docs.json b/docs.json new file mode 100644 index 00000000000..698c666cecb --- /dev/null +++ b/docs.json @@ -0,0 +1,65 @@ +{ + "name": "eos", + "generators": [ + { + "name": "collate_markdown", + "options": { + "docs_dir": "docs" + } + }, + { + "name": "swagger", + "options": { + "swagger_path": "plugins/chain_api_plugin/chain.swagger.yaml", + "swagger_dest_path": "nodeos/plugins/chain_api_plugin/api-reference", + "disable_filters": true, + "disable_summary_gen": true + } + }, + { + "name": "swagger", + "options": { + "swagger_path": "plugins/db_size_api_plugin/db_size.swagger.yaml", + "swagger_dest_path": "nodeos/plugins/db_size_api_plugin/api-reference", + "disable_filters": true, + "disable_summary_gen": true + } + }, + { + "name": "swagger", + "options": { + "swagger_path": "plugins/producer_api_plugin/producer.swagger.yaml", + "swagger_dest_path": "nodeos/plugins/producer_api_plugin/api-reference", + "disable_filters": true, + "disable_summary_gen": true + } + }, + { + "name": "swagger", + "options": { + "swagger_path": "plugins/net_api_plugin/net.swagger.yaml", + "swagger_dest_path": "nodeos/plugins/net_api_plugin/api-reference", + "disable_filters": true, + "disable_summary_gen": true + } + }, + { + "name": "swagger", + "options": { + "swagger_path": "plugins/test_control_api_plugin/test_control.swagger.yaml", + "swagger_dest_path": "nodeos/plugins/test_control_api_plugin/api-reference", + "disable_filters": true, + "disable_summary_gen": true + } + }, + { + "name": "swagger", + "options": { + "swagger_path": "plugins/trace_api_plugin/trace_api.swagger.yaml", + "swagger_dest_path": "nodeos/plugins/trace_api_plugin/api-reference", + "disable_filters": true, + "disable_summary_gen": true + } + } + ] +} diff --git a/docs/00_install/00_install-prebuilt-binaries.md b/docs/00_install/00_install-prebuilt-binaries.md index ae121a22fb5..9c62df641ed 100644 --- a/docs/00_install/00_install-prebuilt-binaries.md +++ b/docs/00_install/00_install-prebuilt-binaries.md @@ -25,13 +25,13 @@ brew remove eosio #### Ubuntu 18.04 Package Install ```sh -wget https://github.com/eosio/eos/releases/download/v2.0.3/eosio_2.0.3-1-ubuntu-18.04_amd64.deb -sudo apt install ./eosio_2.0.3-1-ubuntu-18.04_amd64.deb +wget https://github.com/eosio/eos/releases/download/v2.0.4/eosio_2.0.4-1-ubuntu-18.04_amd64.deb +sudo apt install ./eosio_2.0.4-1-ubuntu-18.04_amd64.deb ``` #### Ubuntu 16.04 Package Install ```sh -wget https://github.com/eosio/eos/releases/download/v2.0.3/eosio_2.0.3-1-ubuntu-16.04_amd64.deb -sudo apt install ./eosio_2.0.3-1-ubuntu-16.04_amd64.deb +wget https://github.com/eosio/eos/releases/download/v2.0.4/eosio_2.0.4-1-ubuntu-16.04_amd64.deb +sudo apt install ./eosio_2.0.4-1-ubuntu-16.04_amd64.deb ``` #### Ubuntu Package Uninstall ```sh @@ -42,8 +42,8 @@ sudo apt remove eosio #### RPM Package Install ```sh -wget https://github.com/eosio/eos/releases/download/v2.0.3/eosio-2.0.3-1.el7.x86_64.rpm -sudo yum install ./eosio-2.0.3-1.el7.x86_64.rpm +wget https://github.com/eosio/eos/releases/download/v2.0.4/eosio-2.0.4-1.el7.x86_64.rpm +sudo yum install ./eosio-2.0.4-1.el7.x86_64.rpm ``` #### RPM Package Uninstall ```sh diff --git a/docs/01_nodeos/02_usage/00_nodeos-options.md b/docs/01_nodeos/02_usage/00_nodeos-options.md index 3d4d0a79300..b2ab1561c33 100644 --- a/docs/01_nodeos/02_usage/00_nodeos-options.md +++ b/docs/01_nodeos/02_usage/00_nodeos-options.md @@ -30,422 +30,6 @@ Application Command Line Options: ## Plugin-specific Options -Plugin-specific options control the 
behavior of the nodeos plugins. Every plugin-specific option has a unique name, so it can be specified in any order within the command line or `config.ini` file. When specifying one or more plugin-specific option(s), the applicable plugin(s) must also be enabled using the `--plugin` option or else the corresponding option(s) will be ignored. A sample output from running `nodeos --help` is displayed below, showing an excerpt from the plugin-specific options: - -```console -Config Options for eosio::chain_plugin: - --blocks-dir arg (="blocks") the location of the blocks directory - (absolute path or relative to - application data dir) - --protocol-features-dir arg (="protocol_features") - the location of the protocol_features - directory (absolute path or relative to - application config dir) - --checkpoint arg Pairs of [BLOCK_NUM,BLOCK_ID] that - should be enforced as checkpoints. - --wasm-runtime runtime Override default WASM runtime - --abi-serializer-max-time-ms arg (=15000) - Override default maximum ABI - serialization time allowed in ms - --chain-state-db-size-mb arg (=1024) Maximum size (in MiB) of the chain - state database - --chain-state-db-guard-size-mb arg (=128) - Safely shut down node when free space - remaining in the chain state database - drops below this size (in MiB). - --reversible-blocks-db-size-mb arg (=340) - Maximum size (in MiB) of the reversible - blocks database - --reversible-blocks-db-guard-size-mb arg (=2) - Safely shut down node when free space - remaining in the reverseible blocks - database drops below this size (in - MiB). - --signature-cpu-billable-pct arg (=50) - Percentage of actual signature recovery - cpu to bill. Whole number percentages, - e.g. 50 for 50% - --chain-threads arg (=2) Number of worker threads in controller - thread pool - --contracts-console print contract's output to console - --actor-whitelist arg Account added to actor whitelist (may - specify multiple times) - --actor-blacklist arg Account added to actor blacklist (may - specify multiple times) - --contract-whitelist arg Contract account added to contract - whitelist (may specify multiple times) - --contract-blacklist arg Contract account added to contract - blacklist (may specify multiple times) - --action-blacklist arg Action (in the form code::action) added - to action blacklist (may specify - multiple times) - --key-blacklist arg Public key added to blacklist of keys - that should not be included in - authorities (may specify multiple - times) - --sender-bypass-whiteblacklist arg Deferred transactions sent by accounts - in this list do not have any of the - subjective whitelist/blacklist checks - applied to them (may specify multiple - times) - --read-mode arg (=speculative) Database read mode ("speculative", - "head", "read-only", "irreversible"). - In "speculative" mode database contains - changes done up to the head block plus - changes made by transactions not yet - included to the blockchain. - In "head" mode database contains - changes done up to the current head - block. - In "read-only" mode database contains - changes done up to the current head - block and transactions cannot be pushed - to the chain API. - In "irreversible" mode database - contains changes done up to the last - irreversible block and transactions - cannot be pushed to the chain API. - - --validation-mode arg (=full) Chain validation mode ("full" or - "light"). - In "full" mode all incoming blocks will - be fully validated. 
- In "light" mode all incoming blocks - headers will be fully validated; - transactions in those validated blocks - will be trusted - - --disable-ram-billing-notify-checks Disable the check which subjectively - fails a transaction if a contract bills - more RAM to another account within the - context of a notification handler (i.e. - when the receiver is not the code of - the action). - --maximum-variable-signature-length arg (=16384) - Subjectively limit the maximum length - of variable components in a variable - legnth signature to this size in bytes - --trusted-producer arg Indicate a producer whose blocks - headers signed by it will be fully - validated, but transactions in those - validated blocks will be trusted. - --database-map-mode arg (=mapped) Database map mode ("mapped", "heap", or - "locked"). - In "mapped" mode database is memory - mapped as a file. - In "heap" mode database is preloaded in - to swappable memory. - In "locked" mode database is preloaded - and locked in to memory. - - -Command Line Options for eosio::chain_plugin: - --genesis-json arg File to read Genesis State from - --genesis-timestamp arg override the initial timestamp in the - Genesis State file - --print-genesis-json extract genesis_state from blocks.log - as JSON, print to console, and exit - --extract-genesis-json arg extract genesis_state from blocks.log - as JSON, write into specified file, and - exit - --print-build-info print build environment information to - console as JSON and exit - --extract-build-info arg extract build environment information - as JSON, write into specified file, and - exit - --fix-reversible-blocks recovers reversible block database if - that database is in a bad state - --force-all-checks do not skip any checks that can be - skipped while replaying irreversible - blocks - --disable-replay-opts disable optimizations that specifically - target replay - --replay-blockchain clear chain state database and replay - all blocks - --hard-replay-blockchain clear chain state database, recover as - many blocks as possible from the block - log, and then replay those blocks - --delete-all-blocks clear chain state database and block - log - --truncate-at-block arg (=0) stop hard replay / block log recovery - at this block number (if set to - non-zero number) - --import-reversible-blocks arg replace reversible block database with - blocks imported from specified file and - then exit - --export-reversible-blocks arg export reversible block database in - portable format into specified file and - then exit - --snapshot arg File to read Snapshot State from - -Config Options for eosio::history_plugin: - -f [ --filter-on ] arg Track actions which match - receiver:action:actor. Actor may be - blank to include all. Action and Actor - both blank allows all from Recieiver. - Receiver may not be blank. - -F [ --filter-out ] arg Do not track actions which match - receiver:action:actor. Action and Actor - both blank excludes all from Reciever. - Actor blank excludes all from - reciever:action. Receiver may not be - blank. - -Config Options for eosio::http_client_plugin: - --https-client-root-cert arg PEM encoded trusted root certificate - (or path to file containing one) used - to validate any TLS connections made. 
- (may specify multiple times) - - --https-client-validate-peers arg (=1) - true: validate that the peer - certificates are valid and trusted, - false: ignore cert errors - -Config Options for eosio::http_plugin: - --unix-socket-path arg The filename (relative to data-dir) to - create a unix socket for HTTP RPC; set - blank to disable. - --http-server-address arg (=127.0.0.1:8888) - The local IP and port to listen for - incoming http connections; set blank to - disable. - --https-server-address arg The local IP and port to listen for - incoming https connections; leave blank - to disable. - --https-certificate-chain-file arg Filename with the certificate chain to - present on https connections. PEM - format. Required for https. - --https-private-key-file arg Filename with https private key in PEM - format. Required for https - --https-ecdh-curve arg (=secp384r1) Configure https ECDH curve to use: - secp384r1 or prime256v1 - --access-control-allow-origin arg Specify the Access-Control-Allow-Origin - to be returned on each request. - --access-control-allow-headers arg Specify the Access-Control-Allow-Header - s to be returned on each request. - --access-control-max-age arg Specify the Access-Control-Max-Age to - be returned on each request. - --access-control-allow-credentials Specify if Access-Control-Allow-Credent - ials: true should be returned on each - request. - --max-body-size arg (=1048576) The maximum body size in bytes allowed - for incoming RPC requests - --http-max-bytes-in-flight-mb arg (=500) - Maximum size in megabytes http_plugin - should use for processing http - requests. 503 error response when - exceeded. - --verbose-http-errors Append the error log to HTTP responses - --http-validate-host arg (=1) If set to false, then any incoming - "Host" header is considered valid - --http-alias arg Additionaly acceptable values for the - "Host" header of incoming HTTP - requests, can be specified multiple - times. Includes http/s_server_address - by default. - --http-threads arg (=2) Number of worker threads in http thread - pool - -Config Options for eosio::login_plugin: - --max-login-requests arg (=1000000) The maximum number of pending login - requests - --max-login-timeout arg (=60) The maximum timeout for pending login - requests (in seconds) - -Config Options for eosio::net_plugin: - --p2p-listen-endpoint arg (=0.0.0.0:9876) - The actual host:port used to listen for - incoming p2p connections. - --p2p-server-address arg An externally accessible host:port for - identifying this node. Defaults to - p2p-listen-endpoint. - --p2p-peer-address arg The public endpoint of a peer node to - connect to. Use multiple - p2p-peer-address options as needed to - compose a network. - Syntax: host:port[:|] - The optional 'trx' and 'blk' - indicates to node that only - transactions 'trx' or blocks 'blk' - should be sent. Examples: - p2p.eos.io:9876 - p2p.trx.eos.io:9876:trx - p2p.blk.eos.io:9876:blk - - --p2p-max-nodes-per-host arg (=1) Maximum number of client nodes from any - single IP address - --agent-name arg (="EOS Test Agent") The name supplied to identify this node - amongst the peers. - --allowed-connection arg (=any) Can be 'any' or 'producers' or - 'specified' or 'none'. If 'specified', - peer-key must be specified at least - once. If only 'producers', peer-key is - not required. 'producers' and - 'specified' may be combined. - --peer-key arg Optional public key of peer allowed to - connect. May be used multiple times. 
- --peer-private-key arg Tuple of [PublicKey, WIF private key] - (may specify multiple times) - --max-clients arg (=25) Maximum number of clients from which - connections are accepted, use 0 for no - limit - --connection-cleanup-period arg (=30) number of seconds to wait before - cleaning up dead connections - --max-cleanup-time-msec arg (=10) max connection cleanup time per cleanup - call in millisec - --net-threads arg (=2) Number of worker threads in net_plugin - thread pool - --sync-fetch-span arg (=100) number of blocks to retrieve in a chunk - from any individual peer during - synchronization - --use-socket-read-watermark arg (=0) Enable expirimental socket read - watermark optimization - --peer-log-format arg (=["${_name}" ${_ip}:${_port}]) - The string used to format peers when - logging messages about them. Variables - are escaped with ${}. - Available Variables: - _name self-reported name - - _id self-reported ID (64 hex - characters) - - _sid first 8 characters of - _peer.id - - _ip remote IP address of peer - - _port remote port number of peer - - _lip local IP address connected to - peer - - _lport local port number connected - to peer - - - -Config Options for eosio::producer_plugin: - - -e [ --enable-stale-production ] Enable block production, even if the - chain is stale. - -x [ --pause-on-startup ] Start this node in a state where - production is paused - --max-transaction-time arg (=30) Limits the maximum time (in - milliseconds) that is allowed a pushed - transaction's code to execute before - being considered invalid - --max-irreversible-block-age arg (=-1) - Limits the maximum age (in seconds) of - the DPOS Irreversible Block for a chain - this node will produce blocks on (use - negative value to indicate unlimited) - -p [ --producer-name ] arg ID of producer controlled by this node - (e.g. inita; may specify multiple - times) - --private-key arg (DEPRECATED - Use signature-provider - instead) Tuple of [public key, WIF - private key] (may specify multiple - times) - --signature-provider arg (=EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV=KEY:5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3) - Key=Value pairs in the form - = - Where: - is a string form of - a vaild EOSIO public - key - - is a string in the - form - : - - is KEY, or KEOSD - - KEY: is a string form of - a valid EOSIO - private key which - maps to the provided - public key - - KEOSD: is the URL where - keosd is available - and the approptiate - wallet(s) are - unlocked - --keosd-provider-timeout arg (=5) Limits the maximum time (in - milliseconds) that is allowed for - sending blocks to a keosd provider for - signing - --greylist-account arg account that can not access to extended - CPU/NET virtual resources - --greylist-limit arg (=1000) Limit (between 1 and 1000) on the - multiple that CPU/NET virtual resources - can extend during low usage (only - enforced subjectively; use 1000 to not - enforce any limit) - --produce-time-offset-us arg (=0) offset of non last block producing time - in microseconds. Negative number - results in blocks to go out sooner, and - positive number results in blocks to go - out later - --last-block-time-offset-us arg (=0) offset of last block producing time in - microseconds. 
Negative number results - in blocks to go out sooner, and - positive number results in blocks to go - out later - --max-scheduled-transaction-time-per-block-ms arg (=100) - Maximum wall-clock time, in - milliseconds, spent retiring scheduled - transactions in any block before - returning to normal transaction - processing. - --subjective-cpu-leeway-us arg (=31000) - Time in microseconds allowed for a - transaction that starts with - insufficient CPU quota to complete and - cover its CPU usage. - --incoming-defer-ratio arg (=1) ratio between incoming transations and - deferred transactions when both are - exhausted - --incoming-transaction-queue-size-mb arg (=1024) - Maximum size (in MiB) of the incoming - transaction queue. Exceeding this value - will subjectively drop transaction with - resource exhaustion. - --producer-threads arg (=2) Number of worker threads in producer - thread pool - --snapshots-dir arg (="snapshots") the location of the snapshots directory - (absolute path or relative to - application data dir) - -Config Options for eosio::state_history_plugin: - --state-history-dir arg (="state-history") - the location of the state-history - directory (absolute path or relative to - application data dir) - --trace-history enable trace history - --chain-state-history enable chain state history - --state-history-endpoint arg (=127.0.0.1:8080) - the endpoint upon which to listen for - incoming connections. Caution: only - expose this port to your internal - network. - --trace-history-debug-mode enable debug mode for trace history - -Command Line Options for eosio::state_history_plugin: - --delete-state-history clear state history files - -Config Options for eosio::txn_test_gen_plugin: - --txn-reference-block-lag arg (=0) Lag in number of blocks from the head - block when selecting the reference - block for transactions (-1 means Last - Irreversible Block) - --txn-test-gen-threads arg (=2) Number of worker threads in - txn_test_gen thread pool - --txn-test-gen-account-prefix arg (=txn.test.) - Prefix to use for accounts generated - and used by this plugin -``` +Plugin-specific options control the behavior of the nodeos plugins. Every plugin-specific option has a unique name, so it can be specified in any order within the command line or `config.ini` file. When specifying one or more plugin-specific option(s), the applicable plugin(s) must also be enabled using the `--plugin` option or else the corresponding option(s) will be ignored. For more information on each plugin-specific option, just visit the [Plugins](../03_plugins/index.md) section. diff --git a/docs/01_nodeos/03_plugins/chain_api_plugin/api-reference/index.md b/docs/01_nodeos/03_plugins/chain_api_plugin/api-reference/index.md new file mode 100644 index 00000000000..6451c708686 --- /dev/null +++ b/docs/01_nodeos/03_plugins/chain_api_plugin/api-reference/index.md @@ -0,0 +1 @@ + diff --git a/docs/01_nodeos/03_plugins/chain_plugin/index.md b/docs/01_nodeos/03_plugins/chain_plugin/index.md index c9934991125..b820df3775f 100644 --- a/docs/01_nodeos/03_plugins/chain_plugin/index.md +++ b/docs/01_nodeos/03_plugins/chain_plugin/index.md @@ -73,7 +73,8 @@ Config Options for eosio::chain_plugin: application config dir) --checkpoint arg Pairs of [BLOCK_NUM,BLOCK_ID] that should be enforced as checkpoints. 
- --wasm-runtime wavm/wabt Override default WASM runtime + --wasm-runtime eos-vm|eos-vm-jit Override default WASM runtime (wabt) + --eos-vm-oc-enable Enable optimized compilation in WASM --abi-serializer-max-time-ms arg (=15000) Override default maximum ABI serialization time allowed in ms diff --git a/docs/01_nodeos/03_plugins/db_size_api_plugin/api-reference/index.md b/docs/01_nodeos/03_plugins/db_size_api_plugin/api-reference/index.md new file mode 100644 index 00000000000..6451c708686 --- /dev/null +++ b/docs/01_nodeos/03_plugins/db_size_api_plugin/api-reference/index.md @@ -0,0 +1 @@ + diff --git a/docs/01_nodeos/03_plugins/net_api_plugin/api-reference/index.md b/docs/01_nodeos/03_plugins/net_api_plugin/api-reference/index.md new file mode 100644 index 00000000000..6451c708686 --- /dev/null +++ b/docs/01_nodeos/03_plugins/net_api_plugin/api-reference/index.md @@ -0,0 +1 @@ + diff --git a/docs/01_nodeos/03_plugins/producer_api_plugin/api-reference/index.md b/docs/01_nodeos/03_plugins/producer_api_plugin/api-reference/index.md new file mode 100644 index 00000000000..6451c708686 --- /dev/null +++ b/docs/01_nodeos/03_plugins/producer_api_plugin/api-reference/index.md @@ -0,0 +1 @@ + diff --git a/docs/01_nodeos/03_plugins/producer_plugin/10_block-producing-explained.md b/docs/01_nodeos/03_plugins/producer_plugin/10_block-producing-explained.md new file mode 100644 index 00000000000..1c3d56ef14a --- /dev/null +++ b/docs/01_nodeos/03_plugins/producer_plugin/10_block-producing-explained.md @@ -0,0 +1,90 @@ +--- +content_title: Block Production Explained +--- + +For simplicity of the explanation, let's consider the following notations: + +m = max_block_cpu_usage + +t = block-time + +e = last-block-cpu-effort-percent + +w = block_time_interval = 500ms + +a = produce-block-early-amount = (w - w*e/100) ms + +p = produce-block-time; p = t - a + +c = billed_cpu_in_block = minimum(m, w - a) + +n = network tcp/ip latency + +peer validation for similar hardware/eosio-version/config will be <= m + +**As an example, let's consider the following four BPs and their network topology, as depicted in the diagram below** + + +```dot-svg +#p2p_local_chain_prunning.dot - local chain prunning +# +#notes: * to see image copy/paste to https://dreampuf.github.io/GraphvizOnline +# * image will be rendered by gatsby-remark-graphviz plugin in eosio docs. + +digraph { + newrank=true #allows ranks inside subgraphs (important!) + compound=true #allows edges connecting nodes with subgraphs + graph [rankdir=LR] + node [style=filled, fillcolor=lightgray, shape=square, fixedsize=true, width=.55, fontsize=10] + edge [dir=both, arrowsize=.6, weight=100] + splines=false + + subgraph cluster_chain { + label="Block Producers Peers"; labelloc="b" + graph [color=invis] + b0 [label="...", color=invis, style=""] + b1 [label="BP-A"]; b2 [label="BP-A\nPeer"]; b3 [label="BP-B\nPeer"]; b4 [label="BP-B"] + b5 [label="...", color=invis, style=""] + b0 -> b1 -> b2 -> b3 -> b4 -> b5 + } //cluster_chain + +} //digraph +``` + +`BP-A` will send the block at `p` and, + +`BP-B` needs the block at time `t` or otherwise it will drop it. + +If `BP-A` is producing 12 blocks as follows `b(lock) at t(ime) 1`, `bt 1.5`, `bt 2`, `bt 2.5`, `bt 3`, `bt 3.5`, `bt 4`, `bt 4.5`, `bt 5`, `bt 5.5`, `bt 6`, `bt 6.5` then `BP-B` needs `bt 6.5` by time `6.5` so it has `.5` to produce `bt 7`. + +Please notice that the time of `bt 7` minus `.5` equals the time of `bt 6.5`; therefore, time `t` is the last block time of `BP-A` and the time when `BP-B` needs to start its first block.
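To make the arithmetic above concrete, here is a minimal shell sketch of the same calculation, assuming the two-validation-hop topology from the diagram (the variable values and the three-hop latency term are illustrative assumptions, not output of any EOSIO tool):

```sh
# Hedged sketch: recompute a, c, and the arrival offset at BP-B.
# A positive offset means the block arrives late; negative means time to spare.
w=500   # block_time_interval in ms
m=200   # max_block_cpu_usage in ms
e=50    # last-block-cpu-effort-percent (try 50, 40, 30, 25, 10)
n=0     # network tcp/ip latency per hop in ms
a=$(( w - w * e / 100 ))          # produce-block-early-amount
c=$(( m < w - a ? m : w - a ))    # billed_cpu_in_block = minimum(m, w - a)
offset=$(( 2 * c + 3 * n - a ))   # two peer validations, three network hops
echo "a=${a}ms c=${c}ms arrival offset at BP-B=${offset}ms"
```

With `e=50` this prints an offset of `150` ms (late), matching Example 1 below; with `e=30` it prints `-50` ms (to spare), matching Example 3.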
+ +## Example 1 +`BP-A` has 50% e, m = 200ms, c = 200ms, n = 0ms, a = 250ms: +`BP-A` sends at (t-250ms) <-> `BP-A-Peer` processes for 200ms and sends at (t - 50ms) <-> `BP-B-Peer` processes for 200ms and sends at (t + 150ms) <-> arrive at `BP-B` 150ms too late. + +## Example 2 +`BP-A` has 40% e and m = 200ms, c = 200ms, n = 0ms, a = 300ms: +(t-300ms) <-> (+200ms) <-> (+200ms) <-> arrive at `BP-B` 100ms too late. + +## Example 3 +`BP-A` has 30% e and m = 200ms, c = 150ms, n = 0ms, a = 350ms: +(t-350ms) <-> (+150ms) <-> (+150ms) <-> arrive at `BP-B` with 50ms to spare. + +## Example 4 +`BP-A` has 25% e and m = 200ms, c = 125ms, n = 0ms, a = 375ms: +(t-375ms) <-> (+125ms) <-> (+125ms) <-> arrive at `BP-B` with 125ms to spare. + +## Example 5 +`BP-A` has 10% e and m = 200ms, c = 50ms, n = 0ms, a = 450ms: +(t-450ms) <-> (+50ms) <-> (+50ms) <-> arrive at `BP-B` with 350ms to spare. + +## Example 6 +`BP-A` has 10% e and m = 200ms, c = 50ms, n = 15ms, a = 450ms: +(t-450ms) <- +15ms -> (+50ms) <- +15ms -> (+50ms) <- +15ms -> `BP-B` <-> arrive with 305ms to spare. + +## Example 7 +Example world-wide network: `BP-A` has 10% e and m = 200ms, c = 50ms, n = 15ms/250ms, a = 450ms: +(t-450ms) <- +15ms -> (+50ms) <- +250ms -> (+50ms) <- +15ms -> `BP-B` <-> arrive with 70ms to spare. + +Running `wasm-runtime=eos-vm-jit` with `eos-vm-oc-enable` on a relay node will reduce the validation time. diff --git a/docs/01_nodeos/03_plugins/producer_plugin/index.md b/docs/01_nodeos/03_plugins/producer_plugin/index.md index 8a50fbeb23d..23204885236 100644 --- a/docs/01_nodeos/03_plugins/producer_plugin/index.md +++ b/docs/01_nodeos/03_plugins/producer_plugin/index.md @@ -5,7 +5,7 @@ The `producer_plugin` loads functionality required for a node to produce blocks. [[info]] -| Additional configuration is required to produce blocks. Please read [Configuring Block Producing Node](https://developers.eos.io/eosio-nodeos/docs/environment-producing-node). +| Additional configuration is required to produce blocks. Please read [Configuring Block Producing Node](../../02_usage/02_node-setups/00_producing-node.md). ## Usage @@ -38,6 +38,14 @@ Config Options for eosio::producer_plugin: the DPOS Irreversible Block for a chain this node will produce blocks on (use negative value to indicate unlimited) + --max-block-cpu-usage-threshold-us Threshold of CPU block production to + consider block full; when within threshold + of max-block-cpu-usage block can be + produced immediately. Default value 5000 + --max-block-net-usage-threshold-bytes Threshold of NET block production to + consider block full; when within threshold + of max-block-net-usage block can be produced + immediately. Default value 1024 -p [ --producer-name ] arg ID of producer controlled by this node (e.g. inita; may specify multiple times) @@ -92,9 +100,10 @@ Config Options for eosio::producer_plugin: transactions in any block before returning to normal transaction processing.
- --incoming-defer-ratio arg (=1) ratio between incoming transations and + --incoming-defer-ratio arg (=1) ratio between incoming transactions and deferred transactions when both are - exhausted + queued for execution + --producer-threads arg (=2) Number of worker threads in producer thread pool --snapshots-dir arg (="snapshots") the location of the snapshots directory @@ -106,6 +115,21 @@ Config Options for eosio::producer_plugin: * [`chain_plugin`](../chain_plugin/index.md) +## Transaction Priority + +You can give one transaction type priority over the other when the producer plugin has a queue of pending transactions. + +The option below sets the ratio between incoming transactions and deferred transactions: + +```console + --incoming-defer-ratio arg (=1) +``` + +With the default value of `1`, the `producer` plugin processes one incoming transaction per deferred transaction. When `arg` is set to `10`, the plugin processes 10 incoming transactions per deferred transaction. + +If `arg` is set to a sufficiently large number, the plugin always processes incoming transactions first, until the incoming transaction queue is empty. Conversely, if `arg` is `0`, the `producer` plugin processes the deferred transaction queue first. + + ### Load Dependency Examples ```console @@ -116,3 +140,5 @@ plugin = eosio::chain_plugin [operations] [options] # command-line nodeos ... --plugin eosio::chain_plugin [operations] [options] ``` + +For details about how blocks are produced, please read the following [block producing explainer](10_block-producing-explained.md). diff --git a/docs/01_nodeos/03_plugins/test_control_api_plugin/api-reference/index.md b/docs/01_nodeos/03_plugins/test_control_api_plugin/api-reference/index.md new file mode 100644 index 00000000000..6451c708686 --- /dev/null +++ b/docs/01_nodeos/03_plugins/test_control_api_plugin/api-reference/index.md @@ -0,0 +1 @@ + diff --git a/docs/01_nodeos/03_plugins/trace_api_plugin/api-reference/index.md b/docs/01_nodeos/03_plugins/trace_api_plugin/api-reference/index.md new file mode 100644 index 00000000000..6451c708686 --- /dev/null +++ b/docs/01_nodeos/03_plugins/trace_api_plugin/api-reference/index.md @@ -0,0 +1 @@ + diff --git a/docs/01_nodeos/03_plugins/trace_api_plugin/index.md b/docs/01_nodeos/03_plugins/trace_api_plugin/index.md new file mode 100644 index 00000000000..18deaa23ce7 --- /dev/null +++ b/docs/01_nodeos/03_plugins/trace_api_plugin/index.md @@ -0,0 +1,121 @@ +# trace_api_plugin + +## Description + +The `trace_api_plugin` provides a consumer-focused long-term API for retrieving retired actions and related metadata from a specified block. The plugin defines a new HTTP endpoint (see the [API reference](api-reference/index.md) for more information). + +## Usage + +```console +# config.ini +plugin = eosio::trace_api_plugin +[options] +``` +```sh +# command-line +nodeos ... --plugin eosio::trace_api_plugin [options] +``` + +## Options + +These can be specified from both the `nodeos` command-line or the `config.ini` file: + +```console +Config Options for eosio::trace_api_plugin: + + --trace-dir (="traces") the location of the trace directory + (absolute path or relative to + application data dir) + --trace-slice-stride (=10000) the number of blocks each "slice" of + trace data will contain on the + filesystem + --trace-minimum-irreversible-history-blocks (=-1) + Number of blocks to ensure are kept + past LIB for retrieval before "slice" + files can be automatically removed.
+ A value of -1 indicates that automatic + removal of "slice" files will be + turned off. + --trace-rpc-abi ABIs used when decoding trace RPC + responses. + There must be at least one ABI + specified OR the flag trace-no-abis + must be used. + ABIs are specified as "Key=Value" pairs + in the form = + Where can be: + an absolute path to a file + containing a valid JSON-encoded ABI + a relative path from `data-dir` to a + file containing valid JSON-encoded ABI. + --trace-no-abis Use to indicate that the RPC responses + will not use ABIs. + Failure to specify this option when + there are no trace-rpc-abi + configurations will result in an Error. + This option is mutually exclusive with + trace-rpc-abi +``` + +## Dependencies + +* [`chain_plugin`](../chain_plugin/index.md) +* [`http_plugin`](../http_plugin/index.md) + +### Load Dependency Examples + +The following plugins are loaded with default settings if not specified on the command line or `config.ini`: + +```console +# config.ini +plugin = eosio::chain_plugin +[options] +plugin = eosio::http_plugin +[options] +``` +```sh +# command-line +nodeos ... --plugin eosio::chain_plugin [options] \ + --plugin eosio::http_plugin [options] +``` + +## Purpose + +While integrating applications such as block explorers and exchanges with an EOSIO blockchain, the user might require a complete transcript of actions that are processed by the blockchain, including those spawned from the execution of smart contracts and scheduled transactions. The `trace_api_plugin` aims to serve this need. The purpose of the plugin is to provide: + +* A transcript of retired actions and related metadata +* A consumer-focused long-term API to retrieve blocks +* Maintainable resource commitments at the EOSIO nodes + +Therefore, one crucial goal of the `trace_api_plugin` is to have better maintenance of node resources (file system, disk, memory, etc.). This goal is different from the existing `history_plugin` which provides far more configurable filtering and querying capabilities, or the existing `state_history_plugin` which provides a binary streaming interface to access structural chain data, action data, as well as state deltas. + +## Examples + +Below is a `nodeos` configuration example for the `trace_api_plugin` when tracing some EOSIO reference contracts: + +```sh +nodeos --data-dir data_dir --config-dir config_dir --trace-dir traces_dir +--plugin eosio::trace_api_plugin +--trace-rpc-abi=eosio=abis/eosio.abi +--trace-rpc-abi=eosio.token=abis/eosio.token.abi +--trace-rpc-abi=eosio.msig=abis/eosio.msig.abi +--trace-rpc-abi=eosio.wrap=abis/eosio.wrap.abi +``` + +## Maintenance Note + +To reduce the disk space consumed by the `trace_api_plugin`, you can configure the following option: + +```console + --trace-minimum-irreversible-history-blocks N (=-1) +``` + +Once the value is no longer `-1`, only the `N` blocks before the current LIB block will be kept on disk. + +If resource usage cannot be effectively managed via the `trace-minimum-irreversible-history-blocks` configuration option, then there might be a need for ongoing maintenance. In that case, the user may prefer to manage resources with an external system or process. + +### Manual Filesystem Management + +The `trace-dir` configuration option defines a location on the filesystem where all artifacts created by the `trace_api_plugin` are stored. These files are stable once the LIB block has progressed past that slice and then can be deleted at any time to reclaim filesystem space.
The conventions regarding these files are to be determined. However, the remainder of the system will tolerate any out-of-process management system that removes some or all of these files in this directory regardless of what data they represent, or whether there is a running `nodeos` instance accessing them or not. Data which would nominally be available, but is no longer so due to manual maintenance, will result in an HTTP 404 response from the appropriate API endpoint(s). + +In conjunction with the `trace-minimum-irreversible-history-blocks=-1` option, administrators can take full control over the lifetime of the data available via the `trace_api_plugin` and the associated filesystem resources. diff --git a/docs/01_nodeos/04_replays/how-to-generate-a-snapshot.md b/docs/01_nodeos/04_replays/how-to-generate-a-snapshot.md index b9ecb8e9bf8..09f44db9c92 100644 --- a/docs/01_nodeos/04_replays/how-to-generate-a-snapshot.md +++ b/docs/01_nodeos/04_replays/how-to-generate-a-snapshot.md @@ -15,5 +15,5 @@ curl --request POST \ --header 'content-type: application/x-www-form-urlencoded; charset=UTF-8' ``` -[[info | Other `blocks.log` files]] +[[info | Getting other `blocks.log` files]] | You can also download a `blocks.log` file from third party providers. diff --git a/docs/01_nodeos/08_troubleshooting/index.md b/docs/01_nodeos/08_troubleshooting/index.md index 77462fb4bd6..bb4a0d7edcb 100644 --- a/docs/01_nodeos/08_troubleshooting/index.md +++ b/docs/01_nodeos/08_troubleshooting/index.md @@ -43,3 +43,7 @@ To focus only on the version line within the block: ```sh cleos --url http://localhost:8888 get info | grep server_version ``` + +### Error 3070000: WASM Exception Error + +If you try to deploy the `eosio.bios` contract or `eosio.system` contract in an attempt to boot an EOSIO-based blockchain and you get the following error or similar: `Publishing contract... Error 3070000: WASM Exception Error Details: env.set_proposed_producers_ex unresolveable`, this is because you must activate the `PREACTIVATE_FEATURE` protocol feature first. More details about this feature and how to activate it can be found in the [Bios Boot Sequence Tutorial](https://developers.eos.io/welcome/latest/tutorials/bios-boot-sequence/#112-set-the-eosiosystem-contract). For more information, you may also visit the [Nodeos Upgrade Guides](https://developers.eos.io/manuals/eos/latest/nodeos/upgrade-guides/). diff --git a/docs/02_cleos/03_command-reference/create/account.md b/docs/02_cleos/03_command-reference/create/account.md index 4a1edc32c46..976c986f561 100755 --- a/docs/02_cleos/03_command-reference/create/account.md +++ b/docs/02_cleos/03_command-reference/create/account.md @@ -30,7 +30,7 @@ Options: ``` ## Command -A set of EOS keys is required to create an account. A set of EOS keys can be generated by using `cleos create key`. +A set of EOSIO keys is required to create an account. The EOSIO keys can be generated by using `cleos create key`.
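For example, a fresh key pair can be printed to the console as shown below (a sketch; the output values are placeholders, not real keys):

```sh
cleos create key --to-console
# Private key: 5K...   <- placeholder, never share a real private key
# Public key: EOS...   <- placeholder public key
```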
diff --git a/docs/02_cleos/03_command-reference/net/status.md b/docs/02_cleos/03_command-reference/net/status.md
index d24f42ef07c..318e21ce35b 100755
--- a/docs/02_cleos/03_command-reference/net/status.md
+++ b/docs/02_cleos/03_command-reference/net/status.md
@@ -14,3 +14,18 @@ Usage: cleos net status host
 Positionals:
   host TEXT                   The hostname:port to query status of connection
 ```
+
+Given a valid, existing `hostname:port` parameter, the above command returns a JSON response similar to the one below:
+
+```
+{
+  "peer": "hostname:port",
+  "connecting": false/true,
+  "syncing": false/true,
+  "last_handshake": {
+    ...
+  }
+}
+```
+
+The `last_handshake` structure is explained in detail in the [Network Peer Protocol](https://developers.eos.io/welcome/latest/protocol/network_peer_protocol#421-handshake-message) documentation section.
\ No newline at end of file
diff --git a/libraries/appbase b/libraries/appbase
index 72e93b39672..c51544732e3 160000
--- a/libraries/appbase
+++ b/libraries/appbase
@@ -1 +1 @@
-Subproject commit 72e93b396726916a596482897ab13f99a8197379
+Subproject commit c51544732e305207a4ba064cb3dd3cfd61e8af30
diff --git a/libraries/chain/CMakeLists.txt b/libraries/chain/CMakeLists.txt
index a2bc309a8fb..e5f9c9ef988 100644
--- a/libraries/chain/CMakeLists.txt
+++ b/libraries/chain/CMakeLists.txt
@@ -11,6 +11,7 @@ else()
   try_run(POSIX_TIMER_TEST_RUN_RESULT POSIX_TIMER_TEST_COMPILE_RESULT ${CMAKE_CURRENT_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/platform_timer_posix_test.c)
   if(POSIX_TIMER_TEST_RUN_RESULT EQUAL 0)
     set(PLATFORM_TIMER_IMPL platform_timer_posix.cpp)
+    set(CHAIN_RT_LINKAGE rt)
   else()
     set(PLATFORM_TIMER_IMPL platform_timer_asio_fallback.cpp)
   endif()
@@ -105,7 +106,7 @@ add_library( eosio_chain
 )
 target_link_libraries( eosio_chain fc chainbase Logging IR WAST WASM Runtime
-                       softfloat builtins wabt ${CHAIN_EOSVM_LIBRARIES} ${LLVM_LIBS}
+                       softfloat builtins wabt ${CHAIN_EOSVM_LIBRARIES} ${LLVM_LIBS} ${CHAIN_RT_LINKAGE}
 )
 target_include_directories( eosio_chain PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" "${CMAKE_CURRENT_BINARY_DIR}/include"
diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp
index 0d40e0425b2..b69b57cbf55 100644
--- a/libraries/chain/controller.cpp
+++ b/libraries/chain/controller.cpp
@@ -1203,6 +1203,10 @@ struct controller_impl {
    transaction_trace_ptr push_scheduled_transaction( const generated_transaction_object& gto, fc::time_point deadline, uint32_t billed_cpu_time_us, bool explicit_billed_cpu_time = false )
    { try {
+
+      const bool validating = !self.is_producing_block();
+      EOS_ASSERT( !validating || explicit_billed_cpu_time, transaction_exception, "validating requires explicit billing" );
+
       maybe_session undo_session;
       if ( !self.skip_db_sessions() )
          undo_session = maybe_session(db);
@@ -1309,7 +1313,7 @@ struct controller_impl {
 
       // Only subjective OR soft OR hard failure logic below:
-      if( gtrx.sender != account_name() && !(explicit_billed_cpu_time ? failure_is_subjective(*trace->except) : scheduled_failure_is_subjective(*trace->except))) {
+      if( gtrx.sender != account_name() && !(validating ? failure_is_subjective(*trace->except) : scheduled_failure_is_subjective(*trace->except))) {
          // Attempt error handling for the generated transaction.
auto error_trace = apply_onerror( gtrx, deadline, trx_context.pseudo_start, @@ -1331,7 +1335,7 @@ struct controller_impl { // subjectivity changes based on producing vs validating bool subjective = false; - if (explicit_billed_cpu_time) { + if (validating) { subjective = failure_is_subjective(*trace->except); } else { subjective = scheduled_failure_is_subjective(*trace->except); @@ -1340,15 +1344,18 @@ struct controller_impl { if ( !subjective ) { // hard failure logic - if( !explicit_billed_cpu_time ) { + if( !validating ) { auto& rl = self.get_mutable_resource_limits_manager(); rl.update_account_usage( trx_context.bill_to_accounts, block_timestamp_type(self.pending_block_time()).slot ); int64_t account_cpu_limit = 0; std::tie( std::ignore, account_cpu_limit, std::ignore, std::ignore ) = trx_context.max_bandwidth_billed_accounts_can_pay( true ); - cpu_time_to_bill_us = static_cast( std::min( std::min( static_cast(cpu_time_to_bill_us), - account_cpu_limit ), - trx_context.initial_objective_duration_limit.count() ) ); + uint32_t limited_cpu_time_to_bill_us = static_cast( std::min( + std::min( static_cast(cpu_time_to_bill_us), account_cpu_limit ), + trx_context.initial_objective_duration_limit.count() ) ); + EOS_ASSERT( !explicit_billed_cpu_time || (cpu_time_to_bill_us == limited_cpu_time_to_bill_us), + transaction_exception, "cpu to bill ${cpu} != limited ${limit}", ("cpu", cpu_time_to_bill_us)("limit", limited_cpu_time_to_bill_us) ); + cpu_time_to_bill_us = limited_cpu_time_to_bill_us; } resource_limits.add_transaction_usage( trx_context.bill_to_accounts, cpu_time_to_bill_us, 0, @@ -1395,7 +1402,7 @@ struct controller_impl { transaction_trace_ptr push_transaction( const transaction_metadata_ptr& trx, fc::time_point deadline, uint32_t billed_cpu_time_us, - bool explicit_billed_cpu_time = false ) + bool explicit_billed_cpu_time ) { EOS_ASSERT(deadline != fc::time_point(), transaction_exception, "deadline cannot be uninitialized"); @@ -1458,6 +1465,7 @@ struct controller_impl { ? 
transaction_receipt::executed : transaction_receipt::delayed; trace->receipt = push_receipt(*trx->packed_trx(), s, trx_context.billed_cpu_time_us, trace->net_usage); + trx->billed_cpu_time_us = trx_context.billed_cpu_time_us; pending->_block_stage.get()._pending_trx_metas.emplace_back(trx); } else { transaction_receipt_header r; @@ -2000,7 +2008,7 @@ struct controller_impl { fork_db.add( bsp ); - if (conf.trusted_producers.count(b->producer)) { + if (self.is_trusted_producer(b->producer)) { trusted_producer_light_validation = true; }; @@ -2670,22 +2678,20 @@ void controller::push_block( std::future& block_state_future, my->push_block( block_state_future, forked_branch_cb, trx_lookup ); } -bool controller::in_immutable_mode()const{ - return (db_mode_is_immutable(get_read_mode())); -} - -transaction_trace_ptr controller::push_transaction( const transaction_metadata_ptr& trx, fc::time_point deadline, uint32_t billed_cpu_time_us ) { +transaction_trace_ptr controller::push_transaction( const transaction_metadata_ptr& trx, fc::time_point deadline, + uint32_t billed_cpu_time_us, bool explicit_billed_cpu_time ) { validate_db_available_size(); - EOS_ASSERT( !in_immutable_mode(), transaction_type_exception, "push transaction not allowed in read-only mode" ); + EOS_ASSERT( get_read_mode() != db_read_mode::IRREVERSIBLE, transaction_type_exception, "push transaction not allowed in irreversible mode" ); EOS_ASSERT( trx && !trx->implicit && !trx->scheduled, transaction_type_exception, "Implicit/Scheduled transaction not allowed" ); - return my->push_transaction(trx, deadline, billed_cpu_time_us, billed_cpu_time_us > 0 ); + return my->push_transaction(trx, deadline, billed_cpu_time_us, explicit_billed_cpu_time ); } -transaction_trace_ptr controller::push_scheduled_transaction( const transaction_id_type& trxid, fc::time_point deadline, uint32_t billed_cpu_time_us ) +transaction_trace_ptr controller::push_scheduled_transaction( const transaction_id_type& trxid, fc::time_point deadline, + uint32_t billed_cpu_time_us, bool explicit_billed_cpu_time ) { - EOS_ASSERT( !in_immutable_mode(), transaction_type_exception, "push scheduled transaction not allowed in read-only mode" ); + EOS_ASSERT( get_read_mode() != db_read_mode::IRREVERSIBLE, transaction_type_exception, "push scheduled transaction not allowed in irreversible mode" ); validate_db_available_size(); - return my->push_scheduled_transaction( trxid, deadline, billed_cpu_time_us, billed_cpu_time_us > 0 ); + return my->push_scheduled_transaction( trxid, deadline, billed_cpu_time_us, explicit_billed_cpu_time ); } const flat_set& controller::get_actor_whitelist() const { @@ -2828,6 +2834,11 @@ block_id_type controller::last_irreversible_block_id() const { return get_block_id_for_num( lib_num ); } +time_point controller::last_irreversible_block_time() const { + return my->fork_db.root()->header.timestamp.to_time_point(); +} + + const dynamic_global_property_object& controller::get_dynamic_global_properties()const { return my->db.get(); } @@ -3045,6 +3056,10 @@ bool controller::skip_trx_checks() const { return light_validation_allowed(my->conf.disable_replay_opts); } +bool controller::is_trusted_producer( const account_name& producer) const { + return get_validation_mode() == chain::validation_mode::LIGHT || my->conf.trusted_producers.count(producer); +} + bool controller::contracts_console()const { return my->conf.contracts_console; } diff --git a/libraries/chain/include/eosio/chain/controller.hpp b/libraries/chain/include/eosio/chain/controller.hpp index 
c262f5446be..805bef4c412 100644 --- a/libraries/chain/include/eosio/chain/controller.hpp +++ b/libraries/chain/include/eosio/chain/controller.hpp @@ -50,8 +50,6 @@ namespace eosio { namespace chain { IRREVERSIBLE }; - inline bool db_mode_is_immutable(db_read_mode m) {return db_read_mode::READ_ONLY == m || db_read_mode::IRREVERSIBLE ==m;} - enum class validation_mode { FULL, LIGHT @@ -144,13 +142,15 @@ namespace eosio { namespace chain { /** * */ - transaction_trace_ptr push_transaction( const transaction_metadata_ptr& trx, fc::time_point deadline, uint32_t billed_cpu_time_us = 0 ); + transaction_trace_ptr push_transaction( const transaction_metadata_ptr& trx, fc::time_point deadline, + uint32_t billed_cpu_time_us, bool explicit_billed_cpu_time ); /** * Attempt to execute a specific transaction in our deferred trx database * */ - transaction_trace_ptr push_scheduled_transaction( const transaction_id_type& scheduled, fc::time_point deadline, uint32_t billed_cpu_time_us = 0 ); + transaction_trace_ptr push_scheduled_transaction( const transaction_id_type& scheduled, fc::time_point deadline, + uint32_t billed_cpu_time_us, bool explicit_billed_cpu_time ); block_state_ptr finalize_block( const signer_callback_type& signer_callback ); void sign_block( const signer_callback_type& signer_callback ); @@ -226,6 +226,7 @@ namespace eosio { namespace chain { uint32_t last_irreversible_block_num() const; block_id_type last_irreversible_block_id() const; + time_point last_irreversible_block_time() const; signed_block_ptr fetch_block_by_number( uint32_t block_num )const; signed_block_ptr fetch_block_by_id( block_id_type id )const; @@ -275,6 +276,7 @@ namespace eosio { namespace chain { bool skip_db_sessions( )const; bool skip_db_sessions( block_status bs )const; bool skip_trx_checks()const; + bool is_trusted_producer( const account_name& producer) const; bool contracts_console()const; @@ -282,7 +284,6 @@ namespace eosio { namespace chain { db_read_mode get_read_mode()const; validation_mode get_validation_mode()const; - bool in_immutable_mode()const; void set_subjective_cpu_leeway(fc::microseconds leeway); fc::optional get_subjective_cpu_leeway() const; diff --git a/libraries/chain/include/eosio/chain/transaction_context.hpp b/libraries/chain/include/eosio/chain/transaction_context.hpp index 01cdb7d732f..a10c7ba9e9a 100644 --- a/libraries/chain/include/eosio/chain/transaction_context.hpp +++ b/libraries/chain/include/eosio/chain/transaction_context.hpp @@ -96,7 +96,8 @@ namespace eosio { namespace chain { void schedule_transaction(); void record_transaction( const transaction_id_type& id, fc::time_point_sec expire ); - void validate_cpu_usage_to_bill( int64_t u, bool check_minimum = true )const; + void validate_cpu_usage_to_bill( int64_t billed_us, int64_t account_cpu_limit, bool check_minimum )const; + void validate_account_cpu_usage( int64_t billed_us, int64_t account_cpu_limit, bool estimate )const; void disallow_transaction_extensions( const char* error_msg )const; diff --git a/libraries/chain/include/eosio/chain/transaction_metadata.hpp b/libraries/chain/include/eosio/chain/transaction_metadata.hpp index 945dd9af5e7..bb48c71e9b4 100644 --- a/libraries/chain/include/eosio/chain/transaction_metadata.hpp +++ b/libraries/chain/include/eosio/chain/transaction_metadata.hpp @@ -34,7 +34,8 @@ class transaction_metadata { public: const bool implicit; const bool scheduled; - bool accepted = false; // not thread safe + bool accepted = false; // not thread safe + uint32_t billed_cpu_time_us = 0; // not thread safe 
private: struct private_type{}; diff --git a/libraries/chain/include/eosio/chain/wasm_interface_private.hpp b/libraries/chain/include/eosio/chain/wasm_interface_private.hpp index f891602bed4..793a8a28a86 100644 --- a/libraries/chain/include/eosio/chain/wasm_interface_private.hpp +++ b/libraries/chain/include/eosio/chain/wasm_interface_private.hpp @@ -8,7 +8,6 @@ #else #define _REGISTER_EOSVMOC_INTRINSIC(CLS, MOD, METHOD, WASM_SIG, NAME, SIG) #endif -#include #include #include #include @@ -23,7 +22,10 @@ #include "IR/Validate.h" #if defined(EOSIO_EOS_VM_RUNTIME_ENABLED) || defined(EOSIO_EOS_VM_JIT_RUNTIME_ENABLED) +#include #include +#else +#define _REGISTER_EOS_VM_INTRINSIC(CLS, MOD, METHOD, WASM_SIG, NAME, SIG) #endif using namespace fc; diff --git a/libraries/chain/resource_limits.cpp b/libraries/chain/resource_limits.cpp index 10edf69117d..e9ec714fabf 100644 --- a/libraries/chain/resource_limits.cpp +++ b/libraries/chain/resource_limits.cpp @@ -386,13 +386,17 @@ std::pair resource_limits_manager::get_account_cpu account_resource_limit arl; uint128_t window_size = config.account_cpu_usage_average_window; - uint64_t greylisted_virtual_cpu_limit = config.cpu_limit_parameters.max * greylist_limit; bool greylisted = false; uint128_t virtual_cpu_capacity_in_window = window_size; - if( greylisted_virtual_cpu_limit < state.virtual_cpu_limit ) { - virtual_cpu_capacity_in_window *= greylisted_virtual_cpu_limit; - greylisted = true; + if( greylist_limit < config::maximum_elastic_resource_multiplier ) { + uint64_t greylisted_virtual_cpu_limit = config.cpu_limit_parameters.max * greylist_limit; + if( greylisted_virtual_cpu_limit < state.virtual_cpu_limit ) { + virtual_cpu_capacity_in_window *= greylisted_virtual_cpu_limit; + greylisted = true; + } else { + virtual_cpu_capacity_in_window *= state.virtual_cpu_limit; + } } else { virtual_cpu_capacity_in_window *= state.virtual_cpu_limit; } @@ -433,13 +437,17 @@ std::pair resource_limits_manager::get_account_net account_resource_limit arl; uint128_t window_size = config.account_net_usage_average_window; - uint64_t greylisted_virtual_net_limit = config.net_limit_parameters.max * greylist_limit; bool greylisted = false; uint128_t virtual_network_capacity_in_window = window_size; - if( greylisted_virtual_net_limit < state.virtual_net_limit ) { - virtual_network_capacity_in_window *= greylisted_virtual_net_limit; - greylisted = true; + if( greylist_limit < config::maximum_elastic_resource_multiplier ) { + uint64_t greylisted_virtual_net_limit = config.net_limit_parameters.max * greylist_limit; + if( greylisted_virtual_net_limit < state.virtual_net_limit ) { + virtual_network_capacity_in_window *= greylisted_virtual_net_limit; + greylisted = true; + } else { + virtual_network_capacity_in_window *= state.virtual_net_limit; + } } else { virtual_network_capacity_in_window *= state.virtual_net_limit; } diff --git a/libraries/chain/transaction_context.cpp b/libraries/chain/transaction_context.cpp index 49a67dff882..9f814e02d00 100644 --- a/libraries/chain/transaction_context.cpp +++ b/libraries/chain/transaction_context.cpp @@ -121,8 +121,8 @@ namespace eosio { namespace chain { initial_objective_duration_limit = objective_duration_limit; - if( billed_cpu_time_us > 0 ) // could also call on explicit_billed_cpu_time but it would be redundant - validate_cpu_usage_to_bill( billed_cpu_time_us, false ); // Fail early if the amount to be billed is too high + if( explicit_billed_cpu_time ) + validate_cpu_usage_to_bill( billed_cpu_time_us, std::numeric_limits::max(), 
false ); // Fail early if the amount to be billed is too high // Record accounts to be billed for network and CPU usage if( control.is_builtin_activated(builtin_protocol_feature_t::only_bill_first_authorizer) ) { @@ -172,6 +172,11 @@ namespace eosio { namespace chain { deadline_exception_code = billing_timer_exception_code; } + if( !explicit_billed_cpu_time ) { + // if account no longer has enough cpu to exec trx, don't try + validate_account_cpu_usage( billed_cpu_time_us, account_cpu_limit, true ); + } + eager_net_limit = (eager_net_limit/8)*8; // Round down to nearest multiple of word size (8 bytes) so check_net_usage can be efficient if( initial_net_usage > 0 ) @@ -330,7 +335,7 @@ namespace eosio { namespace chain { update_billed_cpu_time( now ); - validate_cpu_usage_to_bill( billed_cpu_time_us ); + validate_cpu_usage_to_bill( billed_cpu_time_us, account_cpu_limit, true ); rl.add_transaction_usage( bill_to_accounts, static_cast(billed_cpu_time_us), net_usage, block_timestamp_type(control.pending_block_time()).slot ); // Should never fail @@ -420,7 +425,7 @@ namespace eosio { namespace chain { transaction_timer.start(_deadline); } - void transaction_context::validate_cpu_usage_to_bill( int64_t billed_us, bool check_minimum )const { + void transaction_context::validate_cpu_usage_to_bill( int64_t billed_us, int64_t account_cpu_limit, bool check_minimum )const { if (!control.skip_trx_checks()) { if( check_minimum ) { const auto& cfg = control.get_global_properties().configuration; @@ -430,25 +435,35 @@ namespace eosio { namespace chain { ); } - if( billing_timer_exception_code == block_cpu_usage_exceeded::code_value ) { + validate_account_cpu_usage( billed_us, account_cpu_limit, false ); + } + } + + void transaction_context::validate_account_cpu_usage( int64_t billed_us, int64_t account_cpu_limit, bool estimate )const { + if( (billed_us > 0) && !control.skip_trx_checks() ) { + const bool cpu_limited_by_account = (account_cpu_limit <= objective_duration_limit.count()); + + if( !cpu_limited_by_account && (billing_timer_exception_code == block_cpu_usage_exceeded::code_value) ) { EOS_ASSERT( billed_us <= objective_duration_limit.count(), block_cpu_usage_exceeded, - "billed CPU time (${billed} us) is greater than the billable CPU time left in the block (${billable} us)", - ("billed", billed_us)("billable", objective_duration_limit.count()) - ); + "${desc} CPU time (${billed} us) is greater than the billable CPU time left in the block (${billable} us)", + ("desc", (estimate ? "estimated" : "billed"))("billed", billed_us)( "billable", objective_duration_limit.count() ) + ); } else { - if (cpu_limit_due_to_greylist) { - EOS_ASSERT( billed_us <= objective_duration_limit.count(), + if( cpu_limit_due_to_greylist && cpu_limited_by_account ) { + EOS_ASSERT( billed_us <= account_cpu_limit, greylist_cpu_usage_exceeded, - "billed CPU time (${billed} us) is greater than the maximum greylisted billable CPU time for the transaction (${billable} us)", - ("billed", billed_us)("billable", objective_duration_limit.count()) + "${desc} CPU time (${billed} us) is greater than the maximum greylisted billable CPU time for the transaction (${billable} us)", + ("desc", (estimate ? "estimated" : "billed"))("billed", billed_us)( "billable", account_cpu_limit ) ); } else { - EOS_ASSERT( billed_us <= objective_duration_limit.count(), + // exceeds trx.max_cpu_usage_ms or cfg.max_transaction_cpu_usage if objective_duration_limit is greater + const int64_t cpu_limit = (cpu_limited_by_account ? 
account_cpu_limit : objective_duration_limit.count()); + EOS_ASSERT( billed_us <= cpu_limit, tx_cpu_usage_exceeded, - "billed CPU time (${billed} us) is greater than the maximum billable CPU time for the transaction (${billable} us)", - ("billed", billed_us)("billable", objective_duration_limit.count()) - ); + "${desc} CPU time (${billed} us) is greater than the maximum billable CPU time for the transaction (${billable} us)", + ("desc", (estimate ? "estimated" : "billed"))("billed", billed_us)( "billable", cpu_limit ) + ); } } } @@ -504,6 +519,9 @@ namespace eosio { namespace chain { } } + EOS_ASSERT( (!force_elastic_limits && control.is_producing_block()) || (!greylisted_cpu && !greylisted_net), + transaction_exception, "greylisted when not producing block" ); + return std::make_tuple(account_net_limit, account_cpu_limit, greylisted_net, greylisted_cpu); } @@ -643,7 +661,7 @@ namespace eosio { namespace chain { EOS_ASSERT( actor != nullptr, transaction_exception, "action's authorizing actor '${account}' does not exist", ("account", auth.actor) ); EOS_ASSERT( auth_manager.find_permission(auth) != nullptr, transaction_exception, - "action's authorizations include a non-existent permission: {permission}", + "action's authorizations include a non-existent permission: ${permission}", ("permission", auth) ); if( enforce_actor_whitelist_blacklist ) actors.insert( auth.actor ); diff --git a/libraries/chain/webassembly/eos-vm-oc/code_cache.cpp b/libraries/chain/webassembly/eos-vm-oc/code_cache.cpp index b12388eb3da..9d93b8a2111 100644 --- a/libraries/chain/webassembly/eos-vm-oc/code_cache.cpp +++ b/libraries/chain/webassembly/eos-vm-oc/code_cache.cpp @@ -262,8 +262,11 @@ code_cache_base::code_cache_base(const boost::filesystem::path data_dir, const e for(unsigned i = 0; i < number_entries; ++i) { code_descriptor cd; fc::raw::unpack(ds, cd); - if(cd.codegen_version != 0) + if(cd.codegen_version != 0) { + allocator->deallocate(code_mapping + cd.code_begin); + allocator->deallocate(code_mapping + cd.initdata_begin); continue; + } _cache_index.push_back(std::move(cd)); } allocator->deallocate(code_mapping + cache_header.serialized_descriptor_index); diff --git a/libraries/fc b/libraries/fc index 5c3740a5efe..89905627463 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit 5c3740a5efec1e1d592e0f8ce092872df46c92d2 +Subproject commit 89905627463081c15bfa708a04cc7e68edb25dd0 diff --git a/libraries/testing/include/eosio/testing/tester.hpp b/libraries/testing/include/eosio/testing/tester.hpp index 6df94c8862b..3b779dc97af 100644 --- a/libraries/testing/include/eosio/testing/tester.hpp +++ b/libraries/testing/include/eosio/testing/tester.hpp @@ -175,6 +175,8 @@ namespace eosio { namespace testing { virtual signed_block_ptr produce_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms) ) = 0; virtual signed_block_ptr produce_empty_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms) ) = 0; virtual signed_block_ptr finish_block() = 0; + // produce one block and return traces for all applied transactions, both failed and executed + signed_block_ptr produce_block( std::vector& traces ); void produce_blocks( uint32_t n = 1, bool empty = false ); void produce_blocks_until_end_of_round(); void produce_blocks_for_n_rounds(const uint32_t num_of_rounds = 1); @@ -413,7 +415,10 @@ namespace eosio { namespace testing { } protected: - signed_block_ptr _produce_block( fc::microseconds skip_time, bool skip_pending_trxs = false ); + signed_block_ptr 
_produce_block( fc::microseconds skip_time, bool skip_pending_trxs ); + signed_block_ptr _produce_block( fc::microseconds skip_time, bool skip_pending_trxs, + bool no_throw, std::vector& traces ); + void _start_block(fc::time_point block_time); signed_block_ptr _finish_block(); @@ -478,6 +483,8 @@ namespace eosio { namespace testing { } } + using base_tester::produce_block; + signed_block_ptr produce_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms) )override { return _produce_block(skip_time, false); } diff --git a/libraries/testing/tester.cpp b/libraries/testing/tester.cpp index 074de397217..06204dbf62c 100644 --- a/libraries/testing/tester.cpp +++ b/libraries/testing/tester.cpp @@ -312,7 +312,13 @@ namespace eosio { namespace testing { } } - signed_block_ptr base_tester::_produce_block( fc::microseconds skip_time, bool skip_pending_trxs) { + signed_block_ptr base_tester::_produce_block( fc::microseconds skip_time, bool skip_pending_trxs ) { + std::vector traces; + return _produce_block( skip_time, skip_pending_trxs, false, traces ); + } + + signed_block_ptr base_tester::_produce_block( fc::microseconds skip_time, bool skip_pending_trxs, + bool no_throw, std::vector& traces ) { auto head = control->head_block_state(); auto head_time = control->head_block_time(); auto next_time = head_time + skip_time; @@ -323,8 +329,9 @@ namespace eosio { namespace testing { if( !skip_pending_trxs ) { for( auto itr = unapplied_transactions.begin(); itr != unapplied_transactions.end(); ) { - auto trace = control->push_transaction( itr->trx_meta, fc::time_point::maximum(), DEFAULT_BILLED_CPU_TIME_US ); - if(trace->except) { + auto trace = control->push_transaction( itr->trx_meta, fc::time_point::maximum(), DEFAULT_BILLED_CPU_TIME_US, true ); + traces.emplace_back( trace ); + if(!no_throw && trace->except) { trace->except->dynamic_rethrow_exception(); } itr = unapplied_transactions.erase( itr ); @@ -333,8 +340,9 @@ namespace eosio { namespace testing { vector scheduled_trxs; while ((scheduled_trxs = get_scheduled_transactions()).size() > 0 ) { for( const auto& trx : scheduled_trxs ) { - auto trace = control->push_scheduled_transaction( trx, fc::time_point::maximum(), DEFAULT_BILLED_CPU_TIME_US ); - if( trace->except ) { + auto trace = control->push_scheduled_transaction( trx, fc::time_point::maximum(), DEFAULT_BILLED_CPU_TIME_US, true ); + traces.emplace_back( trace ); + if( !no_throw && trace->except ) { trace->except->dynamic_rethrow_exception(); } } @@ -411,6 +419,10 @@ namespace eosio { namespace testing { return control->head_block_state()->block; } + signed_block_ptr base_tester::produce_block( std::vector& traces ) { + return _produce_block( fc::milliseconds(config::block_interval_ms), false, true, traces ); + } + void base_tester::produce_blocks( uint32_t n, bool empty ) { if( empty ) { for( uint32_t i = 0; i < n; ++i ) @@ -535,7 +547,7 @@ namespace eosio { namespace testing { fc::microseconds::maximum() : fc::microseconds( deadline - fc::time_point::now() ); auto fut = transaction_metadata::start_recover_keys( ptrx, control->get_thread_pool(), control->get_chain_id(), time_limit ); - auto r = control->push_transaction( fut.get(), deadline, billed_cpu_time_us ); + auto r = control->push_transaction( fut.get(), deadline, billed_cpu_time_us, billed_cpu_time_us > 0 ); if( r->except_ptr ) std::rethrow_exception( r->except_ptr ); if( r->except ) throw *r->except; return r; @@ -560,7 +572,7 @@ namespace eosio { namespace testing { fc::microseconds( deadline - 
fc::time_point::now() ); auto ptrx = std::make_shared( trx, c ); auto fut = transaction_metadata::start_recover_keys( ptrx, control->get_thread_pool(), control->get_chain_id(), time_limit ); - auto r = control->push_transaction( fut.get(), deadline, billed_cpu_time_us ); + auto r = control->push_transaction( fut.get(), deadline, billed_cpu_time_us, billed_cpu_time_us > 0 ); if (no_throw) return r; if( r->except_ptr ) std::rethrow_exception( r->except_ptr ); if( r->except) throw *r->except; diff --git a/plugins/CMakeLists.txt b/plugins/CMakeLists.txt index e07a10c5b8d..5f196cc8c4f 100644 --- a/plugins/CMakeLists.txt +++ b/plugins/CMakeLists.txt @@ -9,6 +9,7 @@ add_subdirectory(producer_api_plugin) add_subdirectory(history_plugin) add_subdirectory(history_api_plugin) add_subdirectory(state_history_plugin) +add_subdirectory(trace_api_plugin) add_subdirectory(wallet_plugin) add_subdirectory(wallet_api_plugin) diff --git a/plugins/chain_api_plugin/chain.swagger.yaml b/plugins/chain_api_plugin/chain.swagger.yaml new file mode 100644 index 00000000000..f52db0cd9ee --- /dev/null +++ b/plugins/chain_api_plugin/chain.swagger.yaml @@ -0,0 +1,682 @@ +openapi: 3.0.0 +info: + title: Chain API + description: "OAS 3.0 Nodeos [chain_api_plugin](https://eosio.github.io/eos/latest/nodeos/plugins/chain_api_plugin/index) API Specification\r" + version: 1.0.0 + license: + name: MIT + url: https://opensource.org/licenses/MIT + contact: + url: https://eos.io +servers: + - url: "{protocol}://{host}:{port}/v1/chain" + variables: + protocol: + enum: + - http + - https + default: http + host: + default: localhost + port: + default: "8080" +components: + schemas: {} +paths: + /get_account: + post: + description: Returns an object containing various details about a specific account on the blockchain. + operationId: get_account + requestBody: + description: JSON Object with single member "account_name" + content: + application/json: + schema: + type: object + required: + - account_name + properties: + account_name: + $ref: "https://eosio.github.io/schemata/v2.0/oas/Name.yaml" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "https://eosio.github.io/schemata/v2.0/oas/Account.yaml" + /get_block: + post: + description: Returns an object containing various details about a specific block on the blockchain. + operationId: get_block + requestBody: + content: + application/json: + schema: + type: object + required: + - block_num_or_id + properties: + block_num_or_id: + type: string + description: Provide a `block number` or a `block id` + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "https://eosio.github.io/schemata/v2.0/oas/Block.yaml" + /get_info: + post: + description: Returns an object containing various details about the blockchain. + operationId: get_info + security: [] + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "https://eosio.github.io/schemata/v2.0/oas/Info.yaml" + + /push_transaction: + post: + description: This method expects a transaction in JSON format and will attempt to apply it to the blockchain. 
+      operationId: push_transaction
+      requestBody:
+        content:
+          application/json:
+            schema:
+              type: object
+              properties:
+                signatures:
+                  type: array
+                  description: array of signatures required to authorize transaction
+                  items:
+                    $ref: "https://eosio.github.io/schemata/v2.0/oas/Signature.yaml"
+                compression:
+                  type: boolean
+                  description: Compression used, usually false
+                packed_context_free_data:
+                  type: string
+                  description: json to hex
+                packed_trx:
+                  type: string
+                  description: Transaction object json to hex
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                description: Returns Nothing
+
+  /send_transaction:
+    post:
+      description: This method expects a transaction in JSON format and will attempt to apply it to the blockchain.
+      operationId: send_transaction
+      requestBody:
+        content:
+          application/json:
+            schema:
+              type: object
+              properties:
+                signatures:
+                  type: array
+                  description: array of signatures required to authorize transaction
+                  items:
+                    $ref: "https://eosio.github.io/schemata/v2.0/oas/Signature.yaml"
+                compression:
+                  type: boolean
+                  description: Compression used, usually false
+                packed_context_free_data:
+                  type: string
+                  description: json to hex
+                packed_trx:
+                  type: string
+                  description: Transaction object json to hex
+
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                description: Returns Nothing
+
+  /push_transactions:
+    post:
+      description: This method expects a list of transactions in JSON format and will attempt to apply them to the blockchain.
+      operationId: push_transactions
+      requestBody:
+        content:
+          application/json:
+            schema:
+              type: array
+              items:
+                $ref: "https://eosio.github.io/schemata/v2.0/oas/Transaction.yaml"
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                description: Returns Nothing
+
+  /get_block_header_state:
+    post:
+      description: Retrieves the block header state
+      operationId: get_block_header_state
+      requestBody:
+        content:
+          application/json:
+            schema:
+              type: object
+              required:
+                - block_num_or_id
+              properties:
+                block_num_or_id:
+                  type: string
+                  description: Provide a block_number or a block_id
+
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                $ref: "https://eosio.github.io/schemata/v2.0/oas/BlockHeaderState.yaml"
+
+  /get_abi:
+    post:
+      description: Retrieves the ABI for a contract based on its account name
+      operationId: get_abi
+      requestBody:
+        content:
+          application/json:
+            schema:
+              type: object
+              required:
+                - account_name
+              properties:
+                account_name:
+                  $ref: "https://eosio.github.io/schemata/v2.0/oas/Name.yaml"
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                $ref: "https://eosio.github.io/schemata/v2.0/oas/Abi.yaml"
+  /get_currency_balance:
+    post:
+      description: Retrieves the current balance
+      operationId: get_currency_balance
+      requestBody:
+        content:
+          application/json:
+            schema:
+              type: object
+              required:
+                - code
+                - account
+                - symbol
+              properties:
+                code:
+                  $ref: "https://eosio.github.io/schemata/v2.0/oas/Name.yaml"
+                account:
+                  $ref: "https://eosio.github.io/schemata/v2.0/oas/Name.yaml"
+                symbol:
+                  $ref: "https://eosio.github.io/schemata/v2.0/oas/Symbol.yaml"
+
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                type: array
+                items:
+                  $ref: "https://eosio.github.io/schemata/v2.0/oas/Symbol.yaml"
+
+  /get_currency_stats:
+    post:
+      description: Retrieves currency stats
+      operationId: get_currency_stats
+      requestBody:
+        content:
+          application/json:
+            schema:
+              type: object
+              properties:
+                code:
+                  $ref: "https://eosio.github.io/schemata/v2.0/oas/Name.yaml"
+                symbol:
+                  $ref: "https://eosio.github.io/schemata/v2.0/oas/Symbol.yaml"
+
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                description: "Returns an object with one member labeled as the symbol you requested; the object has three members: supply (Symbol), max_supply (Symbol), and issuer (Name)"
+
+  /get_required_keys:
+    post:
+      description: Returns the required keys needed to sign a transaction.
+      operationId: get_required_keys
+      requestBody:
+        content:
+          application/json:
+            schema:
+              type: object
+              required:
+                - transaction
+                - available_keys
+              properties:
+                transaction:
+                  $ref: "https://eosio.github.io/schemata/v2.0/oas/Transaction.yaml"
+                available_keys:
+                  type: array
+                  description: Provide the available keys
+                  items:
+                    $ref: "https://eosio.github.io/schemata/v2.0/oas/PublicKey.yaml"
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                {}
+
+  /get_producers:
+    post:
+      description: Retrieves the producers list
+      operationId: get_producers
+      requestBody:
+        content:
+          application/json:
+            schema:
+              title: "GetProducersRequest"
+              type: object
+              required:
+                - limit
+                - lower_bound
+              properties:
+                limit:
+                  type: string
+                  description: total number of producers to retrieve
+                lower_bound:
+                  type: string
+                  description: Used in conjunction with limit to paginate through the results. For example, limit=10 and lower_bound=10 would return page 2
+                json:
+                  type: boolean
+                  description: return result in JSON format
+
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                title: "GetProducersResponse"
+                type: object
+                additionalProperties: false
+                minProperties: 3
+                required:
+                  - active
+                  - pending
+                  - proposed
+                properties:
+                  active:
+                    type: array
+                    nullable: true
+                    items:
+                      $ref: "https://eosio.github.io/schemata/v2.0/oas/ProducerSchedule.yaml"
+                  pending:
+                    type: array
+                    nullable: true
+                    items:
+                      $ref: "https://eosio.github.io/schemata/v2.0/oas/ProducerSchedule.yaml"
+                  proposed:
+                    type: array
+                    nullable: true
+                    items:
+                      $ref: "https://eosio.github.io/schemata/v2.0/oas/ProducerSchedule.yaml"
+
+
+  /get_raw_code_and_abi:
+    post:
+      description: Retrieves raw code and ABI for a contract based on account name
+      operationId: get_raw_code_and_abi
+      requestBody:
+        content:
+          application/json:
+            schema:
+              type: object
+              required:
+                - account_name
+              properties:
+                account_name:
+                  $ref: "https://eosio.github.io/schemata/v2.0/oas/Name.yaml"
+
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                type: object
+                properties:
+                  account_name:
+                    $ref: "https://eosio.github.io/schemata/v2.0/oas/Name.yaml"
+                  wasm:
+                    type: string
+                    description: base64 encoded wasm
+                  abi:
+                    type: string
+                    description: base64 encoded ABI
+
+  /get_scheduled_transaction:
+    post:
+      description: Retrieves scheduled transactions
+      operationId: get_scheduled_transaction
+      requestBody:
+        content:
+          application/json:
+            schema:
+              type: object
+              properties:
+                lower_bound:
+                  $ref: "https://eosio.github.io/schemata/v2.0/oas/DateTimeSeconds.yaml"
+                limit:
+                  description: The maximum number of transactions to return
+                  type: integer
+                json:
+                  description: true/false whether the packed transaction should be converted to JSON
+                  type: boolean
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                type: object
+                properties:
+                  transactions:
+                    type: array
+                    items:
"https://eosio.github.io/schemata/v2.0/oas/Transaction.yaml" + + + /get_table_by_scope: + post: + description: Retrieves table scope + operationId: get_table_by_scope + requestBody: + content: + application/json: + schema: + type: object + required: + - code + properties: + code: + type: string + description: "`name` of the contract to return table data for" + table: + type: string + description: Filter results by table + lower_bound: + type: string + description: Filters results to return the first element that is not less than provided value in set + upper_bound: + type: string + description: Filters results to return the first element that is greater than provided value in set + limit: + type: integer + description: Limit number of results returned. + format: int32 + reverse: + type: boolean + description: Reverse the order of returned results + responses: + "200": + description: OK + content: + application/json: + schema: + type: object + properties: + rows: + type: array + items: + $ref: "https://eosio.github.io/schemata/v2.0/oas/TableScope.yaml" + more: + $ref: "https://eosio.github.io/schemata/v2.0/oas/Name.yaml" + + /get_table_rows: + post: + description: Returns an object containing rows from the specified table. + operationId: get_table_rows + requestBody: + content: + application/json: + schema: + type: object + required: + - code + - table + - scope + properties: + code: + type: string + description: The name of the smart contract that controls the provided table + table: + type: string + description: The name of the table to query + scope: + type: string + description: The account to which this data belongs + index_position: + type: string + description: Position of the index used, accepted parameters `primary`, `secondary`, `tertiary`, `fourth`, `fifth`, `sixth`, `seventh`, `eighth`, `ninth` , `tenth` + key_type: + type: string + description: Type of key specified by index_position (for example - `uint64_t` or `name`) + encode_type: + type: string + upper_bound: + type: string + lower_bound: + type: string + + responses: + "200": + description: OK + content: + application/json: + schema: + type: object + properties: + rows: + type: array + items: {} + + /abi_json_to_bin: + post: + description: Returns an object containing rows from the specified table. + operationId: abi_json_to_bin + requestBody: + content: + application/json: + schema: + type: object + title: AbiJsonToBinRequest + properties: + binargs: + $ref: "https://eosio.github.io/schemata/v2.0/oas/Hex.yaml" + + responses: + "200": + description: OK + content: + application/json: + schema: + type: object + properties: + binargs: + $ref: "https://eosio.github.io/schemata/v2.0/oas/Hex.yaml" + + /abi_bin_to_json: + post: + description: Returns an object containing rows from the specified table. + operationId: abi_bin_to_json + requestBody: + content: + application/json: + schema: + type: object + title: AbiBinToJsonRequest + properties: + code: + $ref: "https://eosio.github.io/schemata/v2.0/oas/Name.yaml" + action: + $ref: "https://eosio.github.io/schemata/v2.0/oas/Name.yaml" + binargs: + $ref: "https://eosio.github.io/schemata/v2.0/oas/Hex.yaml" + responses: + "200": + description: OK + content: + application/json: + schema: + type: string + + /get_code: + post: + description: Returns an object containing rows from the specified table. 
+      operationId: get_code
+      requestBody:
+        content:
+          application/json:
+            schema:
+              type: object
+              required:
+                - account_name
+                - code_as_wasm
+              properties:
+                account_name:
+                  $ref: "https://eosio.github.io/schemata/v2.0/oas/Name.yaml"
+                code_as_wasm:
+                  type: integer
+                  default: 1
+                  description: This must be 1 (true)
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                type: object
+                title: GetCodeResponse
+                properties:
+                  name:
+                    $ref: "https://eosio.github.io/schemata/v2.0/oas/Name.yaml"
+                  code_hash:
+                    $ref: "https://eosio.github.io/schemata/v2.0/oas/Sha256.yaml"
+                  wast:
+                    type: string
+                  wasm:
+                    type: string
+                  abi:
+                    $ref: "https://eosio.github.io/schemata/v2.0/oas/Abi.yaml"
+
+  /get_raw_abi:
+    post:
+      description: Returns the raw ABI for a contract based on its account name.
+      operationId: get_raw_abi
+      requestBody:
+        content:
+          application/json:
+            schema:
+              type: object
+              required:
+                - account_name
+              properties:
+                account_name:
+                  $ref: "https://eosio.github.io/schemata/v2.0/oas/Name.yaml"
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                type: object
+                properties:
+                  account_name:
+                    $ref: "https://eosio.github.io/schemata/v2.0/oas/Name.yaml"
+                  code_hash:
+                    $ref: "https://eosio.github.io/schemata/v2.0/oas/Sha256.yaml"
+                  abi_hash:
+                    allOf:
+                      - $ref: "https://eosio.github.io/schemata/v2.0/oas/Sha256.yaml"
+                  abi:
+                    type: string
+
+
+  /get_activated_protocol_features:
+    post:
+      description: Retrieves the activated protocol features for the producer node
+      operationId: get_activated_protocol_features
+      requestBody:
+        content:
+          application/json:
+            schema:
+              type: object
+              required:
+                - params
+              properties:
+                params:
+                  type: object
+                  description: Defines the filters to retrieve the protocol features by
+                  required:
+                    - search_by_block_num
+                    - reverse
+                  properties:
+                    lower_bound:
+                      type: integer
+                      description: Lower bound
+                    upper_bound:
+                      type: integer
+                      description: Upper bound
+                    limit:
+                      type: integer
+                      description: The limit, default is 10
+                    search_by_block_num:
+                      type: boolean
+                      description: Flag to indicate whether to search by block number
+                    reverse:
+                      type: boolean
+                      description: Flag to indicate whether to search in reverse
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                type: object
+                description: Returns the activated protocol features
+                required:
+                  - activated_protocol_features
+                properties:
+                  activated_protocol_features:
+                    type: array
+                    description: Variant type, an array of strings with the activated protocol features
+                    items:
+                      type: string
+                  more:
+                    type: integer
+                    description: "If there are more activated protocol features than the requested `limit`, returns the ordinal of the next activated protocol feature that was not returned; otherwise zero."
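+# Illustrative usage, not part of the specification: assuming a nodeos instance
+# with the chain_api_plugin enabled and the default server variables above,
+# the parameterless get_info endpoint could be exercised with:
+#   curl -X POST http://localhost:8080/v1/chain/get_info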
diff --git a/plugins/chain_api_plugin/chain_api_plugin.cpp b/plugins/chain_api_plugin/chain_api_plugin.cpp index 334d560baae..7b2be3bf414 100644 --- a/plugins/chain_api_plugin/chain_api_plugin.cpp +++ b/plugins/chain_api_plugin/chain_api_plugin.cpp @@ -79,7 +79,8 @@ void chain_api_plugin::plugin_startup() { ro_api.set_shorten_abi_errors( !_http_plugin.verbose_errors() ); _http_plugin.add_api({ - CHAIN_RO_CALL(get_info, 200l), + CHAIN_RO_CALL(get_info, 200)}, appbase::priority::medium); + _http_plugin.add_api({ CHAIN_RO_CALL(get_activated_protocol_features, 200), CHAIN_RO_CALL(get_block, 200), CHAIN_RO_CALL(get_block_header_state, 200), diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index 4400c1d69d1..b36d7fbabca 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ b/plugins/chain_plugin/chain_plugin.cpp @@ -43,7 +43,7 @@ std::ostream& operator<<(std::ostream& osm, eosio::chain::db_read_mode m) { osm << "speculative"; } else if ( m == eosio::chain::db_read_mode::HEAD ) { osm << "head"; - } else if ( m == eosio::chain::db_read_mode::READ_ONLY ) { + } else if ( m == eosio::chain::db_read_mode::READ_ONLY ) { // deprecated osm << "read-only"; } else if ( m == eosio::chain::db_read_mode::IRREVERSIBLE ) { osm << "irreversible"; @@ -140,6 +140,9 @@ class chain_plugin_impl { bfs::path blocks_dir; bool readonly = false; flat_map loaded_checkpoints; + bool accept_transactions = false; + bool api_accept_transactions = true; + fc::optional fork_db; fc::optional block_logger; @@ -235,11 +238,12 @@ void chain_plugin::set_program_options(options_description& cli, options_descrip "Deferred transactions sent by accounts in this list do not have any of the subjective whitelist/blacklist checks applied to them (may specify multiple times)") ("read-mode", boost::program_options::value()->default_value(eosio::chain::db_read_mode::SPECULATIVE), "Database read mode (\"speculative\", \"head\", \"read-only\", \"irreversible\").\n" - "In \"speculative\" mode database contains changes done up to the head block plus changes made by transactions not yet included to the blockchain.\n" - "In \"head\" mode database contains changes done up to the current head block.\n" - "In \"read-only\" mode database contains changes done up to the current head block and transactions cannot be pushed to the chain API.\n" - "In \"irreversible\" mode database contains changes done up to the last irreversible block and transactions cannot be pushed to the chain API.\n" + "In \"speculative\" mode: database contains state changes by transactions in the blockchain up to the head block as well as some transactions not yet included in the blockchain.\n" + "In \"head\" mode: database contains state changes by only transactions in the blockchain up to the head block; transactions received by the node are relayed if valid.\n" + "In \"read-only\" mode: (DEPRECATED: see p2p-accept-transactions & api-accept-transactions) database contains state changes by only transactions in the blockchain up to the head block; transactions received via the P2P network are not relayed and transactions cannot be pushed via the chain API.\n" + "In \"irreversible\" mode: database contains state changes by only transactions in the blockchain up to the last irreversible block; transactions received via the P2P network are not relayed and transactions cannot be pushed via the chain API.\n" ) + ( "api-accept-transactions", bpo::value()->default_value(true), "Allow API transactions to be evaluated and relayed if valid.") 
("validation-mode", boost::program_options::value()->default_value(eosio::chain::validation_mode::FULL), "Chain validation mode (\"full\" or \"light\").\n" "In \"full\" mode all incoming blocks will be fully validated.\n" @@ -991,6 +995,21 @@ void chain_plugin::plugin_initialize(const variables_map& options) { if ( options.count("read-mode") ) { my->chain_config->read_mode = options.at("read-mode").as(); } + my->api_accept_transactions = options.at( "api-accept-transactions" ).as(); + + if( my->chain_config->read_mode == db_read_mode::IRREVERSIBLE || my->chain_config->read_mode == db_read_mode::READ_ONLY ) { + if( my->chain_config->read_mode == db_read_mode::READ_ONLY ) { + wlog( "read-mode = read-only is deprecated use p2p-accept-transactions = false, api-accept-transactions = false instead." ); + } + if( my->api_accept_transactions ) { + my->api_accept_transactions = false; + std::stringstream ss; ss << my->chain_config->read_mode; + wlog( "api-accept-transactions set to false due to read-mode: ${m}", ("m", ss.str()) ); + } + } + if( my->api_accept_transactions ) { + enable_accept_transactions(); + } if ( options.count("validation-mode") ) { my->chain_config->block_validation_mode = options.at("validation-mode").as(); @@ -1077,6 +1096,8 @@ void chain_plugin::plugin_initialize(const variables_map& options) { void chain_plugin::plugin_startup() { try { + EOS_ASSERT( my->chain_config->read_mode != db_read_mode::IRREVERSIBLE || !accept_transactions(), plugin_config_exception, + "read-mode = irreversible. transactions should not be enabled by enable_accept_transactions" ); try { auto shutdown = [](){ return app().is_quiting(); }; if (my->snapshot_path) { @@ -1123,14 +1144,16 @@ void chain_plugin::plugin_shutdown() { my->chain.reset(); } -chain_apis::read_write::read_write(controller& db, const fc::microseconds& abi_serializer_max_time) +chain_apis::read_write::read_write(controller& db, const fc::microseconds& abi_serializer_max_time, bool api_accept_transactions) : db(db) , abi_serializer_max_time(abi_serializer_max_time) +, api_accept_transactions(api_accept_transactions) { } void chain_apis::read_write::validate() const { - EOS_ASSERT( !db.in_immutable_mode(), missing_chain_api_plugin_exception, "Not allowed, node in read-only mode" ); + EOS_ASSERT( api_accept_transactions, missing_chain_api_plugin_exception, + "Not allowed, node has api-accept-transactions = false" ); } bool chain_plugin::accept_block(const signed_block_ptr& block, const block_id_type& id ) { @@ -1366,6 +1389,19 @@ fc::microseconds chain_plugin::get_abi_serializer_max_time() const { return my->abi_serializer_max_time_us; } +bool chain_plugin::api_accept_transactions() const{ + return my->api_accept_transactions; +} + +bool chain_plugin::accept_transactions() const { + return my->accept_transactions; +} + +void chain_plugin::enable_accept_transactions() { + my->accept_transactions = true; +} + + void chain_plugin::log_guard_exception(const chain::guard_exception&e ) { if (e.code() == chain::database_guard_exception::code_value) { elog("Database has reached an unsafe level of usage, shutting down to avoid corrupting the database. 
" diff --git a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp index 2b608c4e4a8..1bc4c52dc86 100644 --- a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp +++ b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp @@ -592,8 +592,9 @@ class read_only { class read_write { controller& db; const fc::microseconds abi_serializer_max_time; + const bool api_accept_transactions; public: - read_write(controller& db, const fc::microseconds& abi_serializer_max_time); + read_write(controller& db, const fc::microseconds& abi_serializer_max_time, bool api_accept_transactions); void validate() const; using push_block_params = chain::signed_block; @@ -704,7 +705,7 @@ class chain_plugin : public plugin { void plugin_shutdown(); chain_apis::read_only get_read_only_api() const { return chain_apis::read_only(chain(), get_abi_serializer_max_time()); } - chain_apis::read_write get_read_write_api() { return chain_apis::read_write(chain(), get_abi_serializer_max_time()); } + chain_apis::read_write get_read_write_api() { return chain_apis::read_write(chain(), get_abi_serializer_max_time(), api_accept_transactions()); } bool accept_block( const chain::signed_block_ptr& block, const chain::block_id_type& id ); void accept_transaction(const chain::packed_transaction_ptr& trx, chain::plugin_interface::next_function next); @@ -733,6 +734,10 @@ class chain_plugin : public plugin { chain::chain_id_type get_chain_id() const; fc::microseconds get_abi_serializer_max_time() const; + bool api_accept_transactions() const; + // set true by other plugins if any plugin allows transactions + bool accept_transactions() const; + void enable_accept_transactions(); static void handle_guard_exception(const chain::guard_exception& e); void do_hard_replay(const variables_map& options); diff --git a/plugins/db_size_api_plugin/db_size.swagger.yaml b/plugins/db_size_api_plugin/db_size.swagger.yaml new file mode 100644 index 00000000000..95e39183450 --- /dev/null +++ b/plugins/db_size_api_plugin/db_size.swagger.yaml @@ -0,0 +1,60 @@ +openapi: 3.0.0 +info: + title: DB Size API + version: 1.0.0 + license: + name: MIT + url: https://opensource.org/licenses/MIT + contact: + url: https://eos.io +servers: + - url: '{protocol}://{host}:{port}/v1/' + variables: + protocol: + enum: + - http + - https + default: http + host: + default: localhost + port: + default: "8080" +components: + schemas: {} +paths: + /db_size/get: + post: + summary: get + description: Retrieves database stats + operationId: get + parameters: [] + requestBody: + content: + application/json: + schema: + type: object + properties: {} + responses: + '200': + description: OK + content: + application/json: + schema: + type: object + description: Defines the database stats + properties: + free_bytes: + type: integer + used_bytes: + type: integer + size: + type: integer + indices: + type: array + items: + type: object + properties: + index: + type: string + row_count: + type: integer diff --git a/plugins/http_plugin/http_plugin.cpp b/plugins/http_plugin/http_plugin.cpp index 887a2958c92..e4ef905479c 100644 --- a/plugins/http_plugin/http_plugin.cpp +++ b/plugins/http_plugin/http_plugin.cpp @@ -140,7 +140,8 @@ namespace eosio { class http_plugin_impl { public: - map url_handlers; + // key -> priority, url_handler + map> url_handlers; optional listen_endpoint; string access_control_allow_origin; string access_control_allow_headers; @@ -327,7 +328,7 @@ namespace eosio { if( 
handler_itr != url_handlers.end()) { con->defer_http_response(); bytes_in_flight += body.size(); - app().post( appbase::priority::low, + app().post( handler_itr->second.first, [&ioc = thread_pool->get_executor(), &bytes_in_flight = this->bytes_in_flight, handler_itr, this, resource{std::move( resource )}, body{std::move( body )}, con]() mutable { const size_t body_size = body.size(); @@ -337,7 +338,7 @@ namespace eosio { return; } try { - handler_itr->second( std::move( resource ), std::move( body ), + handler_itr->second.second( std::move( resource ), std::move( body ), [&ioc, &bytes_in_flight, con, this]( int code, fc::variant response_body ) { size_t response_size = 0; try { @@ -695,9 +696,9 @@ namespace eosio { app().post( 0, [me = my](){} ); // keep my pointer alive until queue is drained } - void http_plugin::add_handler(const string& url, const url_handler& handler) { + void http_plugin::add_handler(const string& url, const url_handler& handler, int priority) { fc_ilog( logger, "add api url: ${c}", ("c", url) ); - my->url_handlers.insert(std::make_pair(url,handler)); + my->url_handlers.insert(std::make_pair(url,std::make_pair(priority, handler))); } void http_plugin::handle_exception( const char *api_name, const char *call_name, const string& body, url_response_callback cb ) { diff --git a/plugins/http_plugin/include/eosio/http_plugin/http_plugin.hpp b/plugins/http_plugin/include/eosio/http_plugin/http_plugin.hpp index 29c31474fec..515ae6c796a 100644 --- a/plugins/http_plugin/include/eosio/http_plugin/http_plugin.hpp +++ b/plugins/http_plugin/include/eosio/http_plugin/http_plugin.hpp @@ -77,10 +77,10 @@ namespace eosio { void plugin_shutdown(); void handle_sighup() override; - void add_handler(const string& url, const url_handler&); - void add_api(const api_description& api) { + void add_handler(const string& url, const url_handler&, int priority = appbase::priority::medium_low); + void add_api(const api_description& api, int priority = appbase::priority::medium_low) { for (const auto& call : api) - add_handler(call.first, call.second); + add_handler(call.first, call.second, priority); } // standard exception handling for api handlers diff --git a/plugins/net_api_plugin/net.swagger.yaml b/plugins/net_api_plugin/net.swagger.yaml new file mode 100644 index 00000000000..4bba46ef39e --- /dev/null +++ b/plugins/net_api_plugin/net.swagger.yaml @@ -0,0 +1,225 @@ +openapi: 3.0.0 +info: + title: Net API + version: 1.0.0 + license: + name: MIT + url: https://opensource.org/licenses/MIT + contact: + url: https://eos.io +servers: + - url: '{protocol}://{host}:{port}/v1/' + variables: + protocol: + enum: + - http + - https + default: http + host: + default: localhost + port: + default: "8080" +components: + schemas: {} +paths: + /net/connections: + post: + summary: connections + description: Returns an array of all peer connection statuses. 
+ operationId: connections + parameters: [] + requestBody: + content: + application/json: + schema: + type: object + properties: {} + responses: + '200': + description: OK + content: + application/json: + schema: + type: array + items: + type: object + properties: + peer: + description: The IP address or URL of the peer + type: string + connecting: + description: True if the peer is connecting, otherwise false + type: boolean + syncing: + description: True if the peer is syncing, otherwise false + type: boolean + last_handshake: + description: Structure holding detailed information about the connection + type: object + properties: + network_version: + description: Incremental value above a computed base + type: integer + chain_id: + $ref: 'https://eosio.github.io/schemata/v2.0/oas/Sha256.yaml' + node_id: + $ref: 'https://eosio.github.io/schemata/v2.0/oas/Sha256.yaml' + key: + $ref: 'https://eosio.github.io/schemata/v2.0/oas/PublicKey.yaml' + time: + $ref: 'https://eosio.github.io/schemata/v2.0/oas/DateTimeSeconds.yaml' + token: + $ref: 'https://eosio.github.io/schemata/v2.0/oas/Sha256.yaml' + sig: + $ref: 'https://eosio.github.io/schemata/v2.0/oas/Signature.yaml' + p2p_address: + description: IP address or URL of the peer + type: string + last_irreversible_block_num: + description: Last irreversible block number + type: integer + last_irreversible_block_id: + $ref: 'https://eosio.github.io/schemata/v2.0/oas/Sha256.yaml' + head_num: + description: Head number + type: integer + head_id: + $ref: 'https://eosio.github.io/schemata/v2.0/oas/Sha256.yaml' + os: + description: Operating system name + type: string + agent: + description: Agent name + type: string + generation: + description: Generation number + type: integer + + /net/connect: + post: + summary: connect + description: Initiate a connection to a specified peer. + operationId: connect + parameters: [] + requestBody: + content: + application/json: + schema: + type: object + required: + - endpoint + properties: + endpoint: + type: string + description: the endpoint to connect to expressed as either IP address or URL + + responses: + '200': + description: OK + content: + application/json: + schema: + type: string + description: '"already connected" or "added connection"' + /net/disconnect: + post: + summary: disconnect + description: Initiate disconnection from a specified peer. + operationId: disconnect + parameters: [] + requestBody: + content: + application/json: + schema: + type: object + required: + - endpoint + properties: + endpoint: + type: string + description: the endpoint to disconnect from, expressed as either IP address or URL + + responses: + '200': + description: OK + content: + application/json: + schema: + type: string + description: '"connection removed" or "no known connection for host"' + /net/status: + post: + summary: status + description: Retrieves the connection status for a specified peer. 
+      operationId: status
+      parameters: []
+      requestBody:
+        content:
+          application/json:
+            schema:
+              type: object
+              required:
+                - endpoint
+              properties:
+                endpoint:
+                  type: string
+                  description: the endpoint to get the status for, expressed as either an IP address or URL
+
+      responses:
+        '200':
+          description: OK
+          content:
+            application/json:
+              schema:
+                type: object
+                properties:
+                  peer:
+                    description: The IP address or URL of the peer
+                    type: string
+                  connecting:
+                    description: True if the peer is connecting, otherwise false
+                    type: boolean
+                  syncing:
+                    description: True if the peer is syncing, otherwise false
+                    type: boolean
+                  last_handshake:
+                    description: Structure holding detailed information about the connection
+                    type: object
+                    properties:
+                      network_version:
+                        description: Incremental value above a computed base
+                        type: integer
+                      chain_id:
+                        $ref: 'https://eosio.github.io/schemata/v2.0/oas/Sha256.yaml'
+                      node_id:
+                        $ref: 'https://eosio.github.io/schemata/v2.0/oas/Sha256.yaml'
+                      key:
+                        $ref: 'https://eosio.github.io/schemata/v2.0/oas/PublicKey.yaml'
+                      time:
+                        $ref: 'https://eosio.github.io/schemata/v2.0/oas/DateTimeSeconds.yaml'
+                      token:
+                        $ref: 'https://eosio.github.io/schemata/v2.0/oas/Sha256.yaml'
+                      sig:
+                        $ref: 'https://eosio.github.io/schemata/v2.0/oas/Signature.yaml'
+                      p2p_address:
+                        description: IP address or URL of the peer
+                        type: string
+                      last_irreversible_block_num:
+                        description: Last irreversible block number
+                        type: integer
+                      last_irreversible_block_id:
+                        $ref: 'https://eosio.github.io/schemata/v2.0/oas/Sha256.yaml'
+                      head_num:
+                        description: Head number
+                        type: integer
+                      head_id:
+                        $ref: 'https://eosio.github.io/schemata/v2.0/oas/Sha256.yaml'
+                      os:
+                        description: Operating system name
+                        type: string
+                      agent:
+                        description: Agent name
+                        type: string
+                      generation:
+                        description: Generation number
+                        type: integer
diff --git a/plugins/net_api_plugin/net_api_plugin.cpp b/plugins/net_api_plugin/net_api_plugin.cpp
index 17d6af921d5..948b79d4187 100644
--- a/plugins/net_api_plugin/net_api_plugin.cpp
+++ b/plugins/net_api_plugin/net_api_plugin.cpp
@@ -75,7 +75,7 @@ void net_api_plugin::plugin_startup() {
           INVOKE_R_V(net_mgr, connections), 201),
 //       CALL(net, net_mgr, open,
 //            INVOKE_V_R(net_mgr, open, std::string), 200),
-   });
+   }, appbase::priority::medium);
 }
 
 void net_api_plugin::plugin_initialize(const variables_map& options) {
diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp
index a2226f67824..19107c7ebf0 100644
--- a/plugins/net_plugin/net_plugin.cpp
+++ b/plugins/net_plugin/net_plugin.cpp
@@ -180,7 +180,7 @@ namespace eosio {
 
       void bcast_transaction(const packed_transaction& trx);
       void rejected_transaction(const packed_transaction_ptr& trx, uint32_t head_blk_num);
-      void bcast_block(const block_state_ptr& bs);
+      void bcast_block( const signed_block_ptr& b, const block_id_type& id );
       void bcast_notice( const block_id_type& id );
       void rejected_block(const block_id_type& id);
@@ -237,6 +237,7 @@ namespace eosio {
       int max_cleanup_time_ms = 0;
       uint32_t max_client_count = 0;
       uint32_t max_nodes_per_host = 1;
+      bool p2p_accept_transactions = true;
 
      /// Peer clock may be no more than 1 second skewed from our clock, including network latency.
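     /// (Sketch of intent, not verbatim code: during handshake authentication a
     /// peer whose handshake time is skewed from our clock by more than this
     /// window is rejected.)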
const std::chrono::system_clock::duration peer_authentication_interval{std::chrono::seconds{1}}; @@ -245,7 +246,6 @@ namespace eosio { fc::sha256 node_id; string user_agent_name; - eosio::db_read_mode db_read_mode = eosio::db_read_mode::SPECULATIVE; chain_plugin* chain_plug = nullptr; producer_plugin* producer_plug = nullptr; bool use_socket_read_watermark = false; @@ -267,7 +267,6 @@ namespace eosio { std::atomic in_shutdown{false}; compat::channels::transaction_ack::channel_type::handle incoming_transaction_ack_subscription; - channels::irreversible_block::channel_type::handle incoming_irreversible_block_subscription; uint16_t thread_pool_size = 2; optional thread_pool; @@ -289,6 +288,7 @@ namespace eosio { void start_listen_loop(); void on_accepted_block( const block_state_ptr& bs ); + void on_pre_accepted_block( const signed_block_ptr& bs ); void transaction_ack(const std::pair&); void on_irreversible_block( const block_state_ptr& blk ); @@ -1919,8 +1919,8 @@ namespace eosio { } // thread safe - void dispatch_manager::bcast_block(const block_state_ptr& bs) { - fc_dlog( logger, "bcast block ${b}", ("b", bs->block_num) ); + void dispatch_manager::bcast_block(const signed_block_ptr& b, const block_id_type& id) { + fc_dlog( logger, "bcast block ${b}", ("b", b->block_num()) ); if( my_impl->sync_master->syncing_with_peer() ) return; @@ -1937,19 +1937,18 @@ namespace eosio { } ); if( !have_connection ) return; - std::shared_ptr> send_buffer = create_send_buffer( bs->block ); + std::shared_ptr> send_buffer = create_send_buffer( b ); - for_each_block_connection( [this, bs, send_buffer]( auto& cp ) { + for_each_block_connection( [this, &id, bnum = b->block_num(), &send_buffer]( auto& cp ) { if( !cp->current() ) { return true; } - cp->strand.post( [this, cp, bs, send_buffer]() { - uint32_t bnum = bs->block_num; + cp->strand.post( [this, cp, id, bnum, send_buffer]() { std::unique_lock g_conn( cp->conn_mtx ); bool has_block = cp->last_handshake_recv.last_irreversible_block_num >= bnum; g_conn.unlock(); if( !has_block ) { - if( !add_peer_block( bs->id, cp->connection_id ) ) { + if( !add_peer_block( id, cp->connection_id ) ) { fc_dlog( logger, "not bcast block ${b} to ${p}", ("b", bnum)("p", cp->peer_name()) ); return; } @@ -2433,6 +2432,12 @@ namespace eosio { handle_message( blk_id, std::move( ptr ) ); } else if( which == packed_transaction_which ) { + if( !my_impl->p2p_accept_transactions ) { + fc_dlog( logger, "p2p-accept-transaction=false - dropping txn" ); + pending_message_buffer.advance_read_ptr( message_length ); + return true; + } + auto ds = pending_message_buffer.create_datastream(); fc::raw::unpack( ds, which ); // throw away shared_ptr ptr = std::make_shared(); @@ -2606,7 +2611,7 @@ namespace eosio { uint32_t peer_lib = msg.last_irreversible_block_num; connection_wptr weak = shared_from_this(); - app().post( priority::low, [peer_lib, chain_plug = my_impl->chain_plug, weak{std::move(weak)}, + app().post( priority::medium, [peer_lib, chain_plug = my_impl->chain_plug, weak{std::move(weak)}, msg_lib_id = msg.last_irreversible_block_id]() { connection_ptr c = weak.lock(); if( !c ) return; @@ -2832,11 +2837,6 @@ namespace eosio { } void connection::handle_message( packed_transaction_ptr trx ) { - if( db_mode_is_immutable(my_impl->db_read_mode) ) { - fc_dlog( logger, "got a txn in read-only mode - dropping" ); - return; - } - const auto& tid = trx->id(); peer_dlog( this, "received packed_transaction ${id}", ("id", tid) ); @@ -2882,7 +2882,8 @@ namespace eosio { // called from connection strand 
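   // Note the priority choice just below: while this node is syncing, incoming
   // blocks are posted at priority::medium rather than priority::high, so API
   // handlers (now registered at medium / medium_low via add_api) still get
   // scheduled during catch-up.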
void connection::handle_message( const block_id_type& id, signed_block_ptr ptr ) { peer_dlog( this, "received signed_block ${id}", ("id", ptr->block_num() ) ); - app().post(priority::high, [ptr{std::move(ptr)}, id, c = shared_from_this()]() mutable { + auto priority = my_impl->sync_master->syncing_with_peer() ? priority::medium : priority::high; + app().post(priority, [ptr{std::move(ptr)}, id, c = shared_from_this()]() mutable { c->process_signed_block( id, std::move( ptr ) ); }); } @@ -2923,21 +2924,17 @@ namespace eosio { if( !accepted ) return; reason = no_reason; } catch( const unlinkable_block_exception &ex) { - peer_elog(c, "bad signed_block ${n} ${id}...: ${m}", ("n", blk_num)("id", blk_id.str().substr(8,16))("m",ex.what())); + peer_elog(c, "unlinkable_block_exception #${n} ${id}...: ${m}", ("n", blk_num)("id", blk_id.str().substr(8,16))("m",ex.to_string())); reason = unlinkable; } catch( const block_validate_exception &ex) { - peer_elog(c, "bad signed_block ${n} ${id}...: ${m}", ("n", blk_num)("id", blk_id.str().substr(8,16))("m",ex.what())); - fc_elog( logger, "block_validate_exception accept block #${n} syncing from ${p}",("n",blk_num)("p",c->peer_name()) ); + peer_elog(c, "block_validate_exception #${n} ${id}...: ${m}", ("n", blk_num)("id", blk_id.str().substr(8,16))("m",ex.to_string())); reason = validation; } catch( const assert_exception &ex) { - peer_elog(c, "bad signed_block ${n} ${id}...: ${m}", ("n", blk_num)("id", blk_id.str().substr(8,16))("m",ex.what())); - fc_elog( logger, "unable to accept block on assert exception ${n} from ${p}",("n",ex.to_string())("p",c->peer_name())); + peer_elog(c, "block assert_exception #${n} ${id}...: ${m}", ("n", blk_num)("id", blk_id.str().substr(8,16))("m",ex.to_string())); } catch( const fc::exception &ex) { - peer_elog(c, "bad signed_block ${n} ${id}...: ${m}", ("n", blk_num)("id", blk_id.str().substr(8,16))("m",ex.what())); - fc_elog( logger, "accept_block threw a non-assert exception ${x} from ${p}",( "x",ex.to_string())("p",c->peer_name())); + peer_elog(c, "bad block exception #${n} ${id}...: ${m}", ("n", blk_num)("id", blk_id.str().substr(8,16))("m",ex.to_string())); } catch( ...) 
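         // catch-all for anything thrown while applying the block that is not
         // an fc::exception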
{ - peer_elog(c, "bad signed_block ${n} ${id}...: unknown exception", ("n", blk_num)("id", blk_id.str().substr(8,16))); - fc_elog( logger, "handle sync block caught something else from ${p}",("p",c->peer_name())); + peer_elog(c, "bad block #${n} ${id}...: unknown exception", ("n", blk_num)("id", blk_id.str().substr(8,16))); } if( reason == no_reason ) { @@ -3088,14 +3085,28 @@ namespace eosio { } // called from application thread - void net_plugin_impl::on_accepted_block(const block_state_ptr& block) { + void net_plugin_impl::on_accepted_block(const block_state_ptr& bs) { update_chain_info(); - dispatcher->strand.post( [this, block]() { - fc_dlog( logger, "signaled, blk num = ${num}, id = ${id}", ("num", block->block_num)("id", block->id) ); - dispatcher->bcast_block( block ); + controller& cc = chain_plug->chain(); + dispatcher->strand.post( [this, bs]() { + fc_dlog( logger, "signaled accepted_block, blk num = ${num}, id = ${id}", ("num", bs->block_num)("id", bs->id) ); + dispatcher->bcast_block( bs->block, bs->id ); }); } + // called from application thread + void net_plugin_impl::on_pre_accepted_block(const signed_block_ptr& block) { + update_chain_info(); + controller& cc = chain_plug->chain(); + if( cc.is_trusted_producer(block->producer) ) { + dispatcher->strand.post( [this, block]() { + auto id = block->id(); + fc_dlog( logger, "signaled pre_accepted_block, blk num = ${num}, id = ${id}", ("num", block->block_num())("id", id) ); + dispatcher->bcast_block( block, id ); + }); + } + } + // called from application thread void net_plugin_impl::on_irreversible_block( const block_state_ptr& block) { fc_dlog( logger, "on_irreversible_block, blk num = ${num}, id = ${id}", ("num", block->block_num)("id", block->id) ); @@ -3258,6 +3269,7 @@ namespace eosio { " p2p.trx.eos.io:9876:trx\n" " p2p.blk.eos.io:9876:blk\n") ( "p2p-max-nodes-per-host", bpo::value()->default_value(def_max_nodes_per_host), "Maximum number of client nodes from any single IP address") + ( "p2p-accept-transactions", bpo::value()->default_value(true), "Allow transactions received over p2p network to be evaluated and relayed if valid.") ( "agent-name", bpo::value()->default_value("\"EOS Test Agent\""), "The name supplied to identify this node amongst the peers.") ( "allowed-connection", bpo::value>()->multitoken()->default_value({"any"}, "any"), "Can be 'any' or 'producers' or 'specified' or 'none'. If 'specified', peer-key must be specified at least once. If only 'producers', peer-key is not required. 'producers' and 'specified' may be combined.") ( "peer-key", bpo::value>()->composing()->multitoken(), "Optional public key of peer allowed to connect. May be used multiple times.") @@ -3269,7 +3281,7 @@ namespace eosio { ( "net-threads", bpo::value()->default_value(my->thread_pool_size), "Number of worker threads in net_plugin thread pool" ) ( "sync-fetch-span", bpo::value()->default_value(def_sync_fetch_span), "number of blocks to retrieve in a chunk from any individual peer during synchronization") - ( "use-socket-read-watermark", bpo::value()->default_value(false), "Enable expirimental socket read watermark optimization") + ( "use-socket-read-watermark", bpo::value()->default_value(false), "Enable experimental socket read watermark optimization") ( "peer-log-format", bpo::value()->default_value( "[\"${_name}\" ${_ip}:${_port}]" ), "The string used to format peers when logging messages about them. 
Variables are escaped with ${}.\n" "Available Variables:\n" @@ -3301,6 +3313,7 @@ namespace eosio { my->resp_expected_period = def_resp_expected_wait; my->max_client_count = options.at( "max-clients" ).as(); my->max_nodes_per_host = options.at( "p2p-max-nodes-per-host" ).as(); + my->p2p_accept_transactions = options.at( "p2p-accept-transactions" ).as(); my->use_socket_read_watermark = options.at( "use-socket-read-watermark" ).as(); @@ -3367,6 +3380,18 @@ namespace eosio { EOS_ASSERT( my->chain_plug, chain::missing_chain_plugin_exception, "" ); my->chain_id = my->chain_plug->get_chain_id(); fc::rand_pseudo_bytes( my->node_id.data(), my->node_id.data_size()); + const controller& cc = my->chain_plug->chain(); + + if( cc.get_read_mode() == db_read_mode::IRREVERSIBLE || cc.get_read_mode() == db_read_mode::READ_ONLY ) { + if( my->p2p_accept_transactions ) { + my->p2p_accept_transactions = false; + string m = cc.get_read_mode() == db_read_mode::IRREVERSIBLE ? "irreversible" : "read-only"; + wlog( "p2p-accept-transactions set to false due to read-mode: ${m}", ("m", m) ); + } + } + if( my->p2p_accept_transactions ) { + my->chain_plug->enable_accept_transactions(); + } } FC_LOG_AND_RETHROW() } @@ -3383,14 +3408,12 @@ namespace eosio { my->dispatcher.reset( new dispatch_manager( my_impl->thread_pool->get_executor() ) ); - chain::controller&cc = my->chain_plug->chain(); - my->db_read_mode = cc.get_read_mode(); - if( cc.in_immutable_mode() && my->p2p_address.size() ) { - fc_wlog( logger, "\n" - "**********************************\n" - "* Read Only Mode *\n" - "* - Transactions not forwarded - *\n" - "**********************************\n" ); + if( !my->p2p_accept_transactions && my->p2p_address.size() ) { + fc_ilog( logger, "\n" + "***********************************\n" + "* p2p-accept-transactions = false *\n" + "* Transactions not forwarded *\n" + "***********************************\n" ); } tcp::endpoint listen_endpoint; @@ -3437,9 +3460,13 @@ namespace eosio { my->start_listen_loop(); } { + chain::controller& cc = my->chain_plug->chain(); cc.accepted_block.connect( [my = my]( const block_state_ptr& s ) { my->on_accepted_block( s ); } ); + cc.pre_accepted_block.connect( [my = my]( const signed_block_ptr& s ) { + my->on_pre_accepted_block( s ); + } ); cc.irreversible_block.connect( [my = my]( const block_state_ptr& s ) { my->on_irreversible_block( s ); } ); diff --git a/plugins/producer_api_plugin/producer.swagger.yaml b/plugins/producer_api_plugin/producer.swagger.yaml new file mode 100644 index 00000000000..34a87875d25 --- /dev/null +++ b/plugins/producer_api_plugin/producer.swagger.yaml @@ -0,0 +1,497 @@ +openapi: 3.0.0 +info: + title: EOSIO API + version: 1.0.0 + license: + name: MIT + url: https://opensource.org/licenses/MIT + contact: + url: https://eos.io +tags: + - name: eosio +servers: + - url: "{protocol}://{host}:{port}/v1/" + variables: + protocol: + enum: + - http + - https + default: http + host: + default: localhost + port: + default: "8080" +components: + securitySchemes: {} + schemas: {} +security: + - {} +paths: + /producer/pause: + post: + summary: pause + description: Pause producer node + operationId: pause + parameters: [] + requestBody: + content: + application/json: + schema: + type: object + properties: {} + responses: + "200": + description: OK + content: + application/json: + schema: + type: boolean + description: "returns status" + /producer/resume: + post: + summary: resume + description: Resume producer node + operationId: resume + parameters: [] + requestBody: + content: 
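+          # Like pause, resume takes an empty JSON object as its body, e.g.
+          #   curl -X POST http://localhost:8080/v1/producer/resume -d '{}'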
+          application/json:
+            schema:
+              type: object
+              description: Resumes activity for producer
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                description: Returns Nothing
+  /producer/paused:
+    post:
+      summary: paused
+      description: Retrieves paused status for producer node
+      operationId: paused
+      parameters: []
+      requestBody:
+        content:
+          application/json:
+            schema:
+              type: object
+              properties: {}
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                type: boolean
+                description: True if producer is paused, false otherwise
+  /producer/get_runtime_options:
+    post:
+      summary: get_runtime_options
+      description: Retrieves runtime options for producer node
+      operationId: get_runtime_options
+      parameters: []
+      requestBody:
+        content:
+          application/json:
+            schema:
+              type: object
+              properties: {}
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                type: object
+                description: Returns runtime options set for the producer
+                properties:
+                  max_transaction_time:
+                    type: integer
+                    description: Max transaction time
+                  max_irreversible_block_age:
+                    type: integer
+                    description: Max irreversible block age
+                  produce_time_offset_us:
+                    type: integer
+                    description: Time offset
+                  last_block_time_offset_us:
+                    type: integer
+                    description: Last block time offset
+                  max_scheduled_transaction_time_per_block_ms:
+                    type: integer
+                    description: Max scheduled transaction time per block in ms
+                  subjective_cpu_leeway_us:
+                    type: integer
+                    description: Subjective CPU leeway
+                  incoming_defer_ratio:
+                    type: integer
+                    description: Incoming defer ratio
+  /producer/update_runtime_options:
+    post:
+      summary: update_runtime_options
+      description: Update runtime options for producer node
+      operationId: update_runtime_options
+      parameters: []
+      requestBody:
+        content:
+          application/json:
+            schema:
+              type: object
+              required:
+                - options
+              properties:
+                options:
+                  type: object
+                  description: Defines the runtime options to set for the producer
+                  properties:
+                    max_transaction_time:
+                      type: integer
+                      description: Max transaction time
+                    max_irreversible_block_age:
+                      type: integer
+                      description: Max irreversible block age
+                    produce_time_offset_us:
+                      type: integer
+                      description: Time offset
+                    last_block_time_offset_us:
+                      type: integer
+                      description: Last block time offset
+                    max_scheduled_transaction_time_per_block_ms:
+                      type: integer
+                      description: Max scheduled transaction time per block in ms
+                    subjective_cpu_leeway_us:
+                      type: integer
+                      description: Subjective CPU leeway
+                    incoming_defer_ratio:
+                      type: integer
+                      description: Incoming defer ratio
+
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                description: Returns Nothing
+  /producer/get_greylist:
+    post:
+      summary: get_greylist
+      description: Retrieves the greylist for producer node
+      operationId: get_greylist
+      parameters: []
+      requestBody:
+        content:
+          application/json:
+            schema:
+              type: object
+              properties: {}
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                type: array
+                description: List of account names stored in the greylist
+                items:
+                  $ref: "https://eosio.github.io/schemata/v2.0/oas/Name.yaml"
+
+  /producer/add_greylist_accounts:
+    post:
+      summary: add_greylist_accounts
+      description: Adds accounts to greylist for producer node
+      operationId: add_greylist_accounts
+      parameters: []
+      requestBody:
+        content:
+          application/json:
+            schema:
+              type: object
+              required:
+                - params
+              properties:
+                params:
+                  type: object
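+                  # illustrative body (the account names are hypothetical):
+                  #   { "params": { "accounts": ["alice", "bob"] } }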
+                  properties:
+                    accounts:
+                      type: array
+                      description: List of account names to add
+                      items:
+                        $ref: "https://eosio.github.io/schemata/v2.0/oas/Name.yaml"
+
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                description: Returns Nothing
+  /producer/remove_greylist_accounts:
+    post:
+      summary: remove_greylist_accounts
+      description: Removes accounts from greylist for producer node
+      operationId: remove_greylist_accounts
+      parameters: []
+      requestBody:
+        content:
+          application/json:
+            schema:
+              type: object
+              required:
+                - params
+              properties:
+                params:
+                  type: object
+                  properties:
+                    accounts:
+                      type: array
+                      description: List of account names to remove
+                      items:
+                        $ref: "https://eosio.github.io/schemata/v2.0/oas/Name.yaml"
+
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                type: array
+                description: List of account names stored in the greylist
+                items:
+                  $ref: "https://eosio.github.io/schemata/v2.0/oas/Name.yaml"
+
+  /producer/get_whitelist_blacklist:
+    post:
+      summary: get_whitelist_blacklist
+      description: Retrieves the whitelist and blacklist for producer node
+      operationId: get_whitelist_blacklist
+      parameters: []
+      requestBody:
+        content:
+          application/json:
+            schema:
+              type: object
+              properties: {}
+
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                type: object
+                description: Defines the actor whitelist and blacklist, the contract whitelist and blacklist, the action blacklist and key blacklist
+                properties:
+                  actor_whitelist:
+                    type: array
+                    items:
+                      $ref: "https://eosio.github.io/schemata/v2.0/oas/Name.yaml"
+                  actor_blacklist:
+                    type: array
+                    items:
+                      $ref: "https://eosio.github.io/schemata/v2.0/oas/Name.yaml"
+                  contract_whitelist:
+                    type: array
+                    items:
+                      $ref: "https://eosio.github.io/schemata/v2.0/oas/Name.yaml"
+                  contract_blacklist:
+                    type: array
+                    items:
+                      $ref: "https://eosio.github.io/schemata/v2.0/oas/Name.yaml"
+                  action_blacklist:
+                    type: array
+                    items:
+                      type: array
+                      description: Array of two string values, the account name as the first and action name as the second
+                      items:
+                        allOf:
+                          - $ref: "https://eosio.github.io/schemata/v2.0/oas/Name.yaml"
+                          - $ref: "https://eosio.github.io/schemata/v2.0/oas/CppSignature.yaml"
+                  key_blacklist:
+                    type: array
+                    items:
+                      $ref: "https://eosio.github.io/schemata/v2.0/oas/KeyType.yaml"
+
+  /producer/set_whitelist_blacklist:
+    post:
+      summary: set_whitelist_blacklist
+      description: Sets the whitelist and blacklist for producer node
+      operationId: set_whitelist_blacklist
+      parameters: []
+      requestBody:
+        content:
+          application/json:
+            schema:
+              type: object
+              required:
+                - params
+              properties:
+                params:
+                  type: object
+                  description: Defines the actor whitelist and blacklist, the contract whitelist and blacklist, the action blacklist and key blacklist
+                  properties:
+                    actor_whitelist:
+                      type: array
+                      items:
+                        $ref: "https://eosio.github.io/schemata/v2.0/oas/Name.yaml"
+                    actor_blacklist:
+                      type: array
+                      items:
+                        $ref: "https://eosio.github.io/schemata/v2.0/oas/Name.yaml"
+                    contract_whitelist:
+                      type: array
+                      items:
+                        $ref: "https://eosio.github.io/schemata/v2.0/oas/Name.yaml"
+                    contract_blacklist:
+                      type: array
+                      items:
+                        $ref: "https://eosio.github.io/schemata/v2.0/oas/Name.yaml"
+                    action_blacklist:
+                      type: array
+                      items:
+                        type: array
+                        description: Array of two string values, the account name as the first and action name as the second
+                        items:
+                          allOf:
+                            - $ref: "https://eosio.github.io/schemata/v2.0/oas/Name.yaml"
+                            - $ref:
"https://eosio.github.io/schemata/v2.0/oas/CppSignature.yaml" + key_blacklist: + type: array + items: + $ref: "https://eosio.github.io/schemata/v2.0/oas/KeyType.yaml" + + responses: + "200": + description: OK + content: + application/json: + schema: + description: Returns Nothing + /producer/create_snapshot: + post: + summary: create_snapshot + description: Creates a snapshot for producer node + operationId: create_snapshot + parameters: [] + requestBody: + content: + application/json: + schema: + type: object + required: + - next + properties: + next: + type: object + description: Defines the snapshot to be created + properties: + snapshot_name: + type: string + description: The name of the snapshot + head_block_id: + $ref: "https://eosio.github.io/schemata/v2.0/oas/Sha256.yaml" + + + responses: + "200": + description: OK + content: + application/json: + schema: + description: Returns Nothing + + /producer/get_integrity_hash: + post: + summary: get_integrity_hash + description: Retreives the integrity hash for producer node + operationId: get_integrity_hash + parameters: [] + requestBody: + content: + application/json: + schema: + type: object + properties: {} + + responses: + "200": + description: OK + content: + application/json: + schema: + type: object + description: Defines the integrity hash information details + properties: + integrity_hash: + $ref: "https://eosio.github.io/schemata/v2.0/oas/Sha256.yaml" + head_block_id: + $ref: "https://eosio.github.io/schemata/v2.0/oas/Sha256.yaml" + + /producer/schedule_protocol_feature_activations: + post: + summary: schedule_protocol_feature_activations + description: Schedule protocol feature activation for producer node + operationId: schedule_protocol_feature_activations + parameters: [] + requestBody: + content: + application/json: + schema: + type: object + required: + - schedule + properties: + schedule: + type: object + properties: + protocol_features_to_activate: + type: array + description: List of protocol features to activate + items: + $ref: "https://eosio.github.io/schemata/v2.0/oas/Sha256.yaml" + + responses: + "200": + description: OK + content: + application/json: + schema: + description: Returns Nothing + + /producer/get_supported_protocol_features: + post: + summary: get_supported_protocol_features + description: Retreives supported protocol features for producer node + operationId: get_supported_protocol_features + parameters: [] + requestBody: + content: + application/json: + schema: + type: object + required: + - params + properties: + params: + type: object + description: Defines filters based on which to return the supported protocol features + properties: + exclude_disabled: + type: boolean + description: Exclude disabled protocol features + exclude_unactivatable: + type: boolean + description: Exclude unactivatable protocol features + + responses: + "200": + description: OK + content: + application/json: + schema: + type: array + description: Variant type, an array of strings with the supported protocol features + items: + type: string diff --git a/plugins/producer_api_plugin/producer_api_plugin.cpp b/plugins/producer_api_plugin/producer_api_plugin.cpp index bd3feb1e9bb..6c30ee04cfb 100644 --- a/plugins/producer_api_plugin/producer_api_plugin.cpp +++ b/plugins/producer_api_plugin/producer_api_plugin.cpp @@ -124,7 +124,7 @@ void producer_api_plugin::plugin_startup() { producer_plugin::get_supported_protocol_features_params), 201), CALL(producer, producer, get_account_ram_corrections, INVOKE_R_R(producer, 
get_account_ram_corrections, producer_plugin::get_account_ram_corrections_params), 201), - }); + }, appbase::priority::medium); } void producer_api_plugin::plugin_initialize(const variables_map& options) { diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index c67f53a1e11..63998650cec 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -78,7 +78,7 @@ using namespace eosio::chain; using namespace eosio::chain::plugin_interface; namespace { - bool failure_is_subjective(const fc::exception& e, bool deadline_is_subjective) { + bool exception_is_exhausted(const fc::exception& e, bool deadline_is_subjective) { auto code = e.code(); return (code == block_cpu_usage_exceeded::code_value) || (code == block_net_usage_exceeded::code_value) || @@ -181,12 +181,14 @@ class producer_plugin_impl : public std::enable_shared_from_this calculate_next_block_time(const account_name& producer_name, const block_timestamp_type& current_block_time) const; void schedule_production_loop(); + void schedule_maybe_produce_block( bool exhausted ); void produce_block(); bool maybe_produce_block(); + bool block_is_exhausted() const; bool remove_expired_persisted_trxs( const fc::time_point& deadline ); bool remove_expired_blacklisted_trxs( const fc::time_point& deadline ); bool process_unapplied_trxs( const fc::time_point& deadline ); - bool process_scheduled_and_incoming_trxs( const fc::time_point& deadline, size_t& pending_incoming_process_limit ); + void process_scheduled_and_incoming_trxs( const fc::time_point& deadline, size_t& pending_incoming_process_limit ); bool process_incoming_trxs( const fc::time_point& deadline, size_t& pending_incoming_process_limit ); boost::program_options::variables_map _options; @@ -207,6 +209,8 @@ class producer_plugin_impl : public std::enable_shared_from_this& block_id) { auto& chain = chain_plug->chain(); if ( _pending_block_mode == pending_block_mode::producing ) { - fc_wlog( _log, "dropped incoming block #${num} while producing #${pbn} for ${bt}, id: ${id}", - ("num", block->block_num())("pbn", chain.head_block_num() + 1) - ("bt", chain.pending_block_time())("id", block_id ? (*block_id).str() : "UNKNOWN") ); + fc_wlog( _log, "dropped incoming block #${num} id: ${id}", + ("num", block->block_num())("id", block_id ? 
(*block_id).str() : "UNKNOWN") ); return false; } @@ -463,14 +466,19 @@ class producer_plugin_impl : public std::enable_shared_from_thisprocess_incoming_transaction_async( future.get(), persist_until_expired, std::move( next ) ); + if( !self->process_incoming_transaction_async( future.get(), persist_until_expired, std::move( next ) ) ) { + if( self->_pending_block_mode == pending_block_mode::producing ) { + self->schedule_maybe_produce_block( true ); + } + } } CATCH_AND_CALL(next); } ); } }); } - void process_incoming_transaction_async(const transaction_metadata_ptr& trx, bool persist_until_expired, next_function next) { + bool process_incoming_transaction_async(const transaction_metadata_ptr& trx, bool persist_until_expired, next_function next) { + bool exhausted = false; chain::controller& chain = chain_plug->chain(); auto send_response = [this, &trx, &chain, &next](const fc::static_variant& response) { @@ -511,18 +519,18 @@ class producer_plugin_impl : public std::enable_shared_from_this( FC_LOG_MESSAGE( error, "expired transaction ${id}, expiration ${e}, block time ${bt}", ("id", id)("e", trx->packed_trx()->expiration())( "bt", bt ))))); - return; + return true; } if( chain.is_known_unexpired_transaction( id )) { send_response( std::static_pointer_cast( std::make_shared( FC_LOG_MESSAGE( error, "duplicate transaction ${id}", ("id", id)))) ); - return; + return true; } if( !chain.is_building_block()) { _pending_incoming_transactions.add( trx, persist_until_expired, next ); - return; + return true; } auto deadline = fc::time_point::now() + fc::milliseconds( _max_transaction_time_ms ); @@ -534,9 +542,9 @@ class producer_plugin_impl : public std::enable_shared_from_thisbilled_cpu_time_us, false ); if( trace->except ) { - if( failure_is_subjective( *trace->except, deadline_is_subjective )) { + if( exception_is_exhausted( *trace->except, deadline_is_subjective )) { _pending_incoming_transactions.add( trx, persist_until_expired, next ); if( _pending_block_mode == pending_block_mode::producing ) { fc_dlog( _trx_trace_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} COULD NOT FIT, tx: ${txid} RETRYING ", @@ -547,6 +555,8 @@ class producer_plugin_impl : public std::enable_shared_from_thisid())); } + if( !exhausted ) + exhausted = block_is_exhausted(); } else { auto e_ptr = trace->except->dynamic_copy_exception(); send_response( e_ptr ); @@ -567,6 +577,8 @@ class producer_plugin_impl : public std::enable_shared_from_this()->default_value(config::default_block_cpu_effort_pct / config::percent_1), "Percentage of cpu block production time used to produce last block. Whole number percentages, e.g. 
80 for 80%") + ("max-block-cpu-usage-threshold-us", bpo::value()->default_value( 5000 ), + "Threshold of CPU block production to consider block full; when within threshold of max-block-cpu-usage block can be produced immediately") + ("max-block-net-usage-threshold-bytes", bpo::value()->default_value( 1024 ), + "Threshold of NET block production to consider block full; when within threshold of max-block-net-usage block can be produced immediately") ("max-scheduled-transaction-time-per-block-ms", boost::program_options::value()->default_value(100), "Maximum wall-clock time, in milliseconds, spent retiring scheduled transactions in any block before returning to normal transaction processing.") ("subjective-cpu-leeway-us", boost::program_options::value()->default_value( config::default_subjective_cpu_leeway_us ), "Time in microseconds allowed for a transaction that starts with insufficient CPU quota to complete and cover its CPU usage.") ("incoming-defer-ratio", bpo::value()->default_value(1.0), - "ratio between incoming transations and deferred transactions when both are exhausted") + "ratio between incoming transactions and deferred transactions when both are queued for execution") ("incoming-transaction-queue-size-mb", bpo::value()->default_value( 1024 ), "Maximum size (in MiB) of the incoming transaction queue. Exceeding this value will subjectively drop transaction with resource exhaustion.") ("producer-threads", bpo::value()->default_value(config::default_controller_thread_pool_size), @@ -838,6 +854,12 @@ void producer_plugin::plugin_initialize(const boost::program_options::variables_ my->_produce_time_offset_us = std::min( my->_produce_time_offset_us, cpu_effort_offset_us ); my->_last_block_time_offset_us = std::min( my->_last_block_time_offset_us, last_block_cpu_effort_offset_us ); + my->_max_block_cpu_usage_threshold_us = options.at( "max-block-cpu-usage-threshold-us" ).as(); + EOS_ASSERT( my->_max_block_cpu_usage_threshold_us < config::block_interval_us, plugin_config_exception, + "max-block-cpu-usage-threshold-us ${t} must be 0 .. 
${bi}", ("bi", config::block_interval_us)("t", my->_max_block_cpu_usage_threshold_us) ); + + my->_max_block_net_usage_threshold_bytes = options.at( "max-block-net-usage-threshold-bytes" ).as(); + my->_max_scheduled_transaction_time_per_block_ms = options.at("max-scheduled-transaction-time-per-block-ms").as(); if( options.at( "subjective-cpu-leeway-us" ).as() != config::default_subjective_cpu_leeway_us ) { @@ -931,6 +953,9 @@ void producer_plugin::plugin_startup() EOS_ASSERT( my->_producers.empty() || chain.get_validation_mode() == chain::validation_mode::FULL, plugin_config_exception, "node cannot have any producer-name configured because block production is not safe when validation_mode is not \"full\"" ); + EOS_ASSERT( my->_producers.empty() || my->chain_plug->accept_transactions(), plugin_config_exception, + "node cannot have any producer-name configured because no block production is possible with no [api|p2p]-accepted-transactions" ); + my->_accepted_block_connection.emplace(chain.accepted_block.connect( [this]( const auto& bsp ){ my->on_block( bsp ); } )); my->_accepted_block_header_connection.emplace(chain.accepted_block_header.connect( [this]( const auto& bsp ){ my->on_block_header( bsp ); } )); my->_irreversible_block_connection.emplace(chain.irreversible_block.connect( [this]( const auto& bsp ){ my->on_irreversible_block( bsp->block ); } )); @@ -983,6 +1008,7 @@ void producer_plugin::handle_sighup() { } void producer_plugin::pause() { + fc_ilog(_log, "Producer paused."); my->_pause_production = true; } @@ -994,7 +1020,10 @@ void producer_plugin::resume() { if (my->_pending_block_mode == pending_block_mode::speculating) { chain::controller& chain = my->chain_plug->chain(); my->_unapplied_transactions.add_aborted( chain.abort_block() ); + fc_ilog(_log, "Producer resumed. Scheduling production."); my->schedule_production_loop(); + } else { + fc_ilog(_log, "Producer resumed."); } } @@ -1382,11 +1411,6 @@ fc::time_point producer_plugin_impl::calculate_pending_block_time() const { const fc::time_point base = std::max(now, chain.head_block_time()); const int64_t min_time_to_next_block = (config::block_interval_us) - (base.time_since_epoch().count() % (config::block_interval_us) ); fc::time_point block_time = base + fc::microseconds(min_time_to_next_block); - - - if((block_time - now) < fc::microseconds(config::block_interval_us/10) ) { // we must sleep for at least 50ms - block_time += fc::microseconds(config::block_interval_us); - } return block_time; } @@ -1398,13 +1422,11 @@ fc::time_point producer_plugin_impl::calculate_block_deadline( const fc::time_po producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { chain::controller& chain = chain_plug->chain(); - if( chain.in_immutable_mode() ) + if( !chain_plug->accept_transactions() ) return start_block_result::waiting_for_block; const auto& hbs = chain.head_block_state(); - //Schedule for the next second's tick regardless of chain state - // If we would wait less than 50ms (1/10 of block_interval), wait for the whole block interval. 
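   // note: the "less than 50ms left, wait a whole interval" bump removed above
   // from calculate_pending_block_time() is superseded by the usage thresholds:
   // a full block is now produced immediately instead (see block_is_exhausted()
   // and schedule_maybe_produce_block()).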
const fc::time_point now = fc::time_point::now(); const fc::time_point block_time = calculate_pending_block_time(); @@ -1583,7 +1605,7 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { if( app().is_quiting() ) // db guard exception above in LOG_AND_DROP could have called app().quit() return start_block_result::failed; - if (preprocess_deadline <= fc::time_point::now()) { + if (preprocess_deadline <= fc::time_point::now() || block_is_exhausted()) { return start_block_result::exhausted; } else { if( !process_incoming_trxs( preprocess_deadline, pending_incoming_process_limit ) ) @@ -1654,12 +1676,14 @@ bool producer_plugin_impl::remove_expired_blacklisted_trxs( const fc::time_point { bool exhausted = false; auto& blacklist_by_expiry = _blacklisted_transactions.get(); - auto now = fc::time_point::now(); if(!blacklist_by_expiry.empty()) { + const chain::controller& chain = chain_plug->chain(); + const auto lib_time = chain.last_irreversible_block_time(); + int num_expired = 0; int orig_count = _blacklisted_transactions.size(); - while (!blacklist_by_expiry.empty() && blacklist_by_expiry.begin()->expiry <= now) { + while (!blacklist_by_expiry.empty() && blacklist_by_expiry.begin()->expiry <= lib_time) { if (deadline <= fc::time_point::now()) { exhausted = true; break; @@ -1702,12 +1726,14 @@ bool producer_plugin_impl::process_unapplied_trxs( const fc::time_point& deadlin trx_deadline = deadline; } - auto trace = chain.push_transaction( trx, trx_deadline ); + auto trace = chain.push_transaction( trx, trx_deadline, trx->billed_cpu_time_us, false ); if( trace->except ) { - if( failure_is_subjective( *trace->except, deadline_is_subjective ) ) { - exhausted = true; - // don't erase, subjective failure so try again next time - break; + if( exception_is_exhausted( *trace->except, deadline_is_subjective ) ) { + if( block_is_exhausted() ) { + exhausted = true; + // don't erase, subjective failure so try again next time + break; + } } else { // this failed our configured maximum transaction time, we don't want to replay it ++num_failed; @@ -1729,7 +1755,7 @@ bool producer_plugin_impl::process_unapplied_trxs( const fc::time_point& deadlin return !exhausted; } -bool producer_plugin_impl::process_scheduled_and_incoming_trxs( const fc::time_point& deadline, size_t& pending_incoming_process_limit ) +void producer_plugin_impl::process_scheduled_and_incoming_trxs( const fc::time_point& deadline, size_t& pending_incoming_process_limit ) { // scheduled transactions int num_applied = 0; @@ -1746,21 +1772,22 @@ bool producer_plugin_impl::process_scheduled_and_incoming_trxs( const fc::time_p auto sch_itr = sch_idx.begin(); while( sch_itr != sch_idx.end() ) { if( sch_itr->delay_until > pending_block_time) break; // not scheduled yet + if( exhausted || deadline <= fc::time_point::now() ) { + exhausted = true; + break; + } if( sch_itr->published >= pending_block_time ) { ++sch_itr; continue; // do not allow schedule and execute in same block } - if( deadline <= fc::time_point::now() ) { - exhausted = true; - break; - } - const transaction_id_type trx_id = sch_itr->trx_id; // make copy since reference could be invalidated - if (blacklist_by_id.find(trx_id) != blacklist_by_id.end()) { + if (blacklist_by_id.find(sch_itr->trx_id) != blacklist_by_id.end()) { ++sch_itr; continue; } + const transaction_id_type trx_id = sch_itr->trx_id; // make copy since reference could be invalidated + const auto sch_expiration = sch_itr->expiration; auto sch_itr_next = sch_itr; // save off next since sch_itr 
may be invalidated by loop ++sch_itr_next; const auto next_delay_until = sch_itr_next != sch_idx.end() ? sch_itr_next->delay_until : sch_itr->delay_until; @@ -1778,10 +1805,13 @@ bool producer_plugin_impl::process_scheduled_and_incoming_trxs( const fc::time_p auto e = _pending_incoming_transactions.pop_front(); --pending_incoming_process_limit; incoming_trx_weight -= 1.0; - process_incoming_transaction_async(std::get<0>(e), std::get<1>(e), std::get<2>(e)); + if( !process_incoming_transaction_async(std::get<0>(e), std::get<1>(e), std::get<2>(e)) ) { + exhausted = true; + break; + } } - if (deadline <= fc::time_point::now()) { + if (exhausted || deadline <= fc::time_point::now()) { exhausted = true; break; } @@ -1794,15 +1824,16 @@ bool producer_plugin_impl::process_scheduled_and_incoming_trxs( const fc::time_p trx_deadline = deadline; } - auto trace = chain.push_scheduled_transaction(trx_id, trx_deadline); + auto trace = chain.push_scheduled_transaction(trx_id, trx_deadline, 0, false); if (trace->except) { - if (failure_is_subjective(*trace->except, deadline_is_subjective)) { - exhausted = true; - break; + if (exception_is_exhausted(*trace->except, deadline_is_subjective)) { + if( block_is_exhausted() ) { + exhausted = true; + break; + } } else { - auto expiration = fc::time_point::now() + fc::seconds(chain.get_global_properties().configuration.deferred_trx_expiration_window); // this failed our configured maximum transaction time, we don't want to replay it add it to a blacklist - _blacklisted_transactions.insert(transaction_id_with_expiry{trx_id, expiration}); + _blacklisted_transactions.insert(transaction_id_with_expiry{trx_id, sch_expiration}); num_failed++; } } else { @@ -1822,8 +1853,6 @@ bool producer_plugin_impl::process_scheduled_and_incoming_trxs( const fc::time_p "Processed ${m} of ${n} scheduled transactions, Applied ${applied}, Failed/Dropped ${failed}", ( "m", num_processed )( "n", scheduled_trxs_size )( "applied", num_applied )( "failed", num_failed ) ); } - - return !exhausted; } bool producer_plugin_impl::process_incoming_trxs( const fc::time_point& deadline, size_t& pending_incoming_process_limit ) @@ -1839,14 +1868,28 @@ bool producer_plugin_impl::process_incoming_trxs( const fc::time_point& deadline } auto e = _pending_incoming_transactions.pop_front(); --pending_incoming_process_limit; - process_incoming_transaction_async(std::get<0>(e), std::get<1>(e), std::get<2>(e)); ++processed; + if( !process_incoming_transaction_async(std::get<0>(e), std::get<1>(e), std::get<2>(e)) ) { + exhausted = true; + break; + } } fc_dlog(_log, "Processed ${n} pending transactions, ${p} left", ("n", processed)("p", _pending_incoming_transactions.size())); } return !exhausted; } +bool producer_plugin_impl::block_is_exhausted() const { + const chain::controller& chain = chain_plug->chain(); + const auto& rl = chain.get_resource_limits_manager(); + + const uint64_t cpu_limit = rl.get_block_cpu_limit(); + if( cpu_limit < _max_block_cpu_usage_threshold_us ) return true; + const uint64_t net_limit = rl.get_block_net_limit(); + if( net_limit < _max_block_net_usage_threshold_bytes ) return true; + return false; +} + // Example: // --> Start block A (block time x.500) at time x.000 // -> start_block() @@ -1854,9 +1897,7 @@ bool producer_plugin_impl::process_incoming_trxs( const fc::time_point& deadline // -> Idle // --> Start block B (block time y.000) at time x.500 void producer_plugin_impl::schedule_production_loop() { - chain::controller& chain = chain_plug->chain(); _timer.cancel(); - 
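   // any previously scheduled production timer is cancelled first; start_block()
   // below decides whether to produce, speculate, or keep waiting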
std::weak_ptr weak_this = shared_from_this(); auto result = start_block(); @@ -1866,7 +1907,7 @@ void producer_plugin_impl::schedule_production_loop() { // we failed to start a block, so try again later? _timer.async_wait( app().get_priority_queue().wrap( priority::high, - [weak_this, cid = ++_timer_corelation_id]( const boost::system::error_code& ec ) { + [weak_this = weak_from_this(), cid = ++_timer_corelation_id]( const boost::system::error_code& ec ) { auto self = weak_this.lock(); if( self && ec != boost::asio::error::operation_aborted && cid == self->_timer_corelation_id ) { self->schedule_production_loop(); @@ -1875,7 +1916,7 @@ void producer_plugin_impl::schedule_production_loop() { } else if (result == start_block_result::waiting_for_block){ if (!_producers.empty() && !production_disabled_by_policy()) { fc_dlog(_log, "Waiting till another block is received and scheduling Speculative/Production Change"); - schedule_delayed_production_loop(weak_this, calculate_producer_wake_up_time(calculate_pending_block_time())); + schedule_delayed_production_loop(weak_from_this(), calculate_producer_wake_up_time(calculate_pending_block_time())); } else { fc_dlog(_log, "Waiting till another block is received"); // nothing to do until more blocks arrive @@ -1885,52 +1926,52 @@ void producer_plugin_impl::schedule_production_loop() { // scheduled in start_block() } else if (_pending_block_mode == pending_block_mode::producing) { + schedule_maybe_produce_block( result == start_block_result::exhausted ); - // we succeeded but block may be exhausted - static const boost::posix_time::ptime epoch(boost::gregorian::date(1970, 1, 1)); - auto deadline = calculate_block_deadline(chain.pending_block_time()); - - if (deadline > fc::time_point::now()) { - // ship this block off no later than its deadline - EOS_ASSERT( chain.is_building_block(), missing_pending_block_state, "producing without pending_block_state, start_block succeeded" ); - _timer.expires_at( epoch + boost::posix_time::microseconds( deadline.time_since_epoch().count() )); - fc_dlog(_log, "Scheduling Block Production on Normal Block #${num} for ${time}", - ("num", chain.head_block_num()+1)("time",deadline)); - } else { - EOS_ASSERT( chain.is_building_block(), missing_pending_block_state, "producing without pending_block_state" ); - auto expect_time = chain.pending_block_time() - fc::microseconds(config::block_interval_us); - // ship this block off up to 1 block time earlier or immediately - if (fc::time_point::now() >= expect_time) { - _timer.expires_from_now( boost::posix_time::microseconds( 0 )); - fc_dlog(_log, "Scheduling Block Production on Exhausted Block #${num} immediately", - ("num", chain.head_block_num()+1)); - } else { - _timer.expires_at(epoch + boost::posix_time::microseconds(expect_time.time_since_epoch().count())); - fc_dlog(_log, "Scheduling Block Production on Exhausted Block #${num} at ${time}", - ("num", chain.head_block_num()+1)("time",expect_time)); - } - } - - _timer.async_wait( app().get_priority_queue().wrap( priority::high, - [&chain,weak_this,cid=++_timer_corelation_id](const boost::system::error_code& ec) { - auto self = weak_this.lock(); - if( self && ec != boost::asio::error::operation_aborted && cid == self->_timer_corelation_id ) { - // pending_block_state expected, but can't assert inside async_wait - auto block_num = chain.is_building_block() ? 
chain.head_block_num() + 1 : 0; - fc_dlog( _log, "Produce block timer for ${num} running at ${time}", ("num", block_num)("time", fc::time_point::now()) ); - auto res = self->maybe_produce_block(); - fc_dlog( _log, "Producing Block #${num} returned: ${res}", ("num", block_num)( "res", res ) ); - } - } ) ); } else if (_pending_block_mode == pending_block_mode::speculating && !_producers.empty() && !production_disabled_by_policy()){ + chain::controller& chain = chain_plug->chain(); fc_dlog(_log, "Speculative Block Created; Scheduling Speculative/Production Change"); EOS_ASSERT( chain.is_building_block(), missing_pending_block_state, "speculating without pending_block_state" ); - schedule_delayed_production_loop(weak_this, calculate_producer_wake_up_time(chain.pending_block_time())); + schedule_delayed_production_loop(weak_from_this(), calculate_producer_wake_up_time(chain.pending_block_time())); } else { fc_dlog(_log, "Speculative Block Created"); } } +void producer_plugin_impl::schedule_maybe_produce_block( bool exhausted ) { + chain::controller& chain = chain_plug->chain(); + + // we succeeded but block may be exhausted + static const boost::posix_time::ptime epoch( boost::gregorian::date( 1970, 1, 1 ) ); + auto deadline = calculate_block_deadline( chain.pending_block_time() ); + + if( !exhausted && deadline > fc::time_point::now() ) { + // ship this block off no later than its deadline + EOS_ASSERT( chain.is_building_block(), missing_pending_block_state, + "producing without pending_block_state, start_block succeeded" ); + _timer.expires_at( epoch + boost::posix_time::microseconds( deadline.time_since_epoch().count() ) ); + fc_dlog( _log, "Scheduling Block Production on Normal Block #${num} for ${time}", + ("num", chain.head_block_num() + 1)( "time", deadline ) ); + } else { + EOS_ASSERT( chain.is_building_block(), missing_pending_block_state, "producing without pending_block_state" ); + _timer.expires_from_now( boost::posix_time::microseconds( 0 ) ); + fc_dlog( _log, "Scheduling Block Production on ${desc} Block #${num} immediately", + ("num", chain.head_block_num() + 1)("desc", block_is_exhausted() ? "Exhausted" : "Deadline exceeded") ); + } + + _timer.async_wait( app().get_priority_queue().wrap( priority::high, + [&chain, weak_this = weak_from_this(), cid=++_timer_corelation_id](const boost::system::error_code& ec) { + auto self = weak_this.lock(); + if( self && ec != boost::asio::error::operation_aborted && cid == self->_timer_corelation_id ) { + // pending_block_state expected, but can't assert inside async_wait + auto block_num = chain.is_building_block() ? 
chain.head_block_num() + 1 : 0; + fc_dlog( _log, "Produce block timer for ${num} running at ${time}", ("num", block_num)("time", fc::time_point::now()) ); + auto res = self->maybe_produce_block(); + fc_dlog( _log, "Producing Block #${num} returned: ${res}", ("num", block_num)( "res", res ) ); + } + } ) ); +} + optional producer_plugin_impl::calculate_producer_wake_up_time( const block_timestamp_type& ref_block_time ) const { // if we have any producers then we should at least set a timer for our next available slot optional wake_up_time; diff --git a/plugins/test_control_api_plugin/test_control.swagger.yaml b/plugins/test_control_api_plugin/test_control.swagger.yaml new file mode 100644 index 00000000000..906fe76ea8c --- /dev/null +++ b/plugins/test_control_api_plugin/test_control.swagger.yaml @@ -0,0 +1,58 @@ +openapi: 3.0.0 +info: + title: Test Control API + version: 1.0.0 + license: + name: MIT + url: https://opensource.org/licenses/MIT + contact: + url: https://eos.io +tags: + - name: eosio +servers: + - url: '{protocol}://{host}:{port}/v1/' + variables: + protocol: + enum: + - http + - https + default: http + host: + default: localhost + port: + default: "8080" +components: + schemas: {} +paths: + /test_control/kill_node_or_producer: + post: + tags: + - TestControl + summary: kill_node_or_producer + description: Kills node or producer + operationId: kill_node_or_producer + parameters: [] + requestBody: + content: + application/json: + schema: + type: object + required: + - params + properties: + params: + type: object + properties: + producer: + $ref: 'https://eosio.github.io/schemata/v2.0/oas/Name.yaml' + where_in_sequence: + type: integer + based_on_lib: + type: integer + responses: + '200': + description: OK + content: + application/json: + schema: + description: Returns Nothing diff --git a/plugins/trace_api_plugin/.clang-format b/plugins/trace_api_plugin/.clang-format new file mode 100644 index 00000000000..42dd5b7832c --- /dev/null +++ b/plugins/trace_api_plugin/.clang-format @@ -0,0 +1,8 @@ +BasedOnStyle: LLVM +IndentWidth: 3 +ColumnLimit: 120 +PointerAlignment: Left +AlwaysBreakTemplateDeclarations: true +AlignConsecutiveAssignments: true +AlignConsecutiveDeclarations: true +BreakConstructorInitializers: BeforeComma diff --git a/plugins/trace_api_plugin/CMakeLists.txt b/plugins/trace_api_plugin/CMakeLists.txt new file mode 100644 index 00000000000..c07c7fe7924 --- /dev/null +++ b/plugins/trace_api_plugin/CMakeLists.txt @@ -0,0 +1,12 @@ +file(GLOB HEADERS "include/eosio/trace_api_plugin/*.hpp") +add_library( trace_api_plugin + request_handler.cpp + store_provider.cpp + abi_data_handler.cpp + trace_api_plugin.cpp + ${HEADERS} ) + +target_link_libraries( trace_api_plugin chain_plugin http_plugin eosio_chain appbase ) +target_include_directories( trace_api_plugin PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" ) + +add_subdirectory( test ) diff --git a/plugins/trace_api_plugin/abi_data_handler.cpp b/plugins/trace_api_plugin/abi_data_handler.cpp new file mode 100644 index 00000000000..cb8113658b3 --- /dev/null +++ b/plugins/trace_api_plugin/abi_data_handler.cpp @@ -0,0 +1,26 @@ +#include +#include + +namespace eosio::trace_api { + + void abi_data_handler::add_abi( const chain::name& name, const chain::abi_def& abi ) { + abi_serializer_by_account.emplace(name, std::make_shared(abi, fc::microseconds::maximum())); + } + + fc::variant abi_data_handler::process_data(const action_trace_v0& action, const yield_function& yield ) { + if (abi_serializer_by_account.count(action.account) > 0) { + 
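         // an ABI was registered for this account via add_abi(); use its cached
         // serializer to decode the raw action data into a variant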
const auto& serializer_p = abi_serializer_by_account.at(action.account); + auto type_name = serializer_p->get_action_type(action.action); + + if (!type_name.empty()) { + try { + return serializer_p->binary_to_variant(type_name, action.data, fc::microseconds::maximum()); + } catch (...) { + except_handler(MAKE_EXCEPTION_WITH_CONTEXT(std::current_exception())); + } + } + } + + return {}; + } +} \ No newline at end of file diff --git a/plugins/trace_api_plugin/examples/abis/eosio.abi b/plugins/trace_api_plugin/examples/abis/eosio.abi new file mode 100644 index 00000000000..a2881a67dca --- /dev/null +++ b/plugins/trace_api_plugin/examples/abis/eosio.abi @@ -0,0 +1,2089 @@ +{ + "version": "eosio::abi/1.1", + "types": [], + "structs": [ + { + "name": "abi_hash", + "base": "", + "fields": [ + { + "name": "owner", + "type": "name" + }, + { + "name": "hash", + "type": "checksum256" + } + ] + }, + { + "name": "activate", + "base": "", + "fields": [ + { + "name": "feature_digest", + "type": "checksum256" + } + ] + }, + { + "name": "authority", + "base": "", + "fields": [ + { + "name": "threshold", + "type": "uint32" + }, + { + "name": "keys", + "type": "key_weight[]" + }, + { + "name": "accounts", + "type": "permission_level_weight[]" + }, + { + "name": "waits", + "type": "wait_weight[]" + } + ] + }, + { + "name": "bid_refund", + "base": "", + "fields": [ + { + "name": "bidder", + "type": "name" + }, + { + "name": "amount", + "type": "asset" + } + ] + }, + { + "name": "bidname", + "base": "", + "fields": [ + { + "name": "bidder", + "type": "name" + }, + { + "name": "newname", + "type": "name" + }, + { + "name": "bid", + "type": "asset" + } + ] + }, + { + "name": "bidrefund", + "base": "", + "fields": [ + { + "name": "bidder", + "type": "name" + }, + { + "name": "newname", + "type": "name" + } + ] + }, + { + "name": "block_header", + "base": "", + "fields": [ + { + "name": "timestamp", + "type": "uint32" + }, + { + "name": "producer", + "type": "name" + }, + { + "name": "confirmed", + "type": "uint16" + }, + { + "name": "previous", + "type": "checksum256" + }, + { + "name": "transaction_mroot", + "type": "checksum256" + }, + { + "name": "action_mroot", + "type": "checksum256" + }, + { + "name": "schedule_version", + "type": "uint32" + }, + { + "name": "new_producers", + "type": "producer_schedule?" 
+ } + ] + }, + { + "name": "blockchain_parameters", + "base": "", + "fields": [ + { + "name": "max_block_net_usage", + "type": "uint64" + }, + { + "name": "target_block_net_usage_pct", + "type": "uint32" + }, + { + "name": "max_transaction_net_usage", + "type": "uint32" + }, + { + "name": "base_per_transaction_net_usage", + "type": "uint32" + }, + { + "name": "net_usage_leeway", + "type": "uint32" + }, + { + "name": "context_free_discount_net_usage_num", + "type": "uint32" + }, + { + "name": "context_free_discount_net_usage_den", + "type": "uint32" + }, + { + "name": "max_block_cpu_usage", + "type": "uint32" + }, + { + "name": "target_block_cpu_usage_pct", + "type": "uint32" + }, + { + "name": "max_transaction_cpu_usage", + "type": "uint32" + }, + { + "name": "min_transaction_cpu_usage", + "type": "uint32" + }, + { + "name": "max_transaction_lifetime", + "type": "uint32" + }, + { + "name": "deferred_trx_expiration_window", + "type": "uint32" + }, + { + "name": "max_transaction_delay", + "type": "uint32" + }, + { + "name": "max_inline_action_size", + "type": "uint32" + }, + { + "name": "max_inline_action_depth", + "type": "uint16" + }, + { + "name": "max_authority_depth", + "type": "uint16" + } + ] + }, + { + "name": "buyram", + "base": "", + "fields": [ + { + "name": "payer", + "type": "name" + }, + { + "name": "receiver", + "type": "name" + }, + { + "name": "quant", + "type": "asset" + } + ] + }, + { + "name": "buyrambytes", + "base": "", + "fields": [ + { + "name": "payer", + "type": "name" + }, + { + "name": "receiver", + "type": "name" + }, + { + "name": "bytes", + "type": "uint32" + } + ] + }, + { + "name": "buyrex", + "base": "", + "fields": [ + { + "name": "from", + "type": "name" + }, + { + "name": "amount", + "type": "asset" + } + ] + }, + { + "name": "canceldelay", + "base": "", + "fields": [ + { + "name": "canceling_auth", + "type": "permission_level" + }, + { + "name": "trx_id", + "type": "checksum256" + } + ] + }, + { + "name": "claimrewards", + "base": "", + "fields": [ + { + "name": "owner", + "type": "name" + } + ] + }, + { + "name": "closerex", + "base": "", + "fields": [ + { + "name": "owner", + "type": "name" + } + ] + }, + { + "name": "cnclrexorder", + "base": "", + "fields": [ + { + "name": "owner", + "type": "name" + } + ] + }, + { + "name": "connector", + "base": "", + "fields": [ + { + "name": "balance", + "type": "asset" + }, + { + "name": "weight", + "type": "float64" + } + ] + }, + { + "name": "consolidate", + "base": "", + "fields": [ + { + "name": "owner", + "type": "name" + } + ] + }, + { + "name": "defcpuloan", + "base": "", + "fields": [ + { + "name": "from", + "type": "name" + }, + { + "name": "loan_num", + "type": "uint64" + }, + { + "name": "amount", + "type": "asset" + } + ] + }, + { + "name": "defnetloan", + "base": "", + "fields": [ + { + "name": "from", + "type": "name" + }, + { + "name": "loan_num", + "type": "uint64" + }, + { + "name": "amount", + "type": "asset" + } + ] + }, + { + "name": "delegatebw", + "base": "", + "fields": [ + { + "name": "from", + "type": "name" + }, + { + "name": "receiver", + "type": "name" + }, + { + "name": "stake_net_quantity", + "type": "asset" + }, + { + "name": "stake_cpu_quantity", + "type": "asset" + }, + { + "name": "transfer", + "type": "bool" + } + ] + }, + { + "name": "delegated_bandwidth", + "base": "", + "fields": [ + { + "name": "from", + "type": "name" + }, + { + "name": "to", + "type": "name" + }, + { + "name": "net_weight", + "type": "asset" + }, + { + "name": "cpu_weight", + "type": "asset" + } + ] + }, + 
{ + "name": "deleteauth", + "base": "", + "fields": [ + { + "name": "account", + "type": "name" + }, + { + "name": "permission", + "type": "name" + } + ] + }, + { + "name": "deposit", + "base": "", + "fields": [ + { + "name": "owner", + "type": "name" + }, + { + "name": "amount", + "type": "asset" + } + ] + }, + { + "name": "eosio_global_state", + "base": "blockchain_parameters", + "fields": [ + { + "name": "max_ram_size", + "type": "uint64" + }, + { + "name": "total_ram_bytes_reserved", + "type": "uint64" + }, + { + "name": "total_ram_stake", + "type": "int64" + }, + { + "name": "last_producer_schedule_update", + "type": "block_timestamp_type" + }, + { + "name": "last_pervote_bucket_fill", + "type": "time_point" + }, + { + "name": "pervote_bucket", + "type": "int64" + }, + { + "name": "perblock_bucket", + "type": "int64" + }, + { + "name": "total_unpaid_blocks", + "type": "uint32" + }, + { + "name": "total_activated_stake", + "type": "int64" + }, + { + "name": "thresh_activated_stake_time", + "type": "time_point" + }, + { + "name": "last_producer_schedule_size", + "type": "uint16" + }, + { + "name": "total_producer_vote_weight", + "type": "float64" + }, + { + "name": "last_name_close", + "type": "block_timestamp_type" + } + ] + }, + { + "name": "eosio_global_state2", + "base": "", + "fields": [ + { + "name": "new_ram_per_block", + "type": "uint16" + }, + { + "name": "last_ram_increase", + "type": "block_timestamp_type" + }, + { + "name": "last_block_num", + "type": "block_timestamp_type" + }, + { + "name": "total_producer_votepay_share", + "type": "float64" + }, + { + "name": "revision", + "type": "uint8" + } + ] + }, + { + "name": "eosio_global_state3", + "base": "", + "fields": [ + { + "name": "last_vpay_state_update", + "type": "time_point" + }, + { + "name": "total_vpay_share_change_rate", + "type": "float64" + } + ] + }, + { + "name": "eosio_global_state4", + "base": "", + "fields": [ + { + "name": "continuous_rate", + "type": "float64" + }, + { + "name": "inflation_pay_factor", + "type": "int64" + }, + { + "name": "votepay_factor", + "type": "int64" + } + ] + }, + { + "name": "exchange_state", + "base": "", + "fields": [ + { + "name": "supply", + "type": "asset" + }, + { + "name": "base", + "type": "connector" + }, + { + "name": "quote", + "type": "connector" + } + ] + }, + { + "name": "fundcpuloan", + "base": "", + "fields": [ + { + "name": "from", + "type": "name" + }, + { + "name": "loan_num", + "type": "uint64" + }, + { + "name": "payment", + "type": "asset" + } + ] + }, + { + "name": "fundnetloan", + "base": "", + "fields": [ + { + "name": "from", + "type": "name" + }, + { + "name": "loan_num", + "type": "uint64" + }, + { + "name": "payment", + "type": "asset" + } + ] + }, + { + "name": "init", + "base": "", + "fields": [ + { + "name": "version", + "type": "varuint32" + }, + { + "name": "core", + "type": "symbol" + } + ] + }, + { + "name": "key_weight", + "base": "", + "fields": [ + { + "name": "key", + "type": "public_key" + }, + { + "name": "weight", + "type": "uint16" + } + ] + }, + { + "name": "linkauth", + "base": "", + "fields": [ + { + "name": "account", + "type": "name" + }, + { + "name": "code", + "type": "name" + }, + { + "name": "type", + "type": "name" + }, + { + "name": "requirement", + "type": "name" + } + ] + }, + { + "name": "mvfrsavings", + "base": "", + "fields": [ + { + "name": "owner", + "type": "name" + }, + { + "name": "rex", + "type": "asset" + } + ] + }, + { + "name": "mvtosavings", + "base": "", + "fields": [ + { + "name": "owner", + "type": "name" + }, 
+ { + "name": "rex", + "type": "asset" + } + ] + }, + { + "name": "name_bid", + "base": "", + "fields": [ + { + "name": "newname", + "type": "name" + }, + { + "name": "high_bidder", + "type": "name" + }, + { + "name": "high_bid", + "type": "int64" + }, + { + "name": "last_bid_time", + "type": "time_point" + } + ] + }, + { + "name": "newaccount", + "base": "", + "fields": [ + { + "name": "creator", + "type": "name" + }, + { + "name": "name", + "type": "name" + }, + { + "name": "owner", + "type": "authority" + }, + { + "name": "active", + "type": "authority" + } + ] + }, + { + "name": "onblock", + "base": "", + "fields": [ + { + "name": "header", + "type": "block_header" + } + ] + }, + { + "name": "onerror", + "base": "", + "fields": [ + { + "name": "sender_id", + "type": "uint128" + }, + { + "name": "sent_trx", + "type": "bytes" + } + ] + }, + { + "name": "pair_time_point_sec_int64", + "base": "", + "fields": [ + { + "name": "key", + "type": "time_point_sec" + }, + { + "name": "value", + "type": "int64" + } + ] + }, + { + "name": "permission_level", + "base": "", + "fields": [ + { + "name": "actor", + "type": "name" + }, + { + "name": "permission", + "type": "name" + } + ] + }, + { + "name": "permission_level_weight", + "base": "", + "fields": [ + { + "name": "permission", + "type": "permission_level" + }, + { + "name": "weight", + "type": "uint16" + } + ] + }, + { + "name": "producer_info", + "base": "", + "fields": [ + { + "name": "owner", + "type": "name" + }, + { + "name": "total_votes", + "type": "float64" + }, + { + "name": "producer_key", + "type": "public_key" + }, + { + "name": "is_active", + "type": "bool" + }, + { + "name": "url", + "type": "string" + }, + { + "name": "unpaid_blocks", + "type": "uint32" + }, + { + "name": "last_claim_time", + "type": "time_point" + }, + { + "name": "location", + "type": "uint16" + } + ] + }, + { + "name": "producer_info2", + "base": "", + "fields": [ + { + "name": "owner", + "type": "name" + }, + { + "name": "votepay_share", + "type": "float64" + }, + { + "name": "last_votepay_share_update", + "type": "time_point" + } + ] + }, + { + "name": "producer_key", + "base": "", + "fields": [ + { + "name": "producer_name", + "type": "name" + }, + { + "name": "block_signing_key", + "type": "public_key" + } + ] + }, + { + "name": "producer_schedule", + "base": "", + "fields": [ + { + "name": "version", + "type": "uint32" + }, + { + "name": "producers", + "type": "producer_key[]" + } + ] + }, + { + "name": "refund", + "base": "", + "fields": [ + { + "name": "owner", + "type": "name" + } + ] + }, + { + "name": "refund_request", + "base": "", + "fields": [ + { + "name": "owner", + "type": "name" + }, + { + "name": "request_time", + "type": "time_point_sec" + }, + { + "name": "net_amount", + "type": "asset" + }, + { + "name": "cpu_amount", + "type": "asset" + } + ] + }, + { + "name": "regproducer", + "base": "", + "fields": [ + { + "name": "producer", + "type": "name" + }, + { + "name": "producer_key", + "type": "public_key" + }, + { + "name": "url", + "type": "string" + }, + { + "name": "location", + "type": "uint16" + } + ] + }, + { + "name": "regproxy", + "base": "", + "fields": [ + { + "name": "proxy", + "type": "name" + }, + { + "name": "isproxy", + "type": "bool" + } + ] + }, + { + "name": "rentcpu", + "base": "", + "fields": [ + { + "name": "from", + "type": "name" + }, + { + "name": "receiver", + "type": "name" + }, + { + "name": "loan_payment", + "type": "asset" + }, + { + "name": "loan_fund", + "type": "asset" + } + ] + }, + { + "name": "rentnet", + 
"base": "", + "fields": [ + { + "name": "from", + "type": "name" + }, + { + "name": "receiver", + "type": "name" + }, + { + "name": "loan_payment", + "type": "asset" + }, + { + "name": "loan_fund", + "type": "asset" + } + ] + }, + { + "name": "rex_balance", + "base": "", + "fields": [ + { + "name": "version", + "type": "uint8" + }, + { + "name": "owner", + "type": "name" + }, + { + "name": "vote_stake", + "type": "asset" + }, + { + "name": "rex_balance", + "type": "asset" + }, + { + "name": "matured_rex", + "type": "int64" + }, + { + "name": "rex_maturities", + "type": "pair_time_point_sec_int64[]" + } + ] + }, + { + "name": "rex_fund", + "base": "", + "fields": [ + { + "name": "version", + "type": "uint8" + }, + { + "name": "owner", + "type": "name" + }, + { + "name": "balance", + "type": "asset" + } + ] + }, + { + "name": "rex_loan", + "base": "", + "fields": [ + { + "name": "version", + "type": "uint8" + }, + { + "name": "from", + "type": "name" + }, + { + "name": "receiver", + "type": "name" + }, + { + "name": "payment", + "type": "asset" + }, + { + "name": "balance", + "type": "asset" + }, + { + "name": "total_staked", + "type": "asset" + }, + { + "name": "loan_num", + "type": "uint64" + }, + { + "name": "expiration", + "type": "time_point" + } + ] + }, + { + "name": "rex_order", + "base": "", + "fields": [ + { + "name": "version", + "type": "uint8" + }, + { + "name": "owner", + "type": "name" + }, + { + "name": "rex_requested", + "type": "asset" + }, + { + "name": "proceeds", + "type": "asset" + }, + { + "name": "stake_change", + "type": "asset" + }, + { + "name": "order_time", + "type": "time_point" + }, + { + "name": "is_open", + "type": "bool" + } + ] + }, + { + "name": "rex_pool", + "base": "", + "fields": [ + { + "name": "version", + "type": "uint8" + }, + { + "name": "total_lent", + "type": "asset" + }, + { + "name": "total_unlent", + "type": "asset" + }, + { + "name": "total_rent", + "type": "asset" + }, + { + "name": "total_lendable", + "type": "asset" + }, + { + "name": "total_rex", + "type": "asset" + }, + { + "name": "namebid_proceeds", + "type": "asset" + }, + { + "name": "loan_num", + "type": "uint64" + } + ] + }, + { + "name": "rex_return_buckets", + "base": "", + "fields": [ + { + "name": "version", + "type": "uint8" + }, + { + "name": "return_buckets", + "type": "pair_time_point_sec_int64[]" + } + ] + }, + { + "name": "rex_return_pool", + "base": "", + "fields": [ + { + "name": "version", + "type": "uint8" + }, + { + "name": "last_dist_time", + "type": "time_point_sec" + }, + { + "name": "pending_bucket_time", + "type": "time_point_sec" + }, + { + "name": "oldest_bucket_time", + "type": "time_point_sec" + }, + { + "name": "pending_bucket_proceeds", + "type": "int64" + }, + { + "name": "current_rate_of_increase", + "type": "int64" + }, + { + "name": "proceeds", + "type": "int64" + } + ] + }, + { + "name": "rexexec", + "base": "", + "fields": [ + { + "name": "user", + "type": "name" + }, + { + "name": "max", + "type": "uint16" + } + ] + }, + { + "name": "rmvproducer", + "base": "", + "fields": [ + { + "name": "producer", + "type": "name" + } + ] + }, + { + "name": "sellram", + "base": "", + "fields": [ + { + "name": "account", + "type": "name" + }, + { + "name": "bytes", + "type": "int64" + } + ] + }, + { + "name": "sellrex", + "base": "", + "fields": [ + { + "name": "from", + "type": "name" + }, + { + "name": "rex", + "type": "asset" + } + ] + }, + { + "name": "setabi", + "base": "", + "fields": [ + { + "name": "account", + "type": "name" + }, + { + "name": "abi", + 
"type": "bytes" + } + ] + }, + { + "name": "setacctcpu", + "base": "", + "fields": [ + { + "name": "account", + "type": "name" + }, + { + "name": "cpu_weight", + "type": "int64?" + } + ] + }, + { + "name": "setacctnet", + "base": "", + "fields": [ + { + "name": "account", + "type": "name" + }, + { + "name": "net_weight", + "type": "int64?" + } + ] + }, + { + "name": "setacctram", + "base": "", + "fields": [ + { + "name": "account", + "type": "name" + }, + { + "name": "ram_bytes", + "type": "int64?" + } + ] + }, + { + "name": "setalimits", + "base": "", + "fields": [ + { + "name": "account", + "type": "name" + }, + { + "name": "ram_bytes", + "type": "int64" + }, + { + "name": "net_weight", + "type": "int64" + }, + { + "name": "cpu_weight", + "type": "int64" + } + ] + }, + { + "name": "setcode", + "base": "", + "fields": [ + { + "name": "account", + "type": "name" + }, + { + "name": "vmtype", + "type": "uint8" + }, + { + "name": "vmversion", + "type": "uint8" + }, + { + "name": "code", + "type": "bytes" + } + ] + }, + { + "name": "setinflation", + "base": "", + "fields": [ + { + "name": "annual_rate", + "type": "int64" + }, + { + "name": "inflation_pay_factor", + "type": "int64" + }, + { + "name": "votepay_factor", + "type": "int64" + } + ] + }, + { + "name": "setparams", + "base": "", + "fields": [ + { + "name": "params", + "type": "blockchain_parameters" + } + ] + }, + { + "name": "setpriv", + "base": "", + "fields": [ + { + "name": "account", + "type": "name" + }, + { + "name": "is_priv", + "type": "uint8" + } + ] + }, + { + "name": "setram", + "base": "", + "fields": [ + { + "name": "max_ram_size", + "type": "uint64" + } + ] + }, + { + "name": "setramrate", + "base": "", + "fields": [ + { + "name": "bytes_per_block", + "type": "uint16" + } + ] + }, + { + "name": "setrex", + "base": "", + "fields": [ + { + "name": "balance", + "type": "asset" + } + ] + }, + { + "name": "undelegatebw", + "base": "", + "fields": [ + { + "name": "from", + "type": "name" + }, + { + "name": "receiver", + "type": "name" + }, + { + "name": "unstake_net_quantity", + "type": "asset" + }, + { + "name": "unstake_cpu_quantity", + "type": "asset" + } + ] + }, + { + "name": "unlinkauth", + "base": "", + "fields": [ + { + "name": "account", + "type": "name" + }, + { + "name": "code", + "type": "name" + }, + { + "name": "type", + "type": "name" + } + ] + }, + { + "name": "unregprod", + "base": "", + "fields": [ + { + "name": "producer", + "type": "name" + } + ] + }, + { + "name": "unstaketorex", + "base": "", + "fields": [ + { + "name": "owner", + "type": "name" + }, + { + "name": "receiver", + "type": "name" + }, + { + "name": "from_net", + "type": "asset" + }, + { + "name": "from_cpu", + "type": "asset" + } + ] + }, + { + "name": "updateauth", + "base": "", + "fields": [ + { + "name": "account", + "type": "name" + }, + { + "name": "permission", + "type": "name" + }, + { + "name": "parent", + "type": "name" + }, + { + "name": "auth", + "type": "authority" + } + ] + }, + { + "name": "updaterex", + "base": "", + "fields": [ + { + "name": "owner", + "type": "name" + } + ] + }, + { + "name": "updtrevision", + "base": "", + "fields": [ + { + "name": "revision", + "type": "uint8" + } + ] + }, + { + "name": "user_resources", + "base": "", + "fields": [ + { + "name": "owner", + "type": "name" + }, + { + "name": "net_weight", + "type": "asset" + }, + { + "name": "cpu_weight", + "type": "asset" + }, + { + "name": "ram_bytes", + "type": "int64" + } + ] + }, + { + "name": "voteproducer", + "base": "", + "fields": [ + { + "name": 
"voter", + "type": "name" + }, + { + "name": "proxy", + "type": "name" + }, + { + "name": "producers", + "type": "name[]" + } + ] + }, + { + "name": "voter_info", + "base": "", + "fields": [ + { + "name": "owner", + "type": "name" + }, + { + "name": "proxy", + "type": "name" + }, + { + "name": "producers", + "type": "name[]" + }, + { + "name": "staked", + "type": "int64" + }, + { + "name": "last_vote_weight", + "type": "float64" + }, + { + "name": "proxied_vote_weight", + "type": "float64" + }, + { + "name": "is_proxy", + "type": "bool" + }, + { + "name": "flags1", + "type": "uint32" + }, + { + "name": "reserved2", + "type": "uint32" + }, + { + "name": "reserved3", + "type": "asset" + } + ] + }, + { + "name": "wait_weight", + "base": "", + "fields": [ + { + "name": "wait_sec", + "type": "uint32" + }, + { + "name": "weight", + "type": "uint16" + } + ] + }, + { + "name": "withdraw", + "base": "", + "fields": [ + { + "name": "owner", + "type": "name" + }, + { + "name": "amount", + "type": "asset" + } + ] + } + ], + "actions": [ + { + "name": "activate", + "type": "activate", + "ricardian_contract": "" + }, + { + "name": "bidname", + "type": "bidname", + "ricardian_contract": "" + }, + { + "name": "bidrefund", + "type": "bidrefund", + "ricardian_contract": "" + }, + { + "name": "buyram", + "type": "buyram", + "ricardian_contract": "" + }, + { + "name": "buyrambytes", + "type": "buyrambytes", + "ricardian_contract": "" + }, + { + "name": "buyrex", + "type": "buyrex", + "ricardian_contract": "" + }, + { + "name": "canceldelay", + "type": "canceldelay", + "ricardian_contract": "" + }, + { + "name": "claimrewards", + "type": "claimrewards", + "ricardian_contract": "" + }, + { + "name": "closerex", + "type": "closerex", + "ricardian_contract": "" + }, + { + "name": "cnclrexorder", + "type": "cnclrexorder", + "ricardian_contract": "" + }, + { + "name": "consolidate", + "type": "consolidate", + "ricardian_contract": "" + }, + { + "name": "defcpuloan", + "type": "defcpuloan", + "ricardian_contract": "" + }, + { + "name": "defnetloan", + "type": "defnetloan", + "ricardian_contract": "" + }, + { + "name": "delegatebw", + "type": "delegatebw", + "ricardian_contract": "" + }, + { + "name": "deleteauth", + "type": "deleteauth", + "ricardian_contract": "" + }, + { + "name": "deposit", + "type": "deposit", + "ricardian_contract": "" + }, + { + "name": "fundcpuloan", + "type": "fundcpuloan", + "ricardian_contract": "" + }, + { + "name": "fundnetloan", + "type": "fundnetloan", + "ricardian_contract": "" + }, + { + "name": "init", + "type": "init", + "ricardian_contract": "" + }, + { + "name": "linkauth", + "type": "linkauth", + "ricardian_contract": "" + }, + { + "name": "mvfrsavings", + "type": "mvfrsavings", + "ricardian_contract": "" + }, + { + "name": "mvtosavings", + "type": "mvtosavings", + "ricardian_contract": "" + }, + { + "name": "newaccount", + "type": "newaccount", + "ricardian_contract": "" + }, + { + "name": "onblock", + "type": "onblock", + "ricardian_contract": "" + }, + { + "name": "onerror", + "type": "onerror", + "ricardian_contract": "" + }, + { + "name": "refund", + "type": "refund", + "ricardian_contract": "" + }, + { + "name": "regproducer", + "type": "regproducer", + "ricardian_contract": "" + }, + { + "name": "regproxy", + "type": "regproxy", + "ricardian_contract": "" + }, + { + "name": "rentcpu", + "type": "rentcpu", + "ricardian_contract": "" + }, + { + "name": "rentnet", + "type": "rentnet", + "ricardian_contract": "" + }, + { + "name": "rexexec", + "type": "rexexec", + 
"ricardian_contract": "" + }, + { + "name": "rmvproducer", + "type": "rmvproducer", + "ricardian_contract": "" + }, + { + "name": "sellram", + "type": "sellram", + "ricardian_contract": "" + }, + { + "name": "sellrex", + "type": "sellrex", + "ricardian_contract": "" + }, + { + "name": "setabi", + "type": "setabi", + "ricardian_contract": "" + }, + { + "name": "setacctcpu", + "type": "setacctcpu", + "ricardian_contract": "" + }, + { + "name": "setacctnet", + "type": "setacctnet", + "ricardian_contract": "" + }, + { + "name": "setacctram", + "type": "setacctram", + "ricardian_contract": "" + }, + { + "name": "setalimits", + "type": "setalimits", + "ricardian_contract": "" + }, + { + "name": "setcode", + "type": "setcode", + "ricardian_contract": "" + }, + { + "name": "setinflation", + "type": "setinflation", + "ricardian_contract": "" + }, + { + "name": "setparams", + "type": "setparams", + "ricardian_contract": "" + }, + { + "name": "setpriv", + "type": "setpriv", + "ricardian_contract": "" + }, + { + "name": "setram", + "type": "setram", + "ricardian_contract": "" + }, + { + "name": "setramrate", + "type": "setramrate", + "ricardian_contract": "" + }, + { + "name": "setrex", + "type": "setrex", + "ricardian_contract": "" + }, + { + "name": "undelegatebw", + "type": "undelegatebw", + "ricardian_contract": "" + }, + { + "name": "unlinkauth", + "type": "unlinkauth", + "ricardian_contract": "" + }, + { + "name": "unregprod", + "type": "unregprod", + "ricardian_contract": "" + }, + { + "name": "unstaketorex", + "type": "unstaketorex", + "ricardian_contract": "" + }, + { + "name": "updateauth", + "type": "updateauth", + "ricardian_contract": "" + }, + { + "name": "updaterex", + "type": "updaterex", + "ricardian_contract": "" + }, + { + "name": "updtrevision", + "type": "updtrevision", + "ricardian_contract": "" + }, + { + "name": "voteproducer", + "type": "voteproducer", + "ricardian_contract": "" + }, + { + "name": "withdraw", + "type": "withdraw", + "ricardian_contract": "" + } + ], + "tables": [ + { + "name": "abihash", + "index_type": "i64", + "key_names": [], + "key_types": [], + "type": "abi_hash" + }, + { + "name": "bidrefunds", + "index_type": "i64", + "key_names": [], + "key_types": [], + "type": "bid_refund" + }, + { + "name": "cpuloan", + "index_type": "i64", + "key_names": [], + "key_types": [], + "type": "rex_loan" + }, + { + "name": "delband", + "index_type": "i64", + "key_names": [], + "key_types": [], + "type": "delegated_bandwidth" + }, + { + "name": "global", + "index_type": "i64", + "key_names": [], + "key_types": [], + "type": "eosio_global_state" + }, + { + "name": "global2", + "index_type": "i64", + "key_names": [], + "key_types": [], + "type": "eosio_global_state2" + }, + { + "name": "global3", + "index_type": "i64", + "key_names": [], + "key_types": [], + "type": "eosio_global_state3" + }, + { + "name": "global4", + "index_type": "i64", + "key_names": [], + "key_types": [], + "type": "eosio_global_state4" + }, + { + "name": "namebids", + "index_type": "i64", + "key_names": [], + "key_types": [], + "type": "name_bid" + }, + { + "name": "netloan", + "index_type": "i64", + "key_names": [], + "key_types": [], + "type": "rex_loan" + }, + { + "name": "producers", + "index_type": "i64", + "key_names": [], + "key_types": [], + "type": "producer_info" + }, + { + "name": "producers2", + "index_type": "i64", + "key_names": [], + "key_types": [], + "type": "producer_info2" + }, + { + "name": "rammarket", + "index_type": "i64", + "key_names": [], + "key_types": [], + "type": 
"exchange_state" + }, + { + "name": "refunds", + "index_type": "i64", + "key_names": [], + "key_types": [], + "type": "refund_request" + }, + { + "name": "retbuckets", + "index_type": "i64", + "key_names": [], + "key_types": [], + "type": "rex_return_buckets" + }, + { + "name": "rexbal", + "index_type": "i64", + "key_names": [], + "key_types": [], + "type": "rex_balance" + }, + { + "name": "rexfund", + "index_type": "i64", + "key_names": [], + "key_types": [], + "type": "rex_fund" + }, + { + "name": "rexpool", + "index_type": "i64", + "key_names": [], + "key_types": [], + "type": "rex_pool" + }, + { + "name": "rexqueue", + "index_type": "i64", + "key_names": [], + "key_types": [], + "type": "rex_order" + }, + { + "name": "rexretpool", + "index_type": "i64", + "key_names": [], + "key_types": [], + "type": "rex_return_pool" + }, + { + "name": "userres", + "index_type": "i64", + "key_names": [], + "key_types": [], + "type": "user_resources" + }, + { + "name": "voters", + "index_type": "i64", + "key_names": [], + "key_types": [], + "type": "voter_info" + } + ], + "ricardian_clauses": [], + "error_messages": [], + "abi_extensions": [], + "variants": [] +} diff --git a/plugins/trace_api_plugin/examples/abis/eosio.msig.abi b/plugins/trace_api_plugin/examples/abis/eosio.msig.abi new file mode 100644 index 00000000000..f2c32898f1b --- /dev/null +++ b/plugins/trace_api_plugin/examples/abis/eosio.msig.abi @@ -0,0 +1,360 @@ +{ + "version": "eosio::abi/1.1", + "types": [], + "structs": [ + { + "name": "action", + "base": "", + "fields": [ + { + "name": "account", + "type": "name" + }, + { + "name": "name", + "type": "name" + }, + { + "name": "authorization", + "type": "permission_level[]" + }, + { + "name": "data", + "type": "bytes" + } + ] + }, + { + "name": "approval", + "base": "", + "fields": [ + { + "name": "level", + "type": "permission_level" + }, + { + "name": "time", + "type": "time_point" + } + ] + }, + { + "name": "approvals_info", + "base": "", + "fields": [ + { + "name": "version", + "type": "uint8" + }, + { + "name": "proposal_name", + "type": "name" + }, + { + "name": "requested_approvals", + "type": "approval[]" + }, + { + "name": "provided_approvals", + "type": "approval[]" + } + ] + }, + { + "name": "approve", + "base": "", + "fields": [ + { + "name": "proposer", + "type": "name" + }, + { + "name": "proposal_name", + "type": "name" + }, + { + "name": "level", + "type": "permission_level" + }, + { + "name": "proposal_hash", + "type": "checksum256$" + } + ] + }, + { + "name": "cancel", + "base": "", + "fields": [ + { + "name": "proposer", + "type": "name" + }, + { + "name": "proposal_name", + "type": "name" + }, + { + "name": "canceler", + "type": "name" + } + ] + }, + { + "name": "exec", + "base": "", + "fields": [ + { + "name": "proposer", + "type": "name" + }, + { + "name": "proposal_name", + "type": "name" + }, + { + "name": "executer", + "type": "name" + } + ] + }, + { + "name": "extension", + "base": "", + "fields": [ + { + "name": "type", + "type": "uint16" + }, + { + "name": "data", + "type": "bytes" + } + ] + }, + { + "name": "invalidate", + "base": "", + "fields": [ + { + "name": "account", + "type": "name" + } + ] + }, + { + "name": "invalidation", + "base": "", + "fields": [ + { + "name": "account", + "type": "name" + }, + { + "name": "last_invalidation_time", + "type": "time_point" + } + ] + }, + { + "name": "old_approvals_info", + "base": "", + "fields": [ + { + "name": "proposal_name", + "type": "name" + }, + { + "name": "requested_approvals", + "type": 
"permission_level[]" + }, + { + "name": "provided_approvals", + "type": "permission_level[]" + } + ] + }, + { + "name": "permission_level", + "base": "", + "fields": [ + { + "name": "actor", + "type": "name" + }, + { + "name": "permission", + "type": "name" + } + ] + }, + { + "name": "proposal", + "base": "", + "fields": [ + { + "name": "proposal_name", + "type": "name" + }, + { + "name": "packed_transaction", + "type": "bytes" + } + ] + }, + { + "name": "propose", + "base": "", + "fields": [ + { + "name": "proposer", + "type": "name" + }, + { + "name": "proposal_name", + "type": "name" + }, + { + "name": "requested", + "type": "permission_level[]" + }, + { + "name": "trx", + "type": "transaction" + } + ] + }, + { + "name": "transaction", + "base": "transaction_header", + "fields": [ + { + "name": "context_free_actions", + "type": "action[]" + }, + { + "name": "actions", + "type": "action[]" + }, + { + "name": "transaction_extensions", + "type": "extension[]" + } + ] + }, + { + "name": "transaction_header", + "base": "", + "fields": [ + { + "name": "expiration", + "type": "time_point_sec" + }, + { + "name": "ref_block_num", + "type": "uint16" + }, + { + "name": "ref_block_prefix", + "type": "uint32" + }, + { + "name": "max_net_usage_words", + "type": "varuint32" + }, + { + "name": "max_cpu_usage_ms", + "type": "uint8" + }, + { + "name": "delay_sec", + "type": "varuint32" + } + ] + }, + { + "name": "unapprove", + "base": "", + "fields": [ + { + "name": "proposer", + "type": "name" + }, + { + "name": "proposal_name", + "type": "name" + }, + { + "name": "level", + "type": "permission_level" + } + ] + } + ], + "actions": [ + { + "name": "approve", + "type": "approve", + "ricardian_contract": "" + }, + { + "name": "cancel", + "type": "cancel", + "ricardian_contract": "" + }, + { + "name": "exec", + "type": "exec", + "ricardian_contract": "" + }, + { + "name": "invalidate", + "type": "invalidate", + "ricardian_contract": "" + }, + { + "name": "propose", + "type": "propose", + "ricardian_contract": "" + }, + { + "name": "unapprove", + "type": "unapprove", + "ricardian_contract": "" + } + ], + "tables": [ + { + "name": "approvals", + "index_type": "i64", + "key_names": [], + "key_types": [], + "type": "old_approvals_info" + }, + { + "name": "approvals2", + "index_type": "i64", + "key_names": [], + "key_types": [], + "type": "approvals_info" + }, + { + "name": "invals", + "index_type": "i64", + "key_names": [], + "key_types": [], + "type": "invalidation" + }, + { + "name": "proposal", + "index_type": "i64", + "key_names": [], + "key_types": [], + "type": "proposal" + } + ], + "ricardian_clauses": [], + "error_messages": [], + "abi_extensions": [], + "variants": [] +} diff --git a/plugins/trace_api_plugin/examples/abis/eosio.token.abi b/plugins/trace_api_plugin/examples/abis/eosio.token.abi new file mode 100644 index 00000000000..6d3421c17c9 --- /dev/null +++ b/plugins/trace_api_plugin/examples/abis/eosio.token.abi @@ -0,0 +1,186 @@ +{ + "version": "eosio::abi/1.1", + "types": [], + "structs": [ + { + "name": "account", + "base": "", + "fields": [ + { + "name": "balance", + "type": "asset" + } + ] + }, + { + "name": "close", + "base": "", + "fields": [ + { + "name": "owner", + "type": "name" + }, + { + "name": "symbol", + "type": "symbol" + } + ] + }, + { + "name": "create", + "base": "", + "fields": [ + { + "name": "issuer", + "type": "name" + }, + { + "name": "maximum_supply", + "type": "asset" + } + ] + }, + { + "name": "currency_stats", + "base": "", + "fields": [ + { + "name": "supply", + 
"type": "asset" + }, + { + "name": "max_supply", + "type": "asset" + }, + { + "name": "issuer", + "type": "name" + } + ] + }, + { + "name": "issue", + "base": "", + "fields": [ + { + "name": "to", + "type": "name" + }, + { + "name": "quantity", + "type": "asset" + }, + { + "name": "memo", + "type": "string" + } + ] + }, + { + "name": "open", + "base": "", + "fields": [ + { + "name": "owner", + "type": "name" + }, + { + "name": "symbol", + "type": "symbol" + }, + { + "name": "ram_payer", + "type": "name" + } + ] + }, + { + "name": "retire", + "base": "", + "fields": [ + { + "name": "quantity", + "type": "asset" + }, + { + "name": "memo", + "type": "string" + } + ] + }, + { + "name": "transfer", + "base": "", + "fields": [ + { + "name": "from", + "type": "name" + }, + { + "name": "to", + "type": "name" + }, + { + "name": "quantity", + "type": "asset" + }, + { + "name": "memo", + "type": "string" + } + ] + } + ], + "actions": [ + { + "name": "close", + "type": "close", + "ricardian_contract": "" + }, + { + "name": "create", + "type": "create", + "ricardian_contract": "" + }, + { + "name": "issue", + "type": "issue", + "ricardian_contract": "" + }, + { + "name": "open", + "type": "open", + "ricardian_contract": "" + }, + { + "name": "retire", + "type": "retire", + "ricardian_contract": "" + }, + { + "name": "transfer", + "type": "transfer", + "ricardian_contract": "" + } + ], + "tables": [ + { + "name": "accounts", + "index_type": "i64", + "key_names": [], + "key_types": [], + "type": "account" + }, + { + "name": "stat", + "index_type": "i64", + "key_names": [], + "key_types": [], + "type": "currency_stats" + } + ], + "ricardian_clauses": [], + "error_messages": [], + "abi_extensions": [], + "variants": [] +} diff --git a/plugins/trace_api_plugin/examples/abis/eosio.wrap.abi b/plugins/trace_api_plugin/examples/abis/eosio.wrap.abi new file mode 100644 index 00000000000..aaa54848432 --- /dev/null +++ b/plugins/trace_api_plugin/examples/abis/eosio.wrap.abi @@ -0,0 +1,143 @@ +{ + "version": "eosio::abi/1.0", + "types": [ + { + "new_type_name": "account_name", + "type": "name" + }, + { + "new_type_name": "permission_name", + "type": "name" + }, + { + "new_type_name": "action_name", + "type": "name" + } + ], + "structs": [ + { + "name": "permission_level", + "base": "", + "fields": [ + { + "name": "actor", + "type": "account_name" + }, + { + "name": "permission", + "type": "permission_name" + } + ] + }, + { + "name": "action", + "base": "", + "fields": [ + { + "name": "account", + "type": "account_name" + }, + { + "name": "name", + "type": "action_name" + }, + { + "name": "authorization", + "type": "permission_level[]" + }, + { + "name": "data", + "type": "bytes" + } + ] + }, + { + "name": "transaction_header", + "base": "", + "fields": [ + { + "name": "expiration", + "type": "time_point_sec" + }, + { + "name": "ref_block_num", + "type": "uint16" + }, + { + "name": "ref_block_prefix", + "type": "uint32" + }, + { + "name": "max_net_usage_words", + "type": "varuint32" + }, + { + "name": "max_cpu_usage_ms", + "type": "uint8" + }, + { + "name": "delay_sec", + "type": "varuint32" + } + ] + }, + { + "name": "extension", + "base": "", + "fields": [ + { + "name": "type", + "type": "uint16" + }, + { + "name": "data", + "type": "bytes" + } + ] + }, + { + "name": "transaction", + "base": "transaction_header", + "fields": [ + { + "name": "context_free_actions", + "type": "action[]" + }, + { + "name": "actions", + "type": "action[]" + }, + { + "name": "transaction_extensions", + "type": "extension[]" + } + ] + 
+    },
+    {
+      "name": "exec",
+      "base": "",
+      "fields": [
+        {
+          "name": "executer",
+          "type": "account_name"
+        },
+        {
+          "name": "trx",
+          "type": "transaction"
+        }
+      ]
+    }
+  ],
+  "actions": [
+    {
+      "name": "exec",
+      "type": "exec",
+      "ricardian_contract": ""
+    }
+  ],
+  "tables": [],
+  "ricardian_clauses": [],
+  "error_messages": [],
+  "abi_extensions": [],
+  "variants": []
+}
diff --git a/plugins/trace_api_plugin/include/eosio/trace_api/abi_data_handler.hpp b/plugins/trace_api_plugin/include/eosio/trace_api/abi_data_handler.hpp
new file mode 100644
index 00000000000..d6c2355f59a
--- /dev/null
+++ b/plugins/trace_api_plugin/include/eosio/trace_api/abi_data_handler.hpp
@@ -0,0 +1,62 @@
+#pragma once
+
+#include <eosio/trace_api/trace.hpp>
+#include <eosio/trace_api/common.hpp>
+#include <eosio/chain/abi_def.hpp>
+
+namespace eosio {
+   namespace chain {
+      struct abi_serializer;
+   }
+
+   namespace trace_api {
+
+   /**
+    * Data Handler that uses eosio::chain::abi_serializer to decode data with a known set of ABIs.
+    * Can be used directly as a Data_handler_provider OR shared between request_handlers using the
+    * ::shared_provider abstraction.
+    */
+   class abi_data_handler {
+   public:
+      explicit abi_data_handler( exception_handler except_handler = {} )
+      :except_handler( std::move( except_handler ) )
+      {
+      }
+
+      /**
+       * Add an ABI definition to this data handler
+       * @param name - the name of the account/contract that this ABI belongs to
+       * @param abi - the ABI definition for that account
+       */
+      void add_abi( const chain::name& name, const chain::abi_def& abi );
+
+      /**
+       * Given an action trace, produce a variant that represents the `data` field in the trace
+       *
+       * @param action - trace of the action including metadata necessary for finding the ABI
+       * @param yield - a yield function to allow cooperation during long running tasks
+       * @return variant representing the `data` field of the action interpreted by known ABIs OR an empty variant
+       */
+      fc::variant process_data( const action_trace_v0& action, const yield_function& yield = {});
+
+      /**
+       * Utility class that allows multiple request_handlers to share the same abi_data_handler
+       */
+      class shared_provider {
+      public:
+         explicit shared_provider(const std::shared_ptr<abi_data_handler>& handler)
+         :handler(handler)
+         {}
+
+         fc::variant process_data( const action_trace_v0& action, const yield_function& yield = {}) {
+            return handler->process_data(action, yield);
+         }
+
+         std::shared_ptr<abi_data_handler> handler;
+      };
+
+   private:
+      std::map<chain::name, std::shared_ptr<chain::abi_serializer>> abi_serializer_by_account;
+      exception_handler except_handler;
+   };
+} }
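(For orientation while reading the patch: a minimal sketch of how this handler could be fed one of the ABI files above and asked to decode an action. The variable names and sample values are illustrative assumptions, not part of the patch.)

   // hypothetical usage, assuming token_abi is a chain::abi_def parsed from eosio.token.abi
   auto handler = std::make_shared<eosio::trace_api::abi_data_handler>();
   handler->add_abi( N(eosio.token), token_abi );
   eosio::trace_api::action_trace_v0 at;
   at.account = N(eosio.token);          // the account whose ABI will be used for decoding
   at.action  = N(transfer);
   at.data    = raw_action_payload;      // assumed: serialized transfer arguments (chain::bytes)
   fc::variant params = handler->process_data( at );   // empty variant if no matching ABI is known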
diff --git a/plugins/trace_api_plugin/include/eosio/trace_api/chain_extraction.hpp b/plugins/trace_api_plugin/include/eosio/trace_api/chain_extraction.hpp
new file mode 100644
index 00000000000..bf5e2506775
--- /dev/null
+++ b/plugins/trace_api_plugin/include/eosio/trace_api/chain_extraction.hpp
@@ -0,0 +1,126 @@
+#pragma once
+
+#include <eosio/trace_api/trace.hpp>
+#include <eosio/trace_api/common.hpp>
+#include <eosio/trace_api/extract_util.hpp>
+#include <eosio/chain/block_state.hpp>
+#include <eosio/chain/trace.hpp>
+#include <eosio/chain/transaction.hpp>
+
+namespace eosio { namespace trace_api {
+
+using chain::transaction_id_type;
+using chain::packed_transaction;
+
+template <typename StoreProvider>
+class chain_extraction_impl_type {
+public:
+   /**
+    * Chain Extractor for capturing transaction traces, action traces, and block info.
+    * @param store provider of append & append_lib
+    * @param except_handler called on exceptions; any logging is left to the user
+    */
+   chain_extraction_impl_type( StoreProvider store, exception_handler except_handler )
+   : store(std::move(store))
+   , except_handler(std::move(except_handler))
+   {}
+
+   /// connect to chain controller applied_transaction signal
+   void signal_applied_transaction( const chain::transaction_trace_ptr& trace, const chain::signed_transaction& strx ) {
+      on_applied_transaction( trace, strx );
+   }
+
+   /// connect to chain controller accepted_block signal
+   void signal_accepted_block( const chain::block_state_ptr& bsp ) {
+      on_accepted_block( bsp );
+   }
+
+   /// connect to chain controller irreversible_block signal
+   void signal_irreversible_block( const chain::block_state_ptr& bsp ) {
+      on_irreversible_block( bsp );
+   }
+
+private:
+   static bool is_onblock(const chain::transaction_trace_ptr& p) {
+      if (p->action_traces.size() != 1)
+         return false;
+      const auto& act = p->action_traces[0].act;
+      if (act.account != eosio::chain::config::system_account_name || act.name != N(onblock) ||
+          act.authorization.size() != 1)
+         return false;
+      const auto& auth = act.authorization[0];
+      return auth.actor == eosio::chain::config::system_account_name &&
+             auth.permission == eosio::chain::config::active_name;
+   }
+
+   void on_applied_transaction(const chain::transaction_trace_ptr& trace, const chain::signed_transaction& t) {
+      if( !trace->receipt ) return;
+      // include only executed transactions; soft_fail included so that onerror (and any inlines via onerror) are included
+      if((trace->receipt->status != chain::transaction_receipt_header::executed &&
+          trace->receipt->status != chain::transaction_receipt_header::soft_fail)) {
+         return;
+      }
+      if( is_onblock( trace )) {
+         onblock_trace.emplace( trace );
+      } else if( trace->failed_dtrx_trace ) {
+         cached_traces[trace->failed_dtrx_trace->id] = trace;
+      } else {
+         cached_traces[trace->id] = trace;
+      }
+   }
+
+   void on_accepted_block(const chain::block_state_ptr& block_state) {
+      store_block_trace( block_state );
+   }
+
+   void on_irreversible_block( const chain::block_state_ptr& block_state ) {
+      store_lib( block_state );
+   }
+
+   void store_block_trace( const chain::block_state_ptr& block_state ) {
+      try {
+         block_trace_v0 bt = create_block_trace_v0( block_state );
+
+         std::vector<transaction_trace_v0>& traces = bt.transactions;
+         traces.reserve( block_state->block->transactions.size() + 1 );
+         if( onblock_trace )
+            traces.emplace_back( to_transaction_trace_v0( *onblock_trace ));
+         for( const auto& r : block_state->block->transactions ) {
+            transaction_id_type id;
+            if( r.trx.contains<transaction_id_type>()) {
+               id = r.trx.get<transaction_id_type>();
+            } else {
+               id = r.trx.get<packed_transaction>().id();
+            }
+            const auto it = cached_traces.find( id );
+            if( it != cached_traces.end() ) {
+               traces.emplace_back( to_transaction_trace_v0( it->second ));
+            }
+         }
+         cached_traces.clear();
+         onblock_trace.reset();
+
+         store.append( std::move( bt ) );
+
+      } catch( ... ) {
+         except_handler( MAKE_EXCEPTION_WITH_CONTEXT( std::current_exception() ) );
+      }
+   }
+
+   void store_lib( const chain::block_state_ptr& bsp ) {
+      try {
+         store.append_lib( bsp->block_num );
+      } catch( ... ) {
+         except_handler( MAKE_EXCEPTION_WITH_CONTEXT( std::current_exception() ) );
+      }
+   }
+
+private:
+   StoreProvider store;
+   exception_handler except_handler;
+   std::map<transaction_id_type, chain::transaction_trace_ptr> cached_traces;
+   fc::optional<chain::transaction_trace_ptr> onblock_trace;
+
+};
+
+}}
\ No newline at end of file
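(Aside, not part of the patch: a rough sketch of how a hosting plugin might connect this extractor to the chain controller's signals. The member names and the 2.0-era signal signatures are assumptions for illustration.)

   using extraction_t = eosio::trace_api::chain_extraction_impl_type<eosio::trace_api::store_provider>;
   auto extraction = std::make_shared<extraction_t>( std::move(store), except_handler );
   applied_transaction_connection = chain.applied_transaction.connect(
      [extraction]( std::tuple<const chain::transaction_trace_ptr&, const chain::signed_transaction&> t ) {
         extraction->signal_applied_transaction( std::get<0>(t), std::get<1>(t) );
      } );
   accepted_block_connection = chain.accepted_block.connect(
      [extraction]( const chain::block_state_ptr& bsp ) { extraction->signal_accepted_block( bsp ); } );
   irreversible_block_connection = chain.irreversible_block.connect(
      [extraction]( const chain::block_state_ptr& bsp ) { extraction->signal_irreversible_block( bsp ); } );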
diff --git a/plugins/trace_api_plugin/include/eosio/trace_api/common.hpp b/plugins/trace_api_plugin/include/eosio/trace_api/common.hpp
new file mode 100644
index 00000000000..0b3e6fbcfac
--- /dev/null
+++ b/plugins/trace_api_plugin/include/eosio/trace_api/common.hpp
@@ -0,0 +1,77 @@
+#pragma once
+
+#include <functional>
+
+namespace eosio::trace_api {
+   template <typename T>
+   class optional_delegate : private std::function<T> {
+   public:
+      using std::function<T>::function;
+
+      /**
+       * overloaded call operator to ignore unset functions
+       */
+      template <typename ...Args, typename R = std::invoke_result_t<std::function<T>, Args...>>
+      auto operator()( Args... args ) const -> std::enable_if_t<!std::is_void_v<R>, R> {
+         if (static_cast<const std::function<T>&>(*this)) {
+            return std::function<T>::operator()(std::move(args)...);
+         } else {
+            return {};
+         }
+      }
+
+      template <typename ...Args, typename R = std::invoke_result_t<std::function<T>, Args...>>
+      auto operator()( Args... args ) const -> std::enable_if_t<std::is_void_v<R>> {
+         if (static_cast<const std::function<T>&>(*this)) {
+            std::function<T>::operator()(std::move(args)...);
+         }
+      }
+   };
+
+   /**
+    * A function used to separate cooperative or external concerns from long running tasks;
+    * calling code should expect that this can throw yield_exception and gracefully unwind if it does
+    * @throws yield_exception if the provided yield needs to terminate the long running process for any reason
+    */
+   using yield_function = optional_delegate<void()>;
+
+   /**
+    * Exceptions
+    */
+   class yield_exception : public std::runtime_error {
+   public:
+      explicit yield_exception(const char* what_arg)
+      :std::runtime_error(what_arg)
+      {}
+
+      explicit yield_exception(const std::string& what_arg)
+      :std::runtime_error(what_arg)
+      {}
+   };
+
+   class bad_data_exception : public std::runtime_error {
+   public:
+      explicit bad_data_exception(const char* what_arg)
+      :std::runtime_error(what_arg)
+      {}
+
+      explicit bad_data_exception(const std::string& what_arg)
+      :std::runtime_error(what_arg)
+      {}
+   };
+
+   using exception_with_context = std::tuple<std::exception_ptr, char const *, uint32_t, char const *>;
+   using exception_handler = optional_delegate<void(const exception_with_context&)>;
+
+   struct block_trace_v0;
+   // optional block trace and irreversibility paired data
+   using get_block_t = std::optional<std::tuple<block_trace_v0, bool>>;
+   /**
+    * Normal use case: exception_handler except_handler;
+    * except_handler( MAKE_EXCEPTION_WITH_CONTEXT( std::current_exception() ) );
+    */
+#define MAKE_EXCEPTION_WITH_CONTEXT(eptr) \
+   (eosio::trace_api::exception_with_context((eptr), __FILE__, __LINE__, __func__))
+
+
+}
\ No newline at end of file
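(Aside, not part of the patch: a small sketch of how a caller might construct a yield_function under these definitions. An unset optional_delegate is a no-op, while a set one can abort long-running work by throwing yield_exception.)

   // hypothetical deadline-based yield (fc::time_point / fc::milliseconds from libfc)
   auto deadline = fc::time_point::now() + fc::milliseconds(10);
   eosio::trace_api::yield_function yield = [deadline]() {
      if( fc::time_point::now() > deadline )
         throw eosio::trace_api::yield_exception( "deadline exceeded while processing trace data" );
   };
   yield();                                   // throws only once the deadline has passed
   eosio::trace_api::yield_function{}();      // unset delegate: does nothing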
diff --git a/plugins/trace_api_plugin/include/eosio/trace_api/configuration_utils.hpp b/plugins/trace_api_plugin/include/eosio/trace_api/configuration_utils.hpp
new file mode 100644
index 00000000000..b13b1672a1e
--- /dev/null
+++ b/plugins/trace_api_plugin/include/eosio/trace_api/configuration_utils.hpp
@@ -0,0 +1,54 @@
+#pragma once
+
+#include <eosio/chain/abi_def.hpp>
+#include <eosio/chain/exceptions.hpp>
+#include <fc/io/json.hpp>
+#include <fc/filesystem.hpp>
+#include <string>
+
+namespace eosio::trace_api::configuration_utils {
+   using namespace eosio;
+
+   /**
+    * Given a path (absolute or relative) to a file that contains a JSON-encoded ABI, return the parsed ABI
+    *
+    * @param file_name - a path to the ABI
+    * @param data_dir - the base path for relative file_name values
+    * @return the parsed ABI
+    * @throws json_parse_exception if the JSON is malformed
+    */
+   chain::abi_def abi_def_from_file(const std::string& file_name, const fc::path& data_dir )
+   {
+      fc::variant abi_variant;
+      auto abi_path = fc::path(file_name);
+      if (abi_path.is_relative()) {
+         abi_path = data_dir / abi_path;
+      }
+
+      EOS_ASSERT(fc::exists(abi_path) && !fc::is_directory(abi_path), chain::plugin_config_exception, "${path} does not exist or is not a file", ("path", abi_path.generic_string()));
+      try {
+         abi_variant = fc::json::from_file(abi_path);
+      } EOS_RETHROW_EXCEPTIONS(chain::json_parse_exception, "Failed to parse JSON from file: ${file}", ("file", abi_path.generic_string()));
+
+      chain::abi_def result;
+      fc::from_variant(abi_variant, result);
+      return result;
+   }
+
+   /**
+    * Given a string in the form <key>=<value>, where <key> cannot contain an `=` character and <value> can contain anything,
+    * return a pair of the two independent strings
+    *
+    * @param input - the <key>=<value> string to split
+    * @return a pair of {key, value}
+    */
+   std::pair<std::string, std::string> parse_kv_pairs( const std::string& input ) {
+      EOS_ASSERT(!input.empty(), chain::plugin_config_exception, "Key-Value Pair is Empty");
+      auto delim = input.find("=");
+      EOS_ASSERT(delim != std::string::npos, chain::plugin_config_exception, "Missing \"=\"");
+      EOS_ASSERT(delim != 0, chain::plugin_config_exception, "Missing Key");
+      EOS_ASSERT(delim + 1 != input.size(), chain::plugin_config_exception, "Missing Value");
+      return std::make_pair(input.substr(0, delim), input.substr(delim + 1));
+   }
+
+}
\ No newline at end of file
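(Aside, not part of the patch: how these two helpers would plausibly combine when the plugin handles a configured account-to-ABI mapping. The `trace-rpc-abi` option spelling and the paths are assumptions for illustration.)

   // hypothetical: value of a config entry such as trace-rpc-abi = eosio.token=abis/eosio.token.abi
   auto kv  = configuration_utils::parse_kv_pairs( "eosio.token=abis/eosio.token.abi" );
   auto abi = configuration_utils::abi_def_from_file( kv.second, appbase::app().data_dir() );
   data_handler->add_abi( eosio::chain::name( kv.first ), abi );   // data_handler assumed from the earlier sketch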
diff --git a/plugins/trace_api_plugin/include/eosio/trace_api/data_log.hpp b/plugins/trace_api_plugin/include/eosio/trace_api/data_log.hpp
new file mode 100644
index 00000000000..41b37c1f03c
--- /dev/null
+++ b/plugins/trace_api_plugin/include/eosio/trace_api/data_log.hpp
@@ -0,0 +1,13 @@
+#pragma once
+#include <fc/static_variant.hpp>
+#include <fc/variant.hpp>
+#include <eosio/trace_api/trace.hpp>
+#include <eosio/trace_api/common.hpp>
+
+namespace eosio { namespace trace_api {
+
+   using data_log_entry = fc::static_variant<
+      block_trace_v0
+   >;
+
+}}
diff --git a/plugins/trace_api_plugin/include/eosio/trace_api/extract_util.hpp b/plugins/trace_api_plugin/include/eosio/trace_api/extract_util.hpp
new file mode 100644
index 00000000000..98ad09c4d7b
--- /dev/null
+++ b/plugins/trace_api_plugin/include/eosio/trace_api/extract_util.hpp
@@ -0,0 +1,53 @@
+#pragma once
+
+#include <eosio/trace_api/trace.hpp>
+#include <eosio/chain/block_state.hpp>
+
+namespace eosio { namespace trace_api {
+
+/// Used by to_transaction_trace_v0 for creation of action_trace_v0
+inline action_trace_v0 to_action_trace_v0( const chain::action_trace& at ) {
+   action_trace_v0 r;
+   r.receiver = at.receiver;
+   r.account = at.act.account;
+   r.action = at.act.name;
+   r.data = at.act.data;
+   if( at.receipt ) {
+      r.global_sequence = at.receipt->global_sequence;
+   }
+   r.authorization.reserve( at.act.authorization.size());
+   for( const auto& auth : at.act.authorization ) {
+      r.authorization.emplace_back( authorization_trace_v0{auth.actor, auth.permission} );
+   }
+   return r;
+}
+
+/// @return transaction_trace_v0 with populated action_trace_v0
+inline transaction_trace_v0 to_transaction_trace_v0( const chain::transaction_trace_ptr& t ) {
+   transaction_trace_v0 r;
+   if( !t->failed_dtrx_trace ) {
+      r.id = t->id;
+   } else {
+      r.id = t->failed_dtrx_trace->id; // report the failed trx id since that is the id known to user
+   }
+   r.actions.reserve( t->action_traces.size());
+   for( const auto& at : t->action_traces ) {
+      if( !at.context_free ) { // not including CFA at this time
+         r.actions.emplace_back( to_action_trace_v0( at ));
+      }
+   }
+   return r;
+}
+
+/// @return block_trace_v0 without any transaction_trace_v0
+inline block_trace_v0 create_block_trace_v0( const chain::block_state_ptr& bsp ) {
+   block_trace_v0 r;
+   r.id = bsp->id;
+   r.number = bsp->block_num;
+   r.previous_id = bsp->block->previous;
+   r.timestamp = bsp->block->timestamp;
+   r.producer = bsp->block->producer;
+   return r;
+}
+
+} }
diff --git a/plugins/trace_api_plugin/include/eosio/trace_api/metadata_log.hpp b/plugins/trace_api_plugin/include/eosio/trace_api/metadata_log.hpp
new file mode 100644
index 00000000000..7bcc73d3ed2
--- /dev/null
+++ b/plugins/trace_api_plugin/include/eosio/trace_api/metadata_log.hpp
@@ -0,0 +1,26 @@
+#pragma once
+#include <fc/static_variant.hpp>
+#include <fc/reflect/reflect.hpp>
+#include <eosio/trace_api/common.hpp>
+#include <eosio/chain/types.hpp>
+
+namespace eosio { namespace trace_api {
+   struct block_entry_v0 {
+      chain::block_id_type id;
+      uint32_t number;
+      uint64_t offset;
+   };
+
+   struct lib_entry_v0 {
+      uint32_t lib;
+   };
+
+   using metadata_log_entry = fc::static_variant<
+      block_entry_v0,
+      lib_entry_v0
+   >;
+
+}}
+
+FC_REFLECT(eosio::trace_api::block_entry_v0, (id)(number)(offset));
+FC_REFLECT(eosio::trace_api::lib_entry_v0, (lib));
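(Aside, not part of the patch: what one index entry encodes, with assumed values. A block_entry_v0 pairs a block number with the byte offset of that block's traces in the matching trace slice.)

   eosio::trace_api::metadata_log_entry entry = eosio::trace_api::block_entry_v0{
      eosio::chain::block_id_type(), 1234, 0x1f00 };   // id, block number, offset into the data slice
   if( entry.contains<eosio::trace_api::block_entry_v0>() ) {
      const auto& be = entry.get<eosio::trace_api::block_entry_v0>();
      // a reader would seek the paired trace slice to be.offset and unpack a data_log_entry there
   }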
diff --git a/plugins/trace_api_plugin/include/eosio/trace_api/request_handler.hpp b/plugins/trace_api_plugin/include/eosio/trace_api/request_handler.hpp
new file mode 100644
index 00000000000..d80bcaf2690
--- /dev/null
+++ b/plugins/trace_api_plugin/include/eosio/trace_api/request_handler.hpp
@@ -0,0 +1,59 @@
+#pragma once
+
+#include <fc/variant.hpp>
+#include <eosio/trace_api/trace.hpp>
+#include <eosio/trace_api/common.hpp>
+#include <eosio/trace_api/data_log.hpp>
+
+namespace eosio::trace_api {
+   using data_handler_function = std::function<fc::variant(const action_trace_v0&, const yield_function&)>;
+
+   namespace detail {
+      class response_formatter {
+      public:
+         static fc::variant process_block( const block_trace_v0& trace, bool irreversible, const data_handler_function& data_handler, const yield_function& yield );
+      };
+   }
+
+   template <typename LogfileProvider, typename DataHandlerProvider>
+   class request_handler {
+   public:
+      request_handler(LogfileProvider&& logfile_provider, DataHandlerProvider&& data_handler_provider)
+      :logfile_provider(std::move(logfile_provider))
+      ,data_handler_provider(std::move(data_handler_provider))
+      {
+      }
+
+      /**
+       * Fetch the trace for a given block height and convert it to a fc::variant for conversion to a final format
+       * (e.g. JSON)
+       *
+       * @param block_height - the height of the block whose trace is requested
+       * @param yield - a yield function to allow cooperation during long running tasks
+       * @return a properly formatted variant representing the trace for the given block height if it exists, an
+       * empty variant otherwise.
+       * @throws yield_exception if a call to `yield` throws.
+       * @throws bad_data_exception when there are issues with the underlying data preventing processing.
+       */
+      fc::variant get_block_trace( uint32_t block_height, const yield_function& yield = {}) {
+         auto data = logfile_provider.get_block(block_height, yield);
+         if (!data) {
+            return {};
+         }
+
+         yield();
+
+         auto data_handler = [this](const action_trace_v0& action, const yield_function& yield) -> fc::variant {
+            return data_handler_provider.process_data(action, yield);
+         };
+
+         return detail::response_formatter::process_block(std::get<0>(*data), std::get<1>(*data), data_handler, yield);
+      }
+
+   private:
+      LogfileProvider logfile_provider;
+      DataHandlerProvider data_handler_provider;
+   };
+
+
+}
diff --git a/plugins/trace_api_plugin/include/eosio/trace_api/store_provider.hpp b/plugins/trace_api_plugin/include/eosio/trace_api/store_provider.hpp
new file mode 100644
index 00000000000..9ee28e7d900
--- /dev/null
+++ b/plugins/trace_api_plugin/include/eosio/trace_api/store_provider.hpp
@@ -0,0 +1,295 @@
+#pragma once
+
+#include <boost/filesystem.hpp>
+#include <fc/io/cfile.hpp>
+#include <fc/io/raw.hpp>
+#include <eosio/trace_api/trace.hpp>
+#include <eosio/trace_api/common.hpp>
+#include <eosio/trace_api/metadata_log.hpp>
+#include <eosio/trace_api/data_log.hpp>
+
+namespace eosio::trace_api {
+   using namespace boost::filesystem;
+
+   class path_does_not_exist : public std::runtime_error {
+   public:
+      explicit path_does_not_exist(const char* what_arg)
+      :std::runtime_error(what_arg)
+      {}
+      explicit path_does_not_exist(const std::string& what_arg)
+      :std::runtime_error(what_arg)
+      {}
+   };
+
+   class old_slice_version : public std::runtime_error {
+   public:
+      explicit old_slice_version(const char* what_arg)
+      :std::runtime_error(what_arg)
+      {}
+      explicit old_slice_version(const std::string& what_arg)
+      :std::runtime_error(what_arg)
+      {}
+   };
+
+   class incompatible_slice_files : public std::runtime_error {
+   public:
+      explicit incompatible_slice_files(const char* what_arg)
+      :std::runtime_error(what_arg)
+      {}
+      explicit incompatible_slice_files(const std::string& what_arg)
+      :std::runtime_error(what_arg)
+      {}
+   };
+
+   class malformed_slice_file : public std::runtime_error {
+   public:
+      explicit malformed_slice_file(const char* what_arg)
+      :std::runtime_error(what_arg)
+      {}
+      explicit malformed_slice_file(const std::string& what_arg)
+      :std::runtime_error(what_arg)
+      {}
+   };
+
+   /**
+    * append an entry to the store
+    *
+    * @param entry : the entry to append
+    * @param file : the file to append entry to
+    * @return the offset in the file where that entry is written
+    */
+   template <typename DataEntry, typename File>
+   static uint64_t append_store(const DataEntry &entry, File &file) {
+      auto data = fc::raw::pack(entry);
+      const auto offset = file.tellp();
+      file.write(data.data(), data.size());
+      file.flush();
+      file.sync();
+      return offset;
+   }
+
+   /**
+    * extract an entry from the data log
+    *
+    * @param file : the file to extract entry from
+    * @return the extracted entry
+    */
+   template <typename DataEntry, typename File>
+   static DataEntry extract_store( File& file ) {
+      DataEntry entry;
+      auto ds = file.create_datastream();
+      fc::raw::unpack(ds, entry);
+      return entry;
+   }
+
+
+   class store_provider;
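(Aside, not part of the patch: a round trip through these helpers against an fc::cfile, with an assumed file name and fopen-style mode string.)

   fc::cfile file;
   file.set_file_path( "trace_slice.log" );   // name assumed for illustration
   file.open( "ab+" );                        // create-or-append mode
   uint64_t offset = eosio::trace_api::append_store( eosio::trace_api::lib_entry_v0{ 42 }, file );
   file.seek( offset );                       // rewind to where the entry begins
   auto entry = eosio::trace_api::extract_store<eosio::trace_api::lib_entry_v0>( file );   // entry.lib == 42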
+   /**
+    * Provides access to the slice directory. It is only intended to be used by store_provider
+    * and unit tests.
+    */
+   class slice_directory {
+   public:
+      struct index_header {
+         uint32_t version;
+      };
+
+      enum class open_state { read /*read from front to back*/, write /*write to end of file*/ };
+      slice_directory(const boost::filesystem::path& slice_dir, uint32_t width, std::optional<uint32_t> minimum_irreversible_history_blocks);
+
+      /**
+       * Return the slice number that would include the passed in block_height
+       *
+       * @param block_height : height of the requested data
+       * @return the slice number for the block_height
+       */
+      uint32_t slice_number(uint32_t block_height) const {
+         return block_height / _width;
+      }
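(Aside, not part of the patch: the slice arithmetic in concrete numbers; the 10,000-block width and the file-name pattern are assumptions for illustration.)

   uint32_t width = 10000;
   uint32_t slice = 123456 / width;   // == 12, exactly what slice_number() computes
   // slice 12 covers heights [120000, 130000); its trace and index files would be named along
   // the lines of trace_0000120000-0000130000.log / trace_index_0000120000-0000130000.log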
+      /**
+       * Find or create the index file associated with the indicated slice_number
+       *
+       * @param slice_number : slice number of the requested slice file
+       * @param state : indicate if the file is going to be written to (appended) or read
+       * @param index_file : the cfile that will be set to the appropriate slice filename
+       *        and opened to that file
+       * @return true if the file was found (i.e. already existed)
+       */
+      bool find_or_create_index_slice(uint32_t slice_number, open_state state, fc::cfile& index_file) const;
+
+      /**
+       * Find the index file associated with the indicated slice_number
+       *
+       * @param slice_number : slice number of the requested slice file
+       * @param state : indicate if the file is going to be written to (appended) or read
+       * @param index_file : the cfile that will be set to the appropriate slice filename (always)
+       *        and opened to that file (if it was found)
+       * @param open_file : indicate if the file should be opened (if found) or not
+       * @return true if the file was found (i.e. already existed); if not found, index_file
+       *         is set to the appropriate file, but not open
+       */
+      bool find_index_slice(uint32_t slice_number, open_state state, fc::cfile& index_file, bool open_file = true) const;
+
+      /**
+       * Find or create the trace file associated with the indicated slice_number
+       *
+       * @param slice_number : slice number of the requested slice file
+       * @param state : indicate if the file is going to be written to (appended) or read
+       * @param trace_file : the cfile that will be set to the appropriate slice filename
+       *        and opened to that file
+       * @return true if the file was found (i.e. already existed)
+       */
+      bool find_or_create_trace_slice(uint32_t slice_number, open_state state, fc::cfile& trace_file) const;
+
+      /**
+       * Find the trace file associated with the indicated slice_number
+       *
+       * @param slice_number : slice number of the requested slice file
+       * @param state : indicate if the file is going to be written to (appended) or read
+       * @param trace_file : the cfile that will be set to the appropriate slice filename (always)
+       *        and opened to that file (if it was found)
+       * @param open_file : indicate if the file should be opened (if found) or not
+       * @return true if the file was found (i.e. already existed); if not found, trace_file
+       *         is set to the appropriate file, but not open
+       */
+      bool find_trace_slice(uint32_t slice_number, open_state state, fc::cfile& trace_file, bool open_file = true) const;
+
+      /**
+       * Find or create a trace and index file pair
+       *
+       * @param slice_number : slice number of the requested slice file
+       * @param state : indicate if the file is going to be written to (appended) or read
+       * @param trace : the cfile that will be set to the appropriate slice filename and
+       *        opened to that file
+       * @param index : the cfile for the paired index slice, set and opened the same way
+       */
+      void find_or_create_slice_pair(uint32_t slice_number, open_state state, fc::cfile& trace, fc::cfile& index);
+
+      /**
+       * Cleans up all slices that are no longer needed to maintain the minimum number of blocks past lib
+       *
+       * @param lib : block number of the current lib
+       */
+      void cleanup_old_slices(uint32_t lib);
+
+   private:
+      // returns true if slice is found, slice_file will always be set to the appropriate path for
+      // the slice_prefix and slice_number, but will only be opened if found
+      bool find_slice(const char* slice_prefix, uint32_t slice_number, fc::cfile& slice_file, bool open_file) const;
+
+      // take an index file that is initialized to a file and open it and write its header
+      void create_new_index_slice_file(fc::cfile& index_file) const;
+
+      // take an open index slice file and verify its header is valid and prepare the file to be appended to (or read from)
+      void validate_existing_index_slice_file(fc::cfile& index_file, open_state state) const;
+
+      const boost::filesystem::path _slice_dir;
+      const uint32_t _width;
+      const std::optional<uint32_t> _minimum_irreversible_history_blocks;
+      std::optional<uint32_t> _last_cleaned_up_slice;
+   };
+
+   /**
+    * Provides read and write access to block trace data.
+    */
+   class store_provider {
+   public:
+      using open_state = slice_directory::open_state;
+
+      store_provider(const boost::filesystem::path& slice_dir, uint32_t stride_width, std::optional<uint32_t> minimum_irreversible_history_blocks);
+
+      void append(const block_trace_v0& bt);
+      void append_lib(uint32_t lib);
+
+      /**
+       * Read the trace for a given block
+       * @param block_height : the height of the data being read
+       * @return empty optional if the data cannot be read, OTHERWISE
+       *         an optional containing a 2-tuple of the block_trace and a flag indicating irreversibility
+       */
+      get_block_t get_block(uint32_t block_height, const yield_function& yield = {});
+
+   protected:
+      /**
+       * Read the metadata log front-to-back starting at an offset, passing each entry to a provided functor/lambda
+       *
+       * @tparam Fn : type of the functor/lambda
+       * @param block_height : height of the requested data
+       * @param offset : initial offset to read from
+       * @param fn : the functor/lambda
+       * @return the highest offset read during this scan
+       */
+      template <typename Fn>
+      uint64_t scan_metadata_log_from( uint32_t block_height, uint64_t offset, Fn&& fn, const yield_function& yield ) {
+         // ignoring offset
+         offset = 0;
+         fc::cfile index;
+         const uint32_t slice_number = _slice_directory.slice_number(block_height);
+         const bool found = _slice_directory.find_index_slice(slice_number, open_state::read, index);
+         if( !found ) {
+            return 0;
+         }
+         const uint64_t end = file_size(index.get_file_path());
+         offset = index.tellp();
+         uint64_t last_read_offset = offset;
+         while (offset < end) {
+            yield();
+            const auto metadata = extract_store<metadata_log_entry>(index);
+            if(!fn(metadata)) {
+               break;
+            }
+            last_read_offset = offset;
+            offset = index.tellp();
+         }
+         return last_read_offset;
+      }
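(Aside, not part of the patch: the shape of a scan, as it might appear inside a store_provider member since scan_metadata_log_from is protected. The goal is to find the data-log offset recorded for one block.)

   uint64_t found_offset = 0;
   scan_metadata_log_from( height, 0, [&]( const metadata_log_entry& e ) -> bool {
      if( e.contains<block_entry_v0>() && e.get<block_entry_v0>().number == height ) {
         found_offset = e.get<block_entry_v0>().offset;
         return false;   // stop the scan
      }
      return true;       // keep scanning
   }, yield );
   // found_offset would then be handed to read_data_log( height, found_offset )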
fn(metadata)) { + break; + } + last_read_offset = offset; + offset = index.tellp(); + } + return last_read_offset; + } + + /** + * Read from the data log + * @param block_height : the block_height of the data being read + * @param offset : the offset in the datalog to read + * @return empty optional if the data log does not exist, data otherwise + * @throws std::exception : when the data is not the correct type or if the log is corrupt in some way + * + */ + std::optional read_data_log( uint32_t block_height, uint64_t offset ) { + const uint32_t slice_number = _slice_directory.slice_number(block_height); + fc::cfile trace; + if( !_slice_directory.find_trace_slice(slice_number, open_state::read, trace) ) { + const std::string offset_str = boost::lexical_cast(offset); + const std::string bh_str = boost::lexical_cast(block_height); + throw malformed_slice_file("Requested offset: " + offset_str + " to retrieve block number: " + bh_str + " but this trace file is new, so there are no traces present."); + } + const uint64_t end = file_size(trace.get_file_path()); + if( offset >= end ) { + const std::string offset_str = boost::lexical_cast(offset); + const std::string bh_str = boost::lexical_cast(block_height); + const std::string end_str = boost::lexical_cast(end); + throw malformed_slice_file("Requested offset: " + offset_str + " to retrieve block number: " + bh_str + " but this trace file only goes to offset: " + end_str); + } + trace.seek(offset); + return extract_store(trace); + } + + /** + * Initialize a new index slice with a valid header + * @param index : index file to open and add header to + * + */ + void initialize_new_index_slice_file(fc::cfile& index); + + /** + * Ensure an existing index slice has a valid header + * @param index : index file to open and read header from + * @param state : indicate if the file is going to be written to (appended) or read + * + */ + void validate_existing_index_slice_file(fc::cfile& index, open_state state); + + slice_directory _slice_directory; + }; + +} + +FC_REFLECT(eosio::trace_api::slice_directory::index_header, (version)) diff --git a/plugins/trace_api_plugin/include/eosio/trace_api/trace.hpp b/plugins/trace_api_plugin/include/eosio/trace_api/trace.hpp new file mode 100644 index 00000000000..e845bede3af --- /dev/null +++ b/plugins/trace_api_plugin/include/eosio/trace_api/trace.hpp @@ -0,0 +1,44 @@ +#pragma once + +#include +#include +#include + +namespace eosio { namespace trace_api { + + struct authorization_trace_v0 { + chain::name account; + chain::name permission; + }; + + struct action_trace_v0 { + uint64_t global_sequence = {}; + chain::name receiver = {}; + chain::name account = {}; + chain::name action = {}; + std::vector authorization = {}; + chain::bytes data = {}; + }; + + struct transaction_trace_v0 { + using status_type = chain::transaction_receipt_header::status_enum; + + chain::transaction_id_type id = {}; + std::vector actions = {}; + }; + + struct block_trace_v0 { + chain::block_id_type id = {}; + uint32_t number = {}; + chain::block_id_type previous_id = {}; + chain::block_timestamp_type timestamp = chain::block_timestamp_type(0); + chain::name producer = {}; + std::vector transactions = {}; + }; + +} } + +FC_REFLECT(eosio::trace_api::authorization_trace_v0, (account)(permission)) +FC_REFLECT(eosio::trace_api::action_trace_v0, (global_sequence)(receiver)(account)(action)(authorization)(data)) +FC_REFLECT(eosio::trace_api::transaction_trace_v0, (id)(actions)) +FC_REFLECT(eosio::trace_api::block_trace_v0, 
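The contract `read_data_log` enforces is that every offset handed to it was recorded by an index entry at append time, so a read at or past the end of the trace file can only mean a corrupt or mismatched slice. A toy in-memory model of that invariant (`toy_trace_log` is hypothetical; a `std::vector<char>` stands in for `fc::cfile`, and a generic exception stands in for `malformed_slice_file`):

```cpp
#include <cstdint>
#include <stdexcept>
#include <string>
#include <vector>

// Appends return the offset they wrote at; reads validate the offset first.
struct toy_trace_log {
    std::vector<char> bytes;

    uint64_t append(const std::string& packed) {
        const uint64_t offset = bytes.size();
        bytes.insert(bytes.end(), packed.begin(), packed.end());
        return offset;                              // this value goes into the index slice
    }

    std::string read(uint64_t offset, size_t len) const {
        if (offset >= bytes.size() || offset + len > bytes.size()) // same spirit as read_data_log's check
            throw std::runtime_error("offset " + std::to_string(offset) + " past end of trace log");
        return std::string(bytes.data() + offset, len);
    }
};
```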
(id)(number)(previous_id)(timestamp)(producer)(transactions)) diff --git a/plugins/trace_api_plugin/include/eosio/trace_api/trace_api_plugin.hpp b/plugins/trace_api_plugin/include/eosio/trace_api/trace_api_plugin.hpp new file mode 100644 index 00000000000..cd5eb9f1537 --- /dev/null +++ b/plugins/trace_api_plugin/include/eosio/trace_api/trace_api_plugin.hpp @@ -0,0 +1,51 @@ +#pragma once +#include +#include +#include + +namespace eosio { + /** + * Plugin that runs both a data extraction and the HTTP RPC in the same application + */ + class trace_api_plugin : public appbase::plugin { + public: + APPBASE_PLUGIN_REQUIRES((chain_plugin)(http_plugin)) + + trace_api_plugin(); + virtual ~trace_api_plugin(); + + virtual void set_program_options(appbase::options_description& cli, appbase::options_description& cfg) override; + + void plugin_initialize(const appbase::variables_map& options); + void plugin_startup(); + void plugin_shutdown(); + + void handle_sighup() override; + + private: + std::shared_ptr my; + std::shared_ptr rpc; + }; + + /** + * Plugin that only runs the RPC + */ + class trace_api_rpc_plugin : public appbase::plugin { + public: + APPBASE_PLUGIN_REQUIRES((http_plugin)) + + trace_api_rpc_plugin(); + virtual ~trace_api_rpc_plugin(); + + virtual void set_program_options(appbase::options_description& cli, appbase::options_description& cfg) override; + + void plugin_initialize(const appbase::variables_map& options); + void plugin_startup(); + void plugin_shutdown(); + + void handle_sighup() override; + + private: + std::shared_ptr rpc; + }; +} \ No newline at end of file diff --git a/plugins/trace_api_plugin/request_handler.cpp b/plugins/trace_api_plugin/request_handler.cpp new file mode 100644 index 00000000000..f0d0a132fcc --- /dev/null +++ b/plugins/trace_api_plugin/request_handler.cpp @@ -0,0 +1,92 @@ +#include + +#include + +#include + +namespace { + using namespace eosio::trace_api; + + std::string to_iso8601_datetime( const fc::time_point& t) { + return (std::string)t + "Z"; + } + + fc::variants process_authorizations(const std::vector& authorizations, const yield_function& yield ) { + fc::variants result; + result.reserve(authorizations.size()); + for ( const auto& a: authorizations) { + yield(); + + result.emplace_back(fc::mutable_variant_object() + ("account", a.account.to_string()) + ("permission", a.permission.to_string()) + ); + } + + return result; + + } + + fc::variants process_actions(const std::vector& actions, const data_handler_function& data_handler, const yield_function& yield ) { + fc::variants result; + result.reserve(actions.size()); + + // create a vector of indices to sort based on actions to avoid copies + std::vector indices(actions.size()); + std::iota(indices.begin(), indices.end(), 0); + std::sort(indices.begin(), indices.end(), [&actions](const int& lhs, const int& rhs) -> bool { + return actions.at(lhs).global_sequence < actions.at(rhs).global_sequence; + }); + + for ( int index : indices) { + yield(); + + const auto& a = actions.at(index); + auto action_variant = fc::mutable_variant_object() + ("receiver", a.receiver.to_string()) + ("account", a.account.to_string()) + ("action", a.action.to_string()) + ("authorization", process_authorizations(a.authorization, yield)) + ("data", fc::to_hex(a.data.data(), a.data.size())); + + auto params = data_handler(a, yield); + if (!params.is_null()) { + action_variant("params", params); + } + + result.emplace_back( std::move(action_variant) ); + } + + return result; + + } + + fc::variants process_transactions(const 
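`process_actions` above sorts a vector of positions rather than the traces themselves, so no `action_trace_v0` has to be copied just to order the output by `global_sequence`. The idiom in isolation, with plain integers standing in for the action traces:

```cpp
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <numeric>
#include <vector>

int main() {
    std::vector<uint64_t> global_sequence = {42, 7, 19};   // stand-in for actions[i].global_sequence
    std::vector<size_t> indices(global_sequence.size());
    std::iota(indices.begin(), indices.end(), 0);          // 0, 1, 2
    std::sort(indices.begin(), indices.end(), [&](size_t lhs, size_t rhs) {
        return global_sequence[lhs] < global_sequence[rhs];
    });
    for (size_t i : indices)
        std::printf("%zu ", i);                            // prints: 1 2 0
}
```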
std::vector& transactions, const data_handler_function& data_handler, const yield_function& yield ) { + fc::variants result; + result.reserve(transactions.size()); + for ( const auto& t: transactions) { + yield(); + + result.emplace_back(fc::mutable_variant_object() + ("id", t.id.str()) + ("actions", process_actions(t.actions, data_handler, yield)) + ); + } + + return result; + } + +} + +namespace eosio::trace_api::detail { + fc::variant response_formatter::process_block( const block_trace_v0& trace, bool irreversible, const data_handler_function& data_handler, const yield_function& yield ) { + return fc::mutable_variant_object() + ("id", trace.id.str() ) + ("number", trace.number ) + ("previous_id", trace.previous_id.str() ) + ("status", irreversible ? "irreversible" : "pending" ) + ("timestamp", to_iso8601_datetime(trace.timestamp)) + ("producer", trace.producer.to_string()) + ("transactions", process_transactions(trace.transactions, data_handler, yield )); + } +} diff --git a/plugins/trace_api_plugin/store_provider.cpp b/plugins/trace_api_plugin/store_provider.cpp new file mode 100644 index 00000000000..e211db46c13 --- /dev/null +++ b/plugins/trace_api_plugin/store_provider.cpp @@ -0,0 +1,209 @@ +#include + +#include + +namespace { + static constexpr uint32_t _current_version = 1; + static constexpr const char* _trace_prefix = "trace_"; + static constexpr const char* _trace_index_prefix = "trace_index_"; + static constexpr const char* _trace_ext = ".log"; + static constexpr uint _max_filename_size = std::char_traits::length(_trace_index_prefix) + 10 + 1 + 10 + std::char_traits::length(_trace_ext) + 1; // "trace_index_" + 10-digits + '-' + 10-digits + ".log" + null-char +} + +namespace eosio::trace_api { + namespace bfs = boost::filesystem; + store_provider::store_provider(const bfs::path& slice_dir, uint32_t stride_width, std::optional minimum_irreversible_history_blocks) + : _slice_directory(slice_dir, stride_width, minimum_irreversible_history_blocks) { + } + + void store_provider::append(const block_trace_v0& bt) { + fc::cfile trace; + fc::cfile index; + const uint32_t slice_number = _slice_directory.slice_number(bt.number); + _slice_directory.find_or_create_slice_pair(slice_number, open_state::write, trace, index); + // storing as static_variant to allow adding other data types to the trace file in the future + const uint64_t offset = append_store(data_log_entry { bt }, trace); + + auto be = metadata_log_entry { block_entry_v0 { .id = bt.id, .number = bt.number, .offset = offset }}; + append_store(be, index); + } + + void store_provider::append_lib(uint32_t lib) { + fc::cfile index; + const uint32_t slice_number = _slice_directory.slice_number(lib); + _slice_directory.find_or_create_index_slice(slice_number, open_state::write, index); + auto le = metadata_log_entry { lib_entry_v0 { .lib = lib }}; + append_store(le, index); + _slice_directory.cleanup_old_slices(lib); + } + + get_block_t store_provider::get_block(uint32_t block_height, const yield_function& yield) { + std::optional trace_offset; + bool irreversible = false; + uint64_t offset = scan_metadata_log_from(block_height, 0, [&block_height, &trace_offset, &irreversible](const metadata_log_entry& e) -> bool { + if (e.contains()) { + const auto& block = e.get(); + if (block.number == block_height) { + trace_offset = block.offset; + } + } else if (e.contains()) { + auto lib = e.get().lib; + if (lib >= block_height) { + irreversible = true; + return false; + } + } + return true; + }, yield); + if (!trace_offset) { + return 
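The metadata scan in `get_block` above answers two questions in one pass: where the block's trace lives in the data log, and whether a recorded LIB has already passed the block. A simplified model of that loop, with `std::variant` standing in for the `fc::static_variant`-based `metadata_log_entry` and hypothetical pared-down entry types:

```cpp
#include <cstdint>
#include <optional>
#include <utility>
#include <variant>
#include <vector>

struct block_entry { uint32_t number; uint64_t offset; };
struct lib_entry   { uint32_t lib; };
using entry = std::variant<block_entry, lib_entry>;

// Returns (offset, irreversible) for the block, or nullopt if no entry was found.
std::optional<std::pair<uint64_t, bool>> find_block(const std::vector<entry>& index, uint32_t height) {
    std::optional<uint64_t> offset;
    bool irreversible = false;
    for (const auto& e : index) {
        if (auto b = std::get_if<block_entry>(&e)) {
            if (b->number == height) offset = b->offset;
        } else if (std::get_if<lib_entry>(&e)->lib >= height) {
            irreversible = true;
            break;                      // nothing after a qualifying LIB entry changes the answer
        }
    }
    if (!offset) return std::nullopt;
    return std::make_pair(*offset, irreversible);
}
```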
get_block_t{}; + } + std::optional entry = read_data_log(block_height, *trace_offset); + if (!entry) { + return get_block_t{}; + } + const auto bt = entry->get(); + return std::make_tuple( bt, irreversible ); + } + + slice_directory::slice_directory(const bfs::path& slice_dir, uint32_t width, std::optional minimum_irreversible_history_blocks) + : _slice_dir(slice_dir) + , _width(width) + , _minimum_irreversible_history_blocks(minimum_irreversible_history_blocks) { + if (!exists(_slice_dir)) { + bfs::create_directories(slice_dir); + } + } + + bool slice_directory::find_or_create_index_slice(uint32_t slice_number, open_state state, fc::cfile& index_file) const { + const bool found = find_index_slice(slice_number, state, index_file); + if( !found ) { + create_new_index_slice_file(index_file); + } + return found; + } + + bool slice_directory::find_index_slice(uint32_t slice_number, open_state state, fc::cfile& index_file, bool open_file) const { + const bool found = find_slice(_trace_index_prefix, slice_number, index_file, open_file); + if( !found || !open_file ) { + return found; + } + + validate_existing_index_slice_file(index_file, state); + return true; + } + + void slice_directory::create_new_index_slice_file(fc::cfile& index_file) const { + index_file.open(fc::cfile::create_or_update_rw_mode); + index_header h { .version = _current_version }; + append_store(h, index_file); + } + + void slice_directory::validate_existing_index_slice_file(fc::cfile& index_file, open_state state) const { + const auto header = extract_store(index_file); + if (header.version != _current_version) { + throw old_slice_version("Old slice file with version: " + std::to_string(header.version) + + " is in directory, only supporting version: " + std::to_string(_current_version)); + } + + if( state == open_state::write ) { + index_file.seek_end(0); + } + } + + bool slice_directory::find_or_create_trace_slice(uint32_t slice_number, open_state state, fc::cfile& trace_file) const { + const bool found = find_trace_slice(slice_number, state, trace_file); + + if( !found ) { + trace_file.open(fc::cfile::create_or_update_rw_mode); + } + + return found; + } + + bool slice_directory::find_trace_slice(uint32_t slice_number, open_state state, fc::cfile& trace_file, bool open_file) const { + const bool found = find_slice(_trace_prefix, slice_number, trace_file, open_file); + + if( !found || !open_file ) { + return found; + } + + if( state == open_state::write ) { + trace_file.seek_end(0); + } + else { + trace_file.seek(0); // ensure we are at the start of the file + } + return true; + } + + bool slice_directory::find_slice(const char* slice_prefix, uint32_t slice_number, fc::cfile& slice_file, bool open_file) const { + char filename[_max_filename_size] = {}; + const uint32_t slice_start = slice_number * _width; + const int size_written = snprintf(filename, _max_filename_size, "%s%010d-%010d%s", slice_prefix, slice_start, (slice_start + _width), _trace_ext); + // assert that _max_filename_size is correct + if ( size_written >= _max_filename_size ) { + const std::string max_size_str = std::to_string(_max_filename_size - 1); // dropping null character from size + const std::string size_written_str = std::to_string(size_written); + throw std::runtime_error("Could not write the complete filename. Anticipated the max filename characters to be: " + + max_size_str + " or less, but wrote: " + size_written_str + " characters. This is likely because the file " + "format was changed and the code was not updated accordingly. 
Filename created: " + filename);
+      }
+      const path slice_path = _slice_dir / filename;
+      slice_file.set_file_path(slice_path);
+
+      const bool file_exists = exists(slice_path);
+      if( !file_exists || !open_file ) {
+         return file_exists;
+      }
+
+      slice_file.open(fc::cfile::create_or_update_rw_mode);
+      // TODO: this is a temporary fix until fc::cfile handles it internally. OSX and Linux differ on the read offset
+      // when opening in "ab+" mode
+      slice_file.seek(0);
+      return true;
+   }
+
+   void slice_directory::find_or_create_slice_pair(uint32_t slice_number, open_state state, fc::cfile& trace, fc::cfile& index) {
+      const bool trace_found = find_or_create_trace_slice(slice_number, state, trace);
+      const bool index_found = find_or_create_index_slice(slice_number, state, index);
+      if (trace_found != index_found) {
+         const std::string trace_status = trace_found ? "existing" : "new";
+         const std::string index_status = index_found ? "existing" : "new";
+         elog("Trace file is ${ts}, but its metadata file is ${is}. This means the files are not consistent.", ("ts", trace_status)("is", index_status));
+      }
+   }
+
+   void slice_directory::cleanup_old_slices(uint32_t lib) {
+      if (!_minimum_irreversible_history_blocks)
+         return;
+      const uint32_t lib_slice_number = slice_number( lib );
+      if (lib_slice_number < 1 || (_last_cleaned_up_slice && _last_cleaned_up_slice >= lib_slice_number - 1))
+         return;
+
+      // we can only clean up a slice once our last needed history block (lib - *_minimum_irreversible_history_blocks)
+      // is out of that slice (hence the extra width subtracted below)
+      const int64_t cleanup_block_number = static_cast<int64_t>(lib) - static_cast<int64_t>(*_minimum_irreversible_history_blocks) - _width;
+      if (cleanup_block_number > 0) {
+         uint32_t cleanup_slice_num = slice_number(static_cast<uint32_t>(cleanup_block_number));
+         // since we subtracted width, we are guaranteed cleanup_slice_num is not the slice that contains LIB
+         while (!_last_cleaned_up_slice || *_last_cleaned_up_slice < cleanup_slice_num) {
+            fc::cfile trace;
+            fc::cfile index;
+            const uint32_t slice_to_clean = _last_cleaned_up_slice ?
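The retention arithmetic in `cleanup_old_slices` is easiest to see with concrete, made-up numbers (a 10,000-block width and a 20,000-block minimum history are examples, not defaults from this patch):

```cpp
#include <cstdint>
#include <cstdio>

int main() {
    const uint32_t width = 10000, min_history = 20000, lib = 45042;
    // Everything at or below lib - min_history is outside the retention window;
    // subtracting one more width guarantees the computed slice cannot contain LIB.
    const int64_t cleanup_block = static_cast<int64_t>(lib) - min_history - width;  // 15042
    if (cleanup_block > 0)
        std::printf("slices 0..%u are removable\n",
                    static_cast<uint32_t>(cleanup_block) / width);  // slices 0..1 (blocks 0-19999)
}
```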
*_last_cleaned_up_slice + 1 : 0; + // cleanup index first to reduce the likelihood of reader finding index, but not finding trace + const bool dont_open_file = false; + const bool index_found = find_index_slice(slice_to_clean, open_state::read, index, dont_open_file); + if (index_found) { + bfs::remove(index.get_file_path()); + } + const bool trace_found = find_trace_slice(slice_to_clean, open_state::read, trace, dont_open_file); + if (trace_found) { + bfs::remove(trace.get_file_path()); + } + _last_cleaned_up_slice = slice_to_clean; + } + } + } +} diff --git a/plugins/trace_api_plugin/test/CMakeLists.txt b/plugins/trace_api_plugin/test/CMakeLists.txt new file mode 100644 index 00000000000..999c9e7a61e --- /dev/null +++ b/plugins/trace_api_plugin/test/CMakeLists.txt @@ -0,0 +1,29 @@ +add_executable( test_extraction test_extraction.cpp ) +target_link_libraries( test_extraction trace_api_plugin ) +target_include_directories( test_extraction PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" ) + +add_test(NAME test_extraction COMMAND plugins/trace_api_plugin/test/test_extraction WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) + +add_executable( test_responses test_responses.cpp ) +target_link_libraries( test_responses trace_api_plugin ) +target_include_directories( test_responses PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" ) + +add_test(NAME test_responses COMMAND plugins/trace_api_plugin/test/test_responses WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) + +add_executable( test_trace_file test_trace_file.cpp ) +target_link_libraries( test_trace_file trace_api_plugin ) +target_include_directories( test_trace_file PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" ) + +add_test(NAME test_trace_file COMMAND plugins/trace_api_plugin/test/test_trace_file WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) + +add_executable( test_data_handlers test_data_handlers.cpp ) +target_link_libraries( test_data_handlers trace_api_plugin ) +target_include_directories( test_data_handlers PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" ) + +add_test(NAME test_data_handlers COMMAND plugins/trace_api_plugin/test/test_data_handlers WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) + +add_executable( test_configuration_utils test_configuration_utils.cpp ) +target_link_libraries( test_configuration_utils trace_api_plugin ) +target_include_directories( test_configuration_utils PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" ) + +add_test(NAME test_configuration_utils COMMAND plugins/trace_api_plugin/test/test_configuration_utils WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) diff --git a/plugins/trace_api_plugin/test/include/eosio/trace_api/test_common.hpp b/plugins/trace_api_plugin/test/include/eosio/trace_api/test_common.hpp new file mode 100644 index 00000000000..0369a41aec6 --- /dev/null +++ b/plugins/trace_api_plugin/test/include/eosio/trace_api/test_common.hpp @@ -0,0 +1,250 @@ +#pragma once + +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include + +namespace eosio::trace_api { + /** + * Utilities that make writing tests easier + */ + + namespace test_common { + fc::sha256 operator"" _h(const char* input, std::size_t) { + return fc::sha256(input); + } + + chain::name operator"" _n(const char* input, std::size_t) { + return chain::name(input); + } + + chain::asset operator"" _t(const char* input, std::size_t) { + return chain::asset::from_string(input); + } + + auto get_private_key( chain::name keyname, std::string role = "owner" ) { + auto secret = fc::sha256::hash( keyname.to_string() + role ); + return 
chain::private_key_type::regenerate( secret ); + } + + auto get_public_key( chain::name keyname, std::string role = "owner" ) { + return get_private_key( keyname, role ).get_public_key(); + } + + chain::bytes make_transfer_data( chain::name from, chain::name to, chain::asset quantity, std::string&& memo) { + fc::datastream ps; + fc::raw::pack(ps, from, to, quantity, memo); + chain::bytes result( ps.tellp()); + + if( result.size()) { + fc::datastream ds( result.data(), size_t( result.size())); + fc::raw::pack(ds, from, to, quantity, memo); + } + return result; + } + + auto make_block_state( chain::block_id_type previous, uint32_t height, uint32_t slot, chain::name producer, + std::vector trxs ) { + chain::signed_block_ptr block = std::make_shared(); + for( auto& trx : trxs ) { + block->transactions.emplace_back( trx ); + } + block->producer = producer; + block->timestamp = chain::block_timestamp_type(slot); + // make sure previous contains correct block # so block_header::block_num() returns correct value + if( previous == chain::block_id_type() ) { + previous._hash[0] &= 0xffffffff00000000; + previous._hash[0] += fc::endian_reverse_u32(height - 1); + } + block->previous = previous; + + auto priv_key = get_private_key( block->producer, "active" ); + auto pub_key = get_public_key( block->producer, "active" ); + + auto prev = std::make_shared(); + auto header_bmroot = chain::digest_type::hash( std::make_pair( block->digest(), prev->blockroot_merkle.get_root())); + auto sig_digest = chain::digest_type::hash( std::make_pair( header_bmroot, prev->pending_schedule.schedule_hash )); + block->producer_signature = priv_key.sign( sig_digest ); + + std::vector signing_keys; + signing_keys.emplace_back( std::move( priv_key )); + auto signer = [&]( chain::digest_type d ) { + std::vector result; + result.reserve( signing_keys.size()); + for( const auto& k: signing_keys ) + result.emplace_back( k.sign( d )); + return result; + }; + chain::pending_block_header_state pbhs; + pbhs.producer = block->producer; + pbhs.timestamp = block->timestamp; + chain::producer_authority_schedule schedule = {0, {chain::producer_authority{block->producer, + chain::block_signing_authority_v0{1, {{pub_key, 1}}}}}}; + pbhs.active_schedule = schedule; + pbhs.valid_block_signing_authority = chain::block_signing_authority_v0{1, {{pub_key, 1}}}; + auto bsp = std::make_shared( + std::move( pbhs ), + std::move( block ), + std::vector(), + chain::protocol_feature_set(), + []( chain::block_timestamp_type timestamp, + const fc::flat_set& cur_features, + const std::vector& new_features ) {}, + signer + ); + bsp->block_num = height; + + return bsp; + } + + void to_kv_helper(const fc::variant& v, std::function&& append){ + if (v.is_object() ) { + const auto& obj = v.get_object(); + static const std::string sep = "."; + + for (const auto& entry: obj) { + to_kv_helper( entry.value(), [&append, &entry](const std::string& path, const std::string& value){ + append(sep + entry.key() + path, value); + }); + } + } else if (v.is_array()) { + const auto& arr = v.get_array(); + for (size_t idx = 0; idx < arr.size(); idx++) { + const auto& entry = arr.at(idx); + to_kv_helper( entry, [&append, idx](const std::string& path, const std::string& value){ + append(std::string("[") + std::to_string(idx) + std::string("]") + path, value); + }); + } + } else if (!v.is_null()) { + append("", v.as_string()); + } + } + + auto to_kv(const fc::variant& v) { + std::map result; + to_kv_helper(v, [&result](const std::string& k, const std::string& v){ + 
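The `previous._hash[0]` manipulation in `make_block_state` above works because EOSIO block ids carry the block number big-endian in their first four bytes, which is how `block_header::block_num()` recovers a number from an id alone. A self-contained illustration (the hand-rolled byte swap models `fc::endian_reverse_u32` on a little-endian host):

```cpp
#include <cstdint>
#include <cstdio>

uint32_t endian_reverse_u32(uint32_t x) {
    return (x >> 24) | ((x >> 8) & 0xff00u) | ((x << 8) & 0xff0000u) | (x << 24);
}

int main() {
    uint64_t id_word = 0;                                   // first 8 bytes of a zeroed block id
    const uint32_t height = 5;
    id_word &= 0xffffffff00000000ull;                       // clear the low word...
    id_word += endian_reverse_u32(height - 1);              // ...and stamp in the previous block number
    std::printf("%016llx\n", (unsigned long long)id_word);  // prints: 0000000004000000
}
```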
result.emplace(k, v); + }); + return result; + } + } + + // TODO: promote these to the main files? + // I prefer not to have these operators but they are convenient for BOOST TEST integration + // + + bool operator==(const authorization_trace_v0& lhs, const authorization_trace_v0& rhs) { + return + lhs.account == rhs.account && + lhs.permission == rhs.permission; + } + + bool operator==(const action_trace_v0& lhs, const action_trace_v0& rhs) { + return + lhs.global_sequence == rhs.global_sequence && + lhs.receiver == rhs.receiver && + lhs.account == rhs.account && + lhs.action == rhs.action && + lhs.authorization == rhs.authorization && + lhs.data == rhs.data; + } + + bool operator==(const transaction_trace_v0& lhs, const transaction_trace_v0& rhs) { + return + lhs.id == rhs.id && + lhs.actions == rhs.actions; + } + + bool operator==(const block_trace_v0 &lhs, const block_trace_v0 &rhs) { + return + lhs.id == rhs.id && + lhs.number == rhs.number && + lhs.previous_id == rhs.previous_id && + lhs.timestamp == rhs.timestamp && + lhs.producer == rhs.producer && + lhs.transactions == rhs.transactions; + } + + std::ostream& operator<<(std::ostream &os, const block_trace_v0 &bt) { + os << fc::json::to_string( bt, fc::time_point::maximum() ); + return os; + } + + bool operator==(const block_entry_v0& lhs, const block_entry_v0& rhs) { + return + lhs.id == rhs.id && + lhs.number == rhs.number && + lhs.offset == rhs.offset; + } + + bool operator!=(const block_entry_v0& lhs, const block_entry_v0& rhs) { + return !(lhs == rhs); + } + + bool operator==(const lib_entry_v0& lhs, const lib_entry_v0& rhs) { + return + lhs.lib == rhs.lib; + } + + bool operator!=(const lib_entry_v0& lhs, const lib_entry_v0& rhs) { + return !(lhs == rhs); + } + + std::ostream& operator<<(std::ostream& os, const block_entry_v0& be) { + os << fc::json::to_string(be, fc::time_point::maximum()); + return os; + } + + std::ostream& operator<<(std::ostream& os, const lib_entry_v0& le) { + os << fc::json::to_string(le, fc::time_point::maximum()); + return os; + } +} + +namespace fc { + template + std::ostream& operator<<(std::ostream &os, const fc::static_variant& v ) { + os << fc::json::to_string(v, fc::time_point::maximum()); + return os; + } + + std::ostream& operator<<(std::ostream &os, const fc::microseconds& t ) { + os << t.count(); + return os; + } + +} + +namespace eosio::chain { + bool operator==(const abi_def& lhs, const abi_def& rhs) { + return fc::raw::pack(lhs) == fc::raw::pack(rhs); + } + + bool operator!=(const abi_def& lhs, const abi_def& rhs) { + return !(lhs == rhs); + } + + std::ostream& operator<<(std::ostream& os, const abi_def& abi) { + os << fc::json::to_string(abi, fc::time_point::maximum()); + return os; + } +} + +namespace std { + /* + * operator for printing to_kv entries + */ + ostream& operator<<(ostream& os, const pair& entry) { + os << entry.first + "=" + entry.second; + return os; + } +} diff --git a/plugins/trace_api_plugin/test/test_configuration_utils.cpp b/plugins/trace_api_plugin/test/test_configuration_utils.cpp new file mode 100644 index 00000000000..2a612a2f56e --- /dev/null +++ b/plugins/trace_api_plugin/test/test_configuration_utils.cpp @@ -0,0 +1,86 @@ +#define BOOST_TEST_MODULE trace_configuration_utils +#include +#include +#include + +#include +#include + +using namespace eosio; +using namespace eosio::trace_api::configuration_utils; + +namespace bfs = boost::filesystem; + +struct temp_file_fixture { + temp_file_fixture() {} + + ~temp_file_fixture() { + for (const auto& p: paths) { + if 
(bfs::exists(p)) {
+            bfs::remove(p);
+         }
+      }
+   }
+
+   std::string create_temp_file( const std::string& contents ) {
+      auto path = bfs::temp_directory_path() / bfs::unique_path();
+      auto os = bfs::ofstream(path, std::ios_base::out);
+      os << contents;
+      os.close();
+      return paths.emplace_back(std::move(path)).generic_string();
+   }
+
+   std::list<bfs::path> paths;
+};
+
+BOOST_AUTO_TEST_SUITE(configuration_utils_tests)
+   BOOST_AUTO_TEST_CASE(parse_kv_pairs_test)
+   {
+      using spair = std::pair<std::string, std::string>;
+
+      // basic
+      BOOST_TEST( parse_kv_pairs("a=b") == spair("a", "b") );
+      BOOST_TEST( parse_kv_pairs("a==b") == spair("a", "=b") );
+      BOOST_TEST( parse_kv_pairs("a={}:\"=") == spair("a", "{}:\"=") );
+      BOOST_TEST( parse_kv_pairs("{}:\"=a") == spair("{}:\"", "a") );
+
+      // missing key not OK
+      BOOST_REQUIRE_THROW(parse_kv_pairs("=b"), chain::plugin_config_exception);
+
+      // missing value not OK
+      BOOST_REQUIRE_THROW(parse_kv_pairs("a="), chain::plugin_config_exception);
+
+      // missing = not OK
+      BOOST_REQUIRE_THROW(parse_kv_pairs("a"), chain::plugin_config_exception);
+
+      // empty not OK
+      BOOST_REQUIRE_THROW(parse_kv_pairs(""), chain::plugin_config_exception);
+   }
+
+   BOOST_FIXTURE_TEST_CASE(abi_def_from_file_test, temp_file_fixture)
+   {
+      auto data_dir = fc::path(bfs::temp_directory_path());
+      auto good_json = std::string("{\"version\" : \"test string please ignore\"}");
+      auto good_json_filename = create_temp_file(good_json);
+      auto relative_json_filename = bfs::path(good_json_filename).filename().generic_string();
+
+      auto good_abi = chain::abi_def();
+      good_abi.version = "test string please ignore";
+
+      auto bad_json = std::string("{{\"version\":oops\"}");
+      auto bad_json_filename = create_temp_file(bad_json);
+      auto bad_filename = (bfs::temp_directory_path() / bfs::unique_path()).generic_string();
+      auto directory_name = bfs::temp_directory_path().generic_string();
+
+      // good cases
+      BOOST_TEST( abi_def_from_file(good_json_filename, data_dir) == good_abi );
+      BOOST_TEST( abi_def_from_file(relative_json_filename, data_dir) == good_abi );
+
+      // bad cases
+      BOOST_REQUIRE_THROW( abi_def_from_file(bad_json_filename, data_dir), chain::json_parse_exception );
+      BOOST_REQUIRE_THROW( abi_def_from_file(bad_filename, data_dir), chain::plugin_config_exception );
+      BOOST_REQUIRE_THROW( abi_def_from_file(directory_name, data_dir), chain::plugin_config_exception );
+   }
+
+BOOST_AUTO_TEST_SUITE_END() diff --git a/plugins/trace_api_plugin/test/test_data_handlers.cpp b/plugins/trace_api_plugin/test/test_data_handlers.cpp new file mode 100644 index 00000000000..e1f521dc994 --- /dev/null +++ b/plugins/trace_api_plugin/test/test_data_handlers.cpp @@ -0,0 +1,126 @@ +#define BOOST_TEST_MODULE trace_data_handlers
+#include <boost/test/included/unit_test.hpp>
+
+#include <eosio/trace_api/abi_data_handler.hpp>
+
+#include <eosio/trace_api/test_common.hpp>
+
+using namespace eosio;
+using namespace eosio::trace_api;
+using namespace eosio::trace_api::test_common;
+
+BOOST_AUTO_TEST_SUITE(abi_data_handler_tests)
+   BOOST_AUTO_TEST_CASE(empty_data)
+   {
+      auto action = action_trace_v0 {
+         0, "alice"_n, "alice"_n, "foo"_n, {}, {}
+      };
+      abi_data_handler handler;
+
+      auto expected = fc::variant();
+      auto actual = handler.process_data(action);
+
+      BOOST_TEST(to_kv(expected) == to_kv(actual), boost::test_tools::per_element());
+   }
+
+   BOOST_AUTO_TEST_CASE(no_abi)
+   {
+      auto action = action_trace_v0 {
+         0, "alice"_n, "alice"_n, "foo"_n, {}, {0x00, 0x01, 0x02, 0x03}
+      };
+      abi_data_handler handler;
+
+      auto expected = fc::variant();
+      auto actual = handler.process_data(action);
+
+      BOOST_TEST(to_kv(expected) == to_kv(actual),
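The `parse_kv_pairs` cases above pin down a simple rule: split on the first `=`, require both sides to be non-empty, and leave any later `=` characters in the value (so `"a==b"` yields `("a", "=b")`). A sketch of a conforming implementation (`split_kv` is hypothetical, not the plugin's actual function, and a standard exception stands in for `chain::plugin_config_exception`):

```cpp
#include <stdexcept>
#include <string>
#include <utility>

std::pair<std::string, std::string> split_kv(const std::string& input) {
    const auto pos = input.find('=');                       // first '=' only
    if (pos == std::string::npos || pos == 0 || pos + 1 == input.size())
        throw std::invalid_argument("expected non-empty key=value: " + input);
    return { input.substr(0, pos), input.substr(pos + 1) };
}
```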
boost::test_tools::per_element()); + } + + BOOST_AUTO_TEST_CASE(basic_abi) + { + auto action = action_trace_v0 { + 0, "alice"_n, "alice"_n, "foo"_n, {}, {0x00, 0x01, 0x02, 0x03} + }; + + auto abi = chain::abi_def ( {}, + { + { "foo", "", { {"a", "varuint32"}, {"b", "varuint32"}, {"c", "varuint32"}, {"d", "varuint32"} } } + }, + { + { "foo"_n, "foo", ""} + }, + {}, {}, {} + ); + abi.version = "eosio::abi/1."; + + abi_data_handler handler; + handler.add_abi("alice"_n, abi); + + fc::variant expected = fc::mutable_variant_object() + ("a", 0) + ("b", 1) + ("c", 2) + ("d", 3); + + auto actual = handler.process_data(action); + + BOOST_TEST(to_kv(expected) == to_kv(actual), boost::test_tools::per_element()); + } + + BOOST_AUTO_TEST_CASE(basic_abi_wrong_type) + { + auto action = action_trace_v0 { + 0, "alice"_n, "alice"_n, "foo"_n, {}, {0x00, 0x01, 0x02, 0x03} + }; + + auto abi = chain::abi_def ( {}, + { + { "foo", "", { {"a", "varuint32"}, {"b", "varuint32"}, {"c", "varuint32"}, {"d", "varuint32"} } } + }, + { + { "bar"_n, "foo", ""} + }, + {}, {}, {} + ); + abi.version = "eosio::abi/1."; + + abi_data_handler handler; + handler.add_abi("alice"_n, abi); + + auto expected = fc::variant(); + + auto actual = handler.process_data(action); + + BOOST_TEST(to_kv(expected) == to_kv(actual), boost::test_tools::per_element()); + } + + BOOST_AUTO_TEST_CASE(basic_abi_insufficient_data) + { + auto action = action_trace_v0 { + 0, "alice"_n, "alice"_n, "foo"_n, {}, {0x00, 0x01, 0x02} + }; + + auto abi = chain::abi_def ( {}, + { + { "foo", "", { {"a", "varuint32"}, {"b", "varuint32"}, {"c", "varuint32"}, {"d", "varuint32"} } } + }, + { + { "foo"_n, "foo", ""} + }, + {}, {}, {} + ); + abi.version = "eosio::abi/1."; + + bool log_called = false; + abi_data_handler handler([&log_called](const exception_with_context& ){log_called = true;}); + handler.add_abi("alice"_n, abi); + + auto expected = fc::variant(); + + auto actual = handler.process_data(action); + + BOOST_TEST(to_kv(expected) == to_kv(actual), boost::test_tools::per_element()); + BOOST_TEST(log_called); + } + +BOOST_AUTO_TEST_SUITE_END() diff --git a/plugins/trace_api_plugin/test/test_extraction.cpp b/plugins/trace_api_plugin/test/test_extraction.cpp new file mode 100644 index 00000000000..f31c103f756 --- /dev/null +++ b/plugins/trace_api_plugin/test/test_extraction.cpp @@ -0,0 +1,309 @@ +#define BOOST_TEST_MODULE trace_data_extraction +#include + +#include +#include +#include +#include + +#include +#include + +using namespace eosio; +using namespace eosio::trace_api; +using namespace eosio::trace_api::test_common; +using eosio::chain::name; +using eosio::chain::digest_type; + +namespace { + chain::transaction_trace_ptr make_transaction_trace( const chain::transaction_id_type& id, uint32_t block_number, + uint32_t slot, chain::transaction_receipt_header::status_enum status, std::vector&& actions ) { + return std::make_shared(chain::transaction_trace{ + id, + block_number, + chain::block_timestamp_type(slot), + {}, + chain::transaction_receipt_header{status}, + fc::microseconds(0), + 0, + false, + std::move(actions), + {}, + {}, + {}, + {}, + {} + }); + } + + chain::bytes make_onerror_data( const chain::onerror& one ) { + fc::datastream ps; + fc::raw::pack(ps, one); + chain::bytes result(ps.tellp()); + + if( result.size() ) { + fc::datastream ds( result.data(), size_t(result.size()) ); + fc::raw::pack(ds, one); + } + return result; + } + + auto make_transfer_action( chain::name from, chain::name to, chain::asset quantity, std::string memo ) { + return 
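The `basic_abi` case decodes `{0x00, 0x01, 0x02, 0x03}` to `a=0, b=1, c=2, d=3` because `varuint32` is LEB128-style: any byte below 0x80 is a complete one-byte value. A minimal decoder, for reference only (not fc's implementation):

```cpp
#include <cstdint>
#include <cstdio>
#include <vector>

uint32_t read_varuint32(const std::vector<uint8_t>& buf, size_t& pos) {
    uint32_t result = 0;
    int shift = 0;
    uint8_t byte;
    do {
        byte = buf.at(pos++);
        result |= uint32_t(byte & 0x7f) << shift;
        shift += 7;
    } while (byte & 0x80);               // high bit set => more bytes follow
    return result;
}

int main() {
    std::vector<uint8_t> data = {0x00, 0x01, 0x02, 0x03};
    size_t pos = 0;
    while (pos < data.size())
        std::printf("%u ", read_varuint32(data, pos));   // prints: 0 1 2 3
}
```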
chain::action( std::vector {{from, chain::config::active_name}}, + "eosio.token"_n, "transfer"_n, make_transfer_data( from, to, quantity, std::move(memo) ) ); + } + + auto make_onerror_action( chain::name creator, chain::uint128_t sender_id ) { + return chain::action( std::vector{{creator, chain::config::active_name}}, + chain::onerror{ sender_id, "test ", 4 }); + } + + auto make_packed_trx( std::vector actions ) { + chain::signed_transaction trx; + trx.actions = std::move( actions ); + return packed_transaction( trx ); + } + + chain::action_trace make_action_trace( uint64_t global_sequence, chain::action act, chain::name receiver ) { + chain::action_trace result; + // don't think we need any information other than receiver and global sequence + result.receipt.emplace(chain::action_receipt{ + receiver, + digest_type::hash(act), + global_sequence, + 0, + {}, + 0, + 0 + }); + result.receiver = receiver; + result.act = std::move(act); + return result; + } + +} + +struct extraction_test_fixture { + /** + * MOCK implementation of the logfile input API + */ + struct mock_logfile_provider_type { + mock_logfile_provider_type(extraction_test_fixture& fixture) + :fixture(fixture) + {} + + /** + * append an entry to the data store + * + * @param entry : the entry to append + */ + void append( const block_trace_v0& entry ) { + fixture.data_log.emplace_back(entry); + } + + void append_lib( uint32_t lib ) { + fixture.max_lib = std::max(fixture.max_lib, lib); + } + + extraction_test_fixture& fixture; + }; + + extraction_test_fixture() + : extraction_impl(mock_logfile_provider_type(*this), exception_handler{} ) + { + } + + void signal_applied_transaction( const chain::transaction_trace_ptr& trace, const chain::signed_transaction& strx ) { + extraction_impl.signal_applied_transaction(trace, strx); + } + + void signal_accepted_block( const chain::block_state_ptr& bsp ) { + extraction_impl.signal_accepted_block(bsp); + } + + // fixture data and methods + uint32_t max_lib = 0; + std::vector data_log = {}; + + chain_extraction_impl_type extraction_impl; +}; + + +BOOST_AUTO_TEST_SUITE(block_extraction) + + BOOST_FIXTURE_TEST_CASE(basic_single_transaction_block, extraction_test_fixture) + { + auto act1 = make_transfer_action( "alice"_n, "bob"_n, "0.0001 SYS"_t, "Memo!" ); + auto act2 = make_transfer_action( "alice"_n, "bob"_n, "0.0001 SYS"_t, "Memo!" ); + auto act3 = make_transfer_action( "alice"_n, "bob"_n, "0.0001 SYS"_t, "Memo!" ); + auto actt1 = make_action_trace( 0, act1, "eosio.token"_n ); + auto actt2 = make_action_trace( 1, act2, "alice"_n ); + auto actt3 = make_action_trace( 2, act3, "bob"_n ); + auto ptrx1 = make_packed_trx( { act1, act2, act3 } ); + + // apply a basic transfer + signal_applied_transaction( + make_transaction_trace( ptrx1.id(), 1, 1, chain::transaction_receipt_header::executed, + { actt1, actt2, actt3 } ), + ptrx1.get_signed_transaction() ); + + // accept the block with one transaction + auto bsp1 = make_block_state( chain::block_id_type(), 1, 1, "bp.one"_n, + { chain::packed_transaction(ptrx1) } ); + signal_accepted_block( bsp1 ); + + const uint32_t expected_lib = 0; + const block_trace_v0 expected_trace { bsp1->id, 1, bsp1->prev(), chain::block_timestamp_type(1), "bp.one"_n, + { + { + ptrx1.id(), + { + { + 0, + "eosio.token"_n, "eosio.token"_n, "transfer"_n, + {{ "alice"_n, "active"_n }}, + make_transfer_data( "alice"_n, "bob"_n, "0.0001 SYS"_t, "Memo!" 
) + }, + { + 1, + "alice"_n, "eosio.token"_n, "transfer"_n, + {{ "alice"_n, "active"_n }}, + make_transfer_data( "alice"_n, "bob"_n, "0.0001 SYS"_t, "Memo!" ) + }, + { + 2, + "bob"_n, "eosio.token"_n, "transfer"_n, + {{ "alice"_n, "active"_n }}, + make_transfer_data( "alice"_n, "bob"_n, "0.0001 SYS"_t, "Memo!" ) + } + } + } + } + }; + + BOOST_REQUIRE_EQUAL(max_lib, 0); + BOOST_REQUIRE(data_log.size() == 1); + BOOST_REQUIRE(data_log.at(0).contains()); + BOOST_REQUIRE_EQUAL(data_log.at(0).get(), expected_trace); + } + + BOOST_FIXTURE_TEST_CASE(basic_multi_transaction_block, extraction_test_fixture) { + auto act1 = make_transfer_action( "alice"_n, "bob"_n, "0.0001 SYS"_t, "Memo!" ); + auto act2 = make_transfer_action( "bob"_n, "alice"_n, "0.0001 SYS"_t, "Memo!" ); + auto act3 = make_transfer_action( "fred"_n, "bob"_n, "0.0001 SYS"_t, "Memo!" ); + auto actt1 = make_action_trace( 0, act1, "eosio.token"_n ); + auto actt2 = make_action_trace( 1, act2, "bob"_n ); + auto actt3 = make_action_trace( 2, act3, "fred"_n ); + auto ptrx1 = make_packed_trx( { act1 } ); + auto ptrx2 = make_packed_trx( { act2 } ); + auto ptrx3 = make_packed_trx( { act3 } ); + + signal_applied_transaction( + make_transaction_trace( ptrx1.id(), 1, 1, chain::transaction_receipt_header::executed, + { actt1 } ), + ptrx1.get_signed_transaction() ); + signal_applied_transaction( + make_transaction_trace( ptrx2.id(), 1, 1, chain::transaction_receipt_header::executed, + { actt2 } ), + ptrx2.get_signed_transaction() ); + signal_applied_transaction( + make_transaction_trace( ptrx3.id(), 1, 1, chain::transaction_receipt_header::executed, + { actt3 } ), + ptrx3.get_signed_transaction() ); + + // accept the block with three transaction + auto bsp1 = make_block_state( chain::block_id_type(), 1, 1, "bp.one"_n, + { chain::packed_transaction(ptrx1), chain::packed_transaction(ptrx2), chain::packed_transaction(ptrx3) } ); + signal_accepted_block( bsp1 ); + + const uint32_t expected_lib = 0; + const block_trace_v0 expected_trace { bsp1->id, 1, bsp1->prev(), chain::block_timestamp_type(1), "bp.one"_n, + { + { + ptrx1.id(), + { + { + 0, + "eosio.token"_n, "eosio.token"_n, "transfer"_n, + {{ "alice"_n, "active"_n }}, + make_transfer_data( "alice"_n, "bob"_n, "0.0001 SYS"_t, "Memo!" ) + } + } + }, + { + ptrx2.id(), + { + { + 1, + "bob"_n, "eosio.token"_n, "transfer"_n, + {{ "bob"_n, "active"_n }}, + make_transfer_data( "bob"_n, "alice"_n, "0.0001 SYS"_t, "Memo!" ) + } + } + }, + { + ptrx3.id(), + { + { + 2, + "fred"_n, "eosio.token"_n, "transfer"_n, + {{ "fred"_n, "active"_n }}, + make_transfer_data( "fred"_n, "bob"_n, "0.0001 SYS"_t, "Memo!" ) + } + } + } + } + }; + + BOOST_REQUIRE_EQUAL(max_lib, 0); + BOOST_REQUIRE(data_log.size() == 1); + BOOST_REQUIRE(data_log.at(0).contains()); + BOOST_REQUIRE_EQUAL(data_log.at(0).get(), expected_trace); + } + + BOOST_FIXTURE_TEST_CASE(onerror_transaction_block, extraction_test_fixture) + { + auto onerror_act = make_onerror_action( "alice"_n, 1 ); + auto actt1 = make_action_trace( 0, onerror_act, "eosio.token"_n ); + auto ptrx1 = make_packed_trx( { onerror_act } ); + + auto act2 = make_transfer_action( "bob"_n, "alice"_n, "0.0001 SYS"_t, "Memo!" 
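What these fixtures exercise, in outline: applied-transaction signals arrive one at a time and must be held until the accepted-block signal names which transactions (and in what order) actually made it into the block; the onerror case additionally shows that the id recorded is the one the user knows, the failed deferred transaction's. A simplified model with hypothetical types (strings stand in for ids and packed traces):

```cpp
#include <map>
#include <string>
#include <vector>

struct toy_extractor {
    std::map<std::string, std::string> pending;          // trx id -> cached trace

    void on_applied_transaction(const std::string& id, const std::string& trace) {
        pending[id] = trace;
    }

    std::vector<std::string> on_accepted_block(const std::vector<std::string>& trx_ids) {
        std::vector<std::string> block_traces;
        for (const auto& id : trx_ids)                   // stitch in the block's order
            if (auto it = pending.find(id); it != pending.end())
                block_traces.push_back(it->second);
        pending.clear();                                 // a block boundary resets the cache
        return block_traces;
    }
};
```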
); + auto actt2 = make_action_trace( 1, act2, "bob"_n ); + auto transfer_trx = make_packed_trx( { act2 } ); + + auto onerror_trace = make_transaction_trace( ptrx1.id(), 1, 1, chain::transaction_receipt_header::executed, + { actt1 } ); + auto transfer_trace = make_transaction_trace( transfer_trx.id(), 1, 1, chain::transaction_receipt_header::soft_fail, + { actt2 } ); + onerror_trace->failed_dtrx_trace = transfer_trace; + + signal_applied_transaction( onerror_trace, transfer_trx.get_signed_transaction() ); + + auto bsp1 = make_block_state( chain::block_id_type(), 1, 1, "bp.one"_n, + { chain::packed_transaction(transfer_trx) } ); + signal_accepted_block( bsp1 ); + + const uint32_t expected_lib = 0; + const block_trace_v0 expected_trace { bsp1->id, 1, bsp1->prev(), chain::block_timestamp_type(1), "bp.one"_n, + { + { + transfer_trx.id(), // transfer_trx.id() because that is the trx id known to the user + { + { + 0, + "eosio.token"_n, "eosio"_n, "onerror"_n, + {{ "alice"_n, "active"_n }}, + make_onerror_data( chain::onerror{ 1, "test ", 4 } ) + } + } + } + } + }; + + BOOST_REQUIRE_EQUAL(max_lib, 0); + BOOST_REQUIRE(data_log.size() == 1); + BOOST_REQUIRE(data_log.at(0).contains()); + BOOST_REQUIRE_EQUAL(data_log.at(0).get(), expected_trace); + } + +BOOST_AUTO_TEST_SUITE_END() diff --git a/plugins/trace_api_plugin/test/test_responses.cpp b/plugins/trace_api_plugin/test/test_responses.cpp new file mode 100644 index 00000000000..7c1e902d6c1 --- /dev/null +++ b/plugins/trace_api_plugin/test/test_responses.cpp @@ -0,0 +1,432 @@ +#define BOOST_TEST_MODULE trace_data_responses +#include + +#include + +#include +#include + +using namespace eosio; +using namespace eosio::trace_api; +using namespace eosio::trace_api::test_common; + +struct response_test_fixture { + /** + * MOCK implementation of the logfile input API + */ + struct mock_logfile_provider { + mock_logfile_provider(response_test_fixture& fixture) + :fixture(fixture) + {} + + /** + * Read the trace for a given block + * @param block_height : the height of the data being read + * @return empty optional if the data cannot be read OTHERWISE + * optional containing a 2-tuple of the block_trace and a flag indicating irreversibility + * @throws bad_data_exception : if the data is corrupt in some way + */ + get_block_t get_block(uint32_t height, const yield_function& yield= {}) { + return fixture.mock_get_block(height, yield); + } + response_test_fixture& fixture; + }; + + constexpr static auto default_mock_data_handler = [](const action_trace_v0& a, const yield_function&) -> fc::variant { + return fc::mutable_variant_object()("hex" , fc::to_hex(a.data.data(), a.data.size())); + }; + + + struct mock_data_handler_provider { + mock_data_handler_provider(response_test_fixture& fixture) + :fixture(fixture) + {} + + fc::variant process_data(const action_trace_v0& action, const yield_function& yield) { + return fixture.mock_data_handler(action, yield); + } + + response_test_fixture& fixture; + }; + + using response_impl_type = request_handler; + /** + * TODO: initialize extraction implementation here with `mock_logfile_provider` as template param + */ + response_test_fixture() + : response_impl(mock_logfile_provider(*this), mock_data_handler_provider(*this)) + { + + } + + fc::variant get_block_trace( uint32_t block_height, const yield_function& yield = {} ) { + return response_impl.get_block_trace( block_height, yield ); + } + + // fixture data and methods + std::function mock_get_block; + std::function mock_data_handler = default_mock_data_handler; + + 
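The fixture above routes the handler's two template dependencies through re-assignable `std::function` members, so each test case can swap mock behavior inline without defining a new provider type. The pattern in miniature (all types here are simplified stand-ins for `get_block_t` and the real providers):

```cpp
#include <cstdint>
#include <functional>
#include <optional>
#include <string>

struct mock_provider {
    std::function<std::optional<std::string>(uint32_t)> get_block_impl;  // swapped per test
    std::optional<std::string> get_block(uint32_t height) { return get_block_impl(height); }
};

template <typename Provider>
struct toy_handler {
    Provider provider;
    std::optional<std::string> get_block_trace(uint32_t height) { return provider.get_block(height); }
};

int main() {
    toy_handler<mock_provider> handler;
    handler.provider.get_block_impl = [](uint32_t h) {
        return std::optional<std::string>("block " + std::to_string(h));
    };
    return handler.get_block_trace(1) ? 0 : 1;
}
```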
response_impl_type response_impl; + +}; + +BOOST_AUTO_TEST_SUITE(trace_responses) + BOOST_FIXTURE_TEST_CASE(basic_empty_block_response, response_test_fixture) + { + auto block_trace = block_trace_v0 { + "b000000000000000000000000000000000000000000000000000000000000001"_h, + 1, + "0000000000000000000000000000000000000000000000000000000000000000"_h, + chain::block_timestamp_type(0), + "bp.one"_n, + {} + }; + + fc::variant expected_response = fc::mutable_variant_object() + ("id", "b000000000000000000000000000000000000000000000000000000000000001") + ("number", 1) + ("previous_id", "0000000000000000000000000000000000000000000000000000000000000000") + ("status", "pending") + ("timestamp", "2000-01-01T00:00:00.000Z") + ("producer", "bp.one") + ("transactions", fc::variants() ) + ; + + mock_get_block = [&block_trace]( uint32_t height, const yield_function& ) -> get_block_t { + BOOST_TEST(height == 1); + return std::make_tuple(block_trace, false); + }; + + fc::variant actual_response = get_block_trace( 1 ); + + BOOST_TEST(to_kv(expected_response) == to_kv(actual_response), boost::test_tools::per_element()); + } + + BOOST_FIXTURE_TEST_CASE(basic_block_response, response_test_fixture) + { + auto block_trace = block_trace_v0 { + "b000000000000000000000000000000000000000000000000000000000000001"_h, + 1, + "0000000000000000000000000000000000000000000000000000000000000000"_h, + chain::block_timestamp_type(0), + "bp.one"_n, + { + { + "0000000000000000000000000000000000000000000000000000000000000001"_h, + { + { + 0, + "receiver"_n, "contract"_n, "action"_n, + {{ "alice"_n, "active"_n }}, + { 0x00, 0x01, 0x02, 0x03 } + } + } + } + } + }; + + fc::variant expected_response = fc::mutable_variant_object() + ("id", "b000000000000000000000000000000000000000000000000000000000000001") + ("number", 1) + ("previous_id", "0000000000000000000000000000000000000000000000000000000000000000") + ("status", "pending") + ("timestamp", "2000-01-01T00:00:00.000Z") + ("producer", "bp.one") + ("transactions", fc::variants({ + fc::mutable_variant_object() + ("id", "0000000000000000000000000000000000000000000000000000000000000001") + ("actions", fc::variants({ + fc::mutable_variant_object() + ("receiver", "receiver") + ("account", "contract") + ("action", "action") + ("authorization", fc::variants({ + fc::mutable_variant_object() + ("account", "alice") + ("permission", "active") + })) + ("data", "00010203") + ("params", fc::mutable_variant_object() + ("hex", "00010203")) + })) + })) + ; + + mock_get_block = [&block_trace]( uint32_t height, const yield_function& ) -> get_block_t { + BOOST_TEST(height == 1); + return std::make_tuple(block_trace, false); + }; + + fc::variant actual_response = get_block_trace( 1 ); + + BOOST_TEST(to_kv(expected_response) == to_kv(actual_response), boost::test_tools::per_element()); + } + + BOOST_FIXTURE_TEST_CASE(basic_block_response_no_params, response_test_fixture) + { + auto block_trace = block_trace_v0 { + "b000000000000000000000000000000000000000000000000000000000000001"_h, + 1, + "0000000000000000000000000000000000000000000000000000000000000000"_h, + chain::block_timestamp_type(0), + "bp.one"_n, + { + { + "0000000000000000000000000000000000000000000000000000000000000001"_h, + { + { + 0, + "receiver"_n, "contract"_n, "action"_n, + {{ "alice"_n, "active"_n }}, + { 0x00, 0x01, 0x02, 0x03 } + } + } + } + } + }; + + fc::variant expected_response = fc::mutable_variant_object() + ("id", "b000000000000000000000000000000000000000000000000000000000000001") + ("number", 1) + ("previous_id", 
"0000000000000000000000000000000000000000000000000000000000000000") + ("status", "pending") + ("timestamp", "2000-01-01T00:00:00.000Z") + ("producer", "bp.one") + ("transactions", fc::variants({ + fc::mutable_variant_object() + ("id", "0000000000000000000000000000000000000000000000000000000000000001") + ("actions", fc::variants({ + fc::mutable_variant_object() + ("receiver", "receiver") + ("account", "contract") + ("action", "action") + ("authorization", fc::variants({ + fc::mutable_variant_object() + ("account", "alice") + ("permission", "active") + })) + ("data", "00010203") + })) + })) + ; + + mock_get_block = [&block_trace]( uint32_t height, const yield_function& ) -> get_block_t { + BOOST_TEST(height == 1); + return std::make_tuple(block_trace, false); + }; + + // simulate an inability to parse the parameters + mock_data_handler = [](const action_trace_v0&, const yield_function&) -> fc::variant { + return {}; + }; + + fc::variant actual_response = get_block_trace( 1 ); + + BOOST_TEST(to_kv(expected_response) == to_kv(actual_response), boost::test_tools::per_element()); + } + + BOOST_FIXTURE_TEST_CASE(basic_block_response_unsorted, response_test_fixture) + { + auto block_trace = block_trace_v0 { + "b000000000000000000000000000000000000000000000000000000000000001"_h, + 1, + "0000000000000000000000000000000000000000000000000000000000000000"_h, + chain::block_timestamp_type(0), + "bp.one"_n, + { + { + "0000000000000000000000000000000000000000000000000000000000000001"_h, + { + { + 1, + "receiver"_n, "contract"_n, "action"_n, + {{ "alice"_n, "active"_n }}, + { 0x01, 0x01, 0x01, 0x01 } + }, + { + 0, + "receiver"_n, "contract"_n, "action"_n, + {{ "alice"_n, "active"_n }}, + { 0x00, 0x00, 0x00, 0x00 } + }, + { + 2, + "receiver"_n, "contract"_n, "action"_n, + {{ "alice"_n, "active"_n }}, + { 0x02, 0x02, 0x02, 0x02 } + } + } + } + } + }; + + fc::variant expected_response = fc::mutable_variant_object() + ("id", "b000000000000000000000000000000000000000000000000000000000000001") + ("number", 1) + ("previous_id", "0000000000000000000000000000000000000000000000000000000000000000") + ("status", "pending") + ("timestamp", "2000-01-01T00:00:00.000Z") + ("producer", "bp.one") + ("transactions", fc::variants({ + fc::mutable_variant_object() + ("id", "0000000000000000000000000000000000000000000000000000000000000001") + ("actions", fc::variants({ + fc::mutable_variant_object() + ("receiver", "receiver") + ("account", "contract") + ("action", "action") + ("authorization", fc::variants({ + fc::mutable_variant_object() + ("account", "alice") + ("permission", "active") + })) + ("data", "00000000") + , + fc::mutable_variant_object() + ("receiver", "receiver") + ("account", "contract") + ("action", "action") + ("authorization", fc::variants({ + fc::mutable_variant_object() + ("account", "alice") + ("permission", "active") + })) + ("data", "01010101") + , + fc::mutable_variant_object() + ("receiver", "receiver") + ("account", "contract") + ("action", "action") + ("authorization", fc::variants({ + fc::mutable_variant_object() + ("account", "alice") + ("permission", "active") + })) + ("data", "02020202") + })) + })) + ; + + mock_get_block = [&block_trace]( uint32_t height, const yield_function& ) -> get_block_t { + BOOST_TEST(height == 1); + return std::make_tuple(block_trace, false); + }; + + // simulate an inability to parse the parameters + mock_data_handler = [](const action_trace_v0&, const yield_function&) -> fc::variant { + return {}; + }; + + fc::variant actual_response = get_block_trace( 1 ); + + 
BOOST_TEST(to_kv(expected_response) == to_kv(actual_response), boost::test_tools::per_element()); + } + + BOOST_FIXTURE_TEST_CASE(lib_response, response_test_fixture) + { + auto block_trace = block_trace_v0 { + "b000000000000000000000000000000000000000000000000000000000000001"_h, + 1, + "0000000000000000000000000000000000000000000000000000000000000000"_h, + chain::block_timestamp_type(0), + "bp.one"_n, + {} + }; + + fc::variant expected_response = fc::mutable_variant_object() + ("id", "b000000000000000000000000000000000000000000000000000000000000001") + ("number", 1) + ("previous_id", "0000000000000000000000000000000000000000000000000000000000000000") + ("status", "irreversible") + ("timestamp", "2000-01-01T00:00:00.000Z") + ("producer", "bp.one") + ("transactions", fc::variants() ) + ; + + mock_get_block = [&block_trace]( uint32_t height, const yield_function& ) -> get_block_t { + BOOST_TEST(height == 1); + return std::make_tuple(block_trace, true); + }; + + fc::variant response = get_block_trace( 1 ); + BOOST_TEST(to_kv(expected_response) == to_kv(response), boost::test_tools::per_element()); + + } + + BOOST_FIXTURE_TEST_CASE(corrupt_block_data, response_test_fixture) + { + mock_get_block = []( uint32_t height, const yield_function& ) -> get_block_t { + BOOST_TEST(height == 1); + throw bad_data_exception("mock exception"); + }; + + BOOST_REQUIRE_THROW(get_block_trace( 1 ), bad_data_exception); + } + + BOOST_FIXTURE_TEST_CASE(missing_block_data, response_test_fixture) + { + mock_get_block = []( uint32_t height, const yield_function& ) -> get_block_t { + BOOST_TEST(height == 1); + return {}; + }; + + fc::variant null_response = get_block_trace( 1 ); + + BOOST_TEST(null_response.is_null()); + } + + BOOST_FIXTURE_TEST_CASE(yield_throws, response_test_fixture) + { + auto block_trace = block_trace_v0 { + "b000000000000000000000000000000000000000000000000000000000000001"_h, + 1, + "0000000000000000000000000000000000000000000000000000000000000000"_h, + chain::block_timestamp_type(0), + "bp.one"_n, + { + { + "0000000000000000000000000000000000000000000000000000000000000001"_h, + { + { + 0, + "receiver"_n, "contract"_n, "action"_n, + {{ "alice"_n, "active"_n }}, + { 0x00, 0x01, 0x02, 0x03 } + } + } + } + } + }; + + mock_get_block = [&block_trace]( uint32_t height, const yield_function& ) -> get_block_t { + BOOST_TEST(height == 1); + return std::make_tuple(block_trace, false); + }; + + int countdown = 3; + yield_function yield = [&]() { + if (countdown-- == 0) { + throw yield_exception("mock"); + } + }; + + BOOST_REQUIRE_THROW(get_block_trace( 1, yield ), yield_exception); + } + + BOOST_FIXTURE_TEST_CASE(yield_throws_from_get_block, response_test_fixture) + { + // no other yield calls will throw + yield_function yield = [&]() { + }; + + // simulate a yield throw inside get block + mock_get_block = []( uint32_t height, const yield_function& yield) -> get_block_t { + throw yield_exception("mock exception"); + }; + + + BOOST_REQUIRE_THROW(get_block_trace( 1, yield ), yield_exception); + } + +BOOST_AUTO_TEST_SUITE_END() diff --git a/plugins/trace_api_plugin/test/test_trace_file.cpp b/plugins/trace_api_plugin/test/test_trace_file.cpp new file mode 100644 index 00000000000..81f70c1ce6f --- /dev/null +++ b/plugins/trace_api_plugin/test/test_trace_file.cpp @@ -0,0 +1,658 @@ +#define BOOST_TEST_MODULE trace_trace_file +#include +#include +#include +#include +#include + +using namespace eosio; +using namespace eosio::trace_api; +using namespace eosio::trace_api::test_common; +namespace bfs = 
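The yield tests above rely only on the functor throwing to abort work partway through; in production the natural shape is a deadline check called between units of work. A plausible sketch (an assumed helper, with a generic exception standing in for `yield_exception` to keep it self-contained):

```cpp
#include <chrono>
#include <stdexcept>

// Returns a yield functor that throws once the time budget is exhausted.
inline auto make_deadline_yield(std::chrono::milliseconds budget) {
    const auto deadline = std::chrono::steady_clock::now() + budget;
    return [deadline]() {
        if (std::chrono::steady_clock::now() >= deadline)
            throw std::runtime_error("request exceeded time budget");  // stands in for yield_exception
    };
}
```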
boost::filesystem; +using open_state = slice_directory::open_state; + +namespace { + struct test_fixture { + + const block_trace_v0 bt { + "0000000000000000000000000000000000000000000000000000000000000001"_h, + 1, + "0000000000000000000000000000000000000000000000000000000000000003"_h, + chain::block_timestamp_type(1), + "bp.one"_n, + { + { + "0000000000000000000000000000000000000000000000000000000000000001"_h, + { + { + 0, + "eosio.token"_n, "eosio.token"_n, "transfer"_n, + {{ "alice"_n, "active"_n }}, + make_transfer_data( "alice"_n, "bob"_n, "0.0001 SYS"_t, "Memo!" ) + }, + { + 1, + "alice"_n, "eosio.token"_n, "transfer"_n, + {{ "alice"_n, "active"_n }}, + make_transfer_data( "alice"_n, "bob"_n, "0.0001 SYS"_t, "Memo!" ) + }, + { + 2, + "bob"_n, "eosio.token"_n, "transfer"_n, + {{ "alice"_n, "active"_n }}, + make_transfer_data( "alice"_n, "bob"_n, "0.0001 SYS"_t, "Memo!" ) + } + } + } + } + }; + + const block_trace_v0 bt2 { + "0000000000000000000000000000000000000000000000000000000000000002"_h, + 5, + "0000000000000000000000000000000000000000000000000000000000000005"_h, + chain::block_timestamp_type(2), + "bp.two"_n, + { + { + "f000000000000000000000000000000000000000000000000000000000000004"_h, + {} + } + } + }; + + const metadata_log_entry be1 { block_entry_v0 { + "b000000000000000000000000000000000000000000000000000000000000001"_h, 5, 0 + } }; + const metadata_log_entry le1 { lib_entry_v0 { 4 } }; + const metadata_log_entry be2 { block_entry_v0 { + "b000000000000000000000000000000000000000000000000000000000000002"_h, 7, 0 + } }; + const metadata_log_entry le2 { lib_entry_v0 { 5 } }; + }; + + struct test_store_provider : public store_provider { + test_store_provider(const bfs::path& slice_dir, uint32_t width, std::optional minimum_irreversible_history_blocks = std::optional()) + : store_provider(slice_dir, width, minimum_irreversible_history_blocks) { + } + using store_provider::scan_metadata_log_from; + using store_provider::read_data_log; + }; + + class vslice_datastream; + + struct vslice { + enum mode { read_mode, write_mode}; + vslice(mode m = write_mode) : _mode(m) {} + long tellp() const { + return _pos; + } + + void seek( long loc ) { + if (_mode == read_mode) { + if (loc > _buffer.size()) { + throw std::ios_base::failure( "read vslice unable to seek to: " + std::to_string(loc) + ", end is at: " + std::to_string(_buffer.size())); + } + } + _pos = loc; + } + + void seek_end( long loc ) { + _pos = _buffer.size(); + } + + void read( char* d, size_t n ) { + if( _pos + n > _buffer.size() ) { + throw std::ios_base::failure( "vslice unable to read " + std::to_string( n ) + " bytes; only can read " + std::to_string( _buffer.size() - _pos ) ); + } + std::memcpy( d, _buffer.data() + _pos, n); + _pos += n; + } + + void write( const char* d, size_t n ) { + if (_mode == read_mode) { + throw std::ios_base::failure( "read vslice should not have write called" ); + } + if (_buffer.size() < _pos + n) { + _buffer.resize(_pos + n); + } + std::memcpy( _buffer.data() + _pos, d, n); + _pos += n; + } + + void flush() { + _flush = true; + } + + void sync() { + _sync = true; + } + + vslice_datastream create_datastream(); + + std::vector _buffer; + mode _mode = write_mode; + long _pos = 0l; + bool _flush = false; + bool _sync = false; + }; + + class vslice_datastream { + public: + explicit vslice_datastream( vslice& vs ) : _vs(vs) {} + + void skip( size_t s ) { + std::vector d( s ); + read( &d[0], s ); + } + + bool read( char* d, size_t s ) { + _vs.read( d, s ); + return true; + } + + bool get( unsigned 
char& c ) { return get( *(char*)&c ); } + + bool get( char& c ) { return read(&c, 1); } + + private: + vslice& _vs; + }; + + inline vslice_datastream vslice::create_datastream() { + return vslice_datastream(*this); + } +} + +BOOST_AUTO_TEST_SUITE(slice_tests) + BOOST_FIXTURE_TEST_CASE(write_data_trace, test_fixture) + { + vslice vs; + const auto offset = append_store( bt, vs ); + BOOST_REQUIRE_EQUAL(offset,0); + + const auto offset2 = append_store( bt2, vs ); + BOOST_REQUIRE(offset < offset2); + + vs._pos = offset; + const auto bt_returned = extract_store<block_trace_v0>( vs ); + BOOST_REQUIRE(bt_returned == bt); + + vs._pos = offset2; + const auto bt_returned2 = extract_store<block_trace_v0>( vs ); + BOOST_REQUIRE(bt_returned2 == bt2); + } + + BOOST_FIXTURE_TEST_CASE(write_metadata_trace, test_fixture) + { + vslice vs; + const auto offset = append_store( be1, vs ); + auto next_offset = vs._pos; + BOOST_REQUIRE(offset < next_offset); + const auto offset2 = append_store( le1, vs ); + BOOST_REQUIRE(next_offset <= offset2); + BOOST_REQUIRE(offset2 < vs._pos); + next_offset = vs._pos; + const auto offset3 = append_store( be2, vs ); + BOOST_REQUIRE(next_offset <= offset3); + BOOST_REQUIRE(offset3 < vs._pos); + next_offset = vs._pos; + const auto offset4 = append_store( le2, vs ); + BOOST_REQUIRE(next_offset <= offset4); + BOOST_REQUIRE(offset4 < vs._pos); + + vs._pos = offset; + const auto be_returned1 = extract_store<metadata_log_entry>( vs ); + BOOST_REQUIRE(be_returned1.contains<block_entry_v0>()); + const auto real_be_returned1 = be_returned1.get<block_entry_v0>(); + const auto real_be1 = be1.get<block_entry_v0>(); + BOOST_REQUIRE(real_be_returned1 == real_be1); + + vs._pos = offset2; + const auto le_returned1 = extract_store<metadata_log_entry>( vs ); + BOOST_REQUIRE(le_returned1.contains<lib_entry_v0>()); + const auto real_le_returned1 = le_returned1.get<lib_entry_v0>(); + const auto real_le1 = le1.get<lib_entry_v0>(); + BOOST_REQUIRE(real_le_returned1 == real_le1); + + vs._pos = offset3; + const auto be_returned2 = extract_store<metadata_log_entry>( vs ); + BOOST_REQUIRE(be_returned2.contains<block_entry_v0>()); + const auto real_be_returned2 = be_returned2.get<block_entry_v0>(); + const auto real_be2 = be2.get<block_entry_v0>(); + BOOST_REQUIRE(real_be_returned2 == real_be2); + + vs._pos = offset4; + const auto le_returned2 = extract_store<metadata_log_entry>( vs ); + BOOST_REQUIRE(le_returned2.contains<lib_entry_v0>()); + const auto real_le_returned2 = le_returned2.get<lib_entry_v0>(); + const auto real_le2 = le2.get<lib_entry_v0>(); + BOOST_REQUIRE(real_le_returned2 == real_le2); + } + + BOOST_FIXTURE_TEST_CASE(slice_number, test_fixture) + { + fc::temp_directory tempdir; + slice_directory sd(tempdir.path(), 100, std::optional<uint32_t>()); + BOOST_REQUIRE_EQUAL(sd.slice_number(99), 0); + BOOST_REQUIRE_EQUAL(sd.slice_number(100), 1); + BOOST_REQUIRE_EQUAL(sd.slice_number(1599), 15); + slice_directory sd2(tempdir.path(), 0x10, std::optional<uint32_t>()); + BOOST_REQUIRE_EQUAL(sd2.slice_number(0xf), 0); + BOOST_REQUIRE_EQUAL(sd2.slice_number(0x100), 0x10); + BOOST_REQUIRE_EQUAL(sd2.slice_number(0x233), 0x23); + } + + BOOST_FIXTURE_TEST_CASE(slice_file, test_fixture) + { + fc::temp_directory tempdir; + slice_directory sd(tempdir.path(), 100, std::optional<uint32_t>()); + fc::cfile slice; + + // create trace slices + for (uint i = 0; i < 9; ++i) { + bool found = sd.find_or_create_trace_slice(i, open_state::write, slice); + BOOST_REQUIRE(!found); + bfs::path fp = slice.get_file_path(); + BOOST_REQUIRE_EQUAL(fp.parent_path().generic_string(), tempdir.path().generic_string()); + const std::string expected_filename = "trace_0000000" + std::to_string(i) + "00-0000000" + std::to_string(i+1) + "00.log"; + BOOST_REQUIRE_EQUAL(fp.filename().generic_string(), expected_filename); + BOOST_REQUIRE(slice.is_open()); + 
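+ // a newly created trace slice should be empty, with the write position at the start of the file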
BOOST_REQUIRE_EQUAL(bfs::file_size(fp), 0); + BOOST_REQUIRE_EQUAL(slice.tellp(), 0); + slice.close(); + } + + // create trace index slices + for (uint i = 0; i < 9; ++i) { + bool found = sd.find_or_create_index_slice(i, open_state::write, slice); + BOOST_REQUIRE(!found); + fc::path fp = slice.get_file_path(); + BOOST_REQUIRE_EQUAL(fp.parent_path().generic_string(), tempdir.path().generic_string()); + const std::string expected_filename = "trace_index_0000000" + std::to_string(i) + "00-0000000" + std::to_string(i+1) + "00.log"; + BOOST_REQUIRE_EQUAL(fp.filename().generic_string(), expected_filename); + BOOST_REQUIRE(slice.is_open()); + slice_directory::index_header h; + const auto data = fc::raw::pack(h); + BOOST_REQUIRE_EQUAL(bfs::file_size(fp), data.size()); + BOOST_REQUIRE_EQUAL(slice.tellp(), data.size()); + slice.close(); + } + + // reopen trace slice for append + bool found = sd.find_or_create_trace_slice(0, open_state::write, slice); + BOOST_REQUIRE(found); + fc::path fp = slice.get_file_path(); + BOOST_REQUIRE_EQUAL(fp.parent_path().generic_string(), tempdir.path().generic_string()); + std::string expected_filename = "trace_0000000000-0000000100.log"; + BOOST_REQUIRE_EQUAL(fp.filename().generic_string(), expected_filename); + BOOST_REQUIRE(slice.is_open()); + BOOST_REQUIRE_EQUAL(bfs::file_size(fp), 0); + BOOST_REQUIRE_EQUAL(slice.tellp(), 0); + uint64_t offset = append_store(bt, slice); + BOOST_REQUIRE_EQUAL(offset, 0); + auto data = fc::raw::pack(bt); + BOOST_REQUIRE(slice.tellp() > 0); + BOOST_REQUIRE_EQUAL(data.size(), slice.tellp()); + BOOST_REQUIRE_EQUAL(bfs::file_size(fp), slice.tellp()); + uint64_t trace_file_size = bfs::file_size(fp); + slice.close(); + + // open same file for read + found = sd.find_or_create_trace_slice(0, open_state::read, slice); + BOOST_REQUIRE(found); + fp = slice.get_file_path(); + BOOST_REQUIRE_EQUAL(fp.filename().generic_string(), expected_filename); + BOOST_REQUIRE(slice.is_open()); + BOOST_REQUIRE_EQUAL(bfs::file_size(fp), trace_file_size); + BOOST_REQUIRE_EQUAL(slice.tellp(), 0); + slice.close(); + + // open same file for append again + found = sd.find_or_create_trace_slice(0, open_state::write, slice); + BOOST_REQUIRE(found); + fp = slice.get_file_path(); + BOOST_REQUIRE_EQUAL(fp.filename().generic_string(), expected_filename); + BOOST_REQUIRE(slice.is_open()); + BOOST_REQUIRE_EQUAL(bfs::file_size(fp), trace_file_size); + BOOST_REQUIRE_EQUAL(slice.tellp(), trace_file_size); + slice.close(); + + // reopen trace index slice for append + found = sd.find_or_create_index_slice(1, open_state::write, slice); + BOOST_REQUIRE(found); + fp = slice.get_file_path(); + BOOST_REQUIRE_EQUAL(fp.parent_path().generic_string(), tempdir.path().generic_string()); + expected_filename = "trace_index_0000000100-0000000200.log"; + BOOST_REQUIRE_EQUAL(fp.filename().generic_string(), expected_filename); + BOOST_REQUIRE(slice.is_open()); + slice_directory::index_header h; + data = fc::raw::pack(h); + const uint64_t header_size = data.size(); + BOOST_REQUIRE_EQUAL(bfs::file_size(fp), header_size); + BOOST_REQUIRE_EQUAL(slice.tellp(), header_size); + offset = append_store(be1, slice); + BOOST_REQUIRE_EQUAL(offset, header_size); + data = fc::raw::pack(be1); + const auto be1_size = data.size(); + BOOST_REQUIRE_EQUAL(header_size + be1_size, slice.tellp()); + BOOST_REQUIRE_EQUAL(bfs::file_size(fp), slice.tellp()); + uint64_t index_file_size = bfs::file_size(fp); + slice.close(); + + found = sd.find_or_create_index_slice(1, open_state::read, slice); + BOOST_REQUIRE(found); + fp = 
slice.get_file_path(); + BOOST_REQUIRE_EQUAL(fp.filename().generic_string(), expected_filename); + BOOST_REQUIRE(slice.is_open()); + BOOST_REQUIRE_EQUAL(bfs::file_size(fp), header_size + be1_size); + BOOST_REQUIRE_EQUAL(slice.tellp(), header_size); + slice.close(); + + found = sd.find_or_create_index_slice(1, open_state::write, slice); + BOOST_REQUIRE(found); + fp = slice.get_file_path(); + BOOST_REQUIRE_EQUAL(fp.filename().generic_string(), expected_filename); + BOOST_REQUIRE(slice.is_open()); + BOOST_REQUIRE_EQUAL(bfs::file_size(fp), header_size + be1_size); + BOOST_REQUIRE_EQUAL(slice.tellp(), header_size + be1_size); + offset = append_store(le1, slice); + BOOST_REQUIRE_EQUAL(offset, header_size + be1_size); + data = fc::raw::pack(le1); + const auto le1_size = data.size(); + BOOST_REQUIRE_EQUAL(header_size + be1_size + le1_size, slice.tellp()); + BOOST_REQUIRE_EQUAL(bfs::file_size(fp), slice.tellp()); + slice.close(); + + found = sd.find_or_create_index_slice(1, open_state::read, slice); + BOOST_REQUIRE(found); + fp = slice.get_file_path(); + BOOST_REQUIRE_EQUAL(fp.filename().generic_string(), expected_filename); + BOOST_REQUIRE(slice.is_open()); + BOOST_REQUIRE_EQUAL(bfs::file_size(fp), header_size + be1_size + le1_size); + BOOST_REQUIRE_EQUAL(slice.tellp(), header_size); + slice.close(); + } + + BOOST_FIXTURE_TEST_CASE(slice_file_find_test, test_fixture) + { + fc::temp_directory tempdir; + slice_directory sd(tempdir.path(), 100, std::optional<uint32_t>()); + fc::cfile slice; + + // create trace slice + bool found = sd.find_or_create_trace_slice(1, open_state::write, slice); + BOOST_REQUIRE(!found); + bfs::path fp = slice.get_file_path(); + BOOST_REQUIRE_EQUAL(fp.parent_path().generic_string(), tempdir.path().generic_string()); + const std::string expected_filename = "trace_0000000100-0000000200.log"; + BOOST_REQUIRE_EQUAL(fp.filename().generic_string(), expected_filename); + BOOST_REQUIRE(slice.is_open()); + BOOST_REQUIRE_EQUAL(bfs::file_size(fp), 0); + BOOST_REQUIRE_EQUAL(slice.tellp(), 0); + slice.close(); + + // find trace slice (and open) + found = sd.find_trace_slice(1, open_state::write, slice); + BOOST_REQUIRE(found); + fp = slice.get_file_path(); + BOOST_REQUIRE_EQUAL(fp.parent_path().generic_string(), tempdir.path().generic_string()); + BOOST_REQUIRE_EQUAL(fp.filename().generic_string(), expected_filename); + BOOST_REQUIRE(slice.is_open()); + BOOST_REQUIRE_EQUAL(bfs::file_size(fp), 0); + slice.close(); + + // find trace slice (and don't open) + found = sd.find_trace_slice(1, open_state::write, slice, false); + BOOST_REQUIRE(found); + fp = slice.get_file_path(); + BOOST_REQUIRE_EQUAL(fp.parent_path().generic_string(), tempdir.path().generic_string()); + BOOST_REQUIRE_EQUAL(fp.filename().generic_string(), expected_filename); + BOOST_REQUIRE(!slice.is_open()); + BOOST_REQUIRE_EQUAL(bfs::file_size(fp), 0); + slice.close(); + } + + void verify_directory_contents(const bfs::path& tempdir, std::set<bfs::path> expected_files) { + std::set<bfs::path> unexpected_files; + for (bfs::directory_iterator itr(tempdir); itr != bfs::directory_iterator(); ++itr) { + const auto filename = itr->path().filename(); + if (expected_files.erase(filename) < 1) { + unexpected_files.insert(filename); + } + } + if (expected_files.size() + unexpected_files.size() == 0) + return; + + std::string msg; + if (expected_files.size()) { + msg += " Expected the following files to be present, but were not:"; + } + bool comma = false; + for(auto file : expected_files) { + if (comma) + msg += ","; + msg += " " + file.generic_string(); + comma = true; + } + if 
(unexpected_files.size()) { + msg += " Did not expect the following files to be present, but they were:"; + } + comma = false; + for(auto file : unexpected_files) { + if (comma) + msg += ","; + msg += " " + file.generic_string(); + comma = true; + } + BOOST_FAIL(msg); + } + + BOOST_FIXTURE_TEST_CASE(slice_dir_cleanup_height_less_than_width, test_fixture) + { + fc::temp_directory tempdir; + const uint32_t width = 10; + const uint32_t min_saved_blocks = 5; + slice_directory sd(tempdir.path(), width, std::optional<uint32_t>(min_saved_blocks)); + fc::cfile file; + + // verify it cleans up when there is just an index file, just a trace file, or when both are there + // verify it cleans up all slices that need to be cleaned + std::set<bfs::path> files; + BOOST_REQUIRE(!sd.find_or_create_index_slice(0, open_state::read, file)); + files.insert(file.get_file_path().filename()); + verify_directory_contents(tempdir.path(), files); + BOOST_REQUIRE(!sd.find_or_create_trace_slice(0, open_state::read, file)); + files.insert(file.get_file_path().filename()); + BOOST_REQUIRE(!sd.find_or_create_index_slice(1, open_state::read, file)); + files.insert(file.get_file_path().filename()); + BOOST_REQUIRE(!sd.find_or_create_trace_slice(2, open_state::read, file)); + files.insert(file.get_file_path().filename()); + BOOST_REQUIRE(!sd.find_or_create_index_slice(3, open_state::read, file)); + files.insert(file.get_file_path().filename()); + BOOST_REQUIRE(!sd.find_or_create_index_slice(4, open_state::read, file)); + const auto index4 = file.get_file_path().filename(); + files.insert(index4); + BOOST_REQUIRE(!sd.find_or_create_trace_slice(4, open_state::read, file)); + const auto trace4 = file.get_file_path().filename(); + files.insert(trace4); + BOOST_REQUIRE(!sd.find_or_create_index_slice(5, open_state::read, file)); + const auto index5 = file.get_file_path().filename(); + files.insert(index5); + BOOST_REQUIRE(!sd.find_or_create_trace_slice(6, open_state::read, file)); + const auto trace6 = file.get_file_path().filename(); + files.insert(trace6); + verify_directory_contents(tempdir.path(), files); + + // verify that the current_slice and the previous are maintained as long as lib - min_saved_blocks is part of previous slice + uint32_t current_slice = 6; + uint32_t lib = current_slice * width; + sd.cleanup_old_slices(lib); + std::set<bfs::path> files2; + files2.insert(index5); + files2.insert(trace6); + verify_directory_contents(tempdir.path(), files2); + + // saved blocks still in previous slice + lib += min_saved_blocks - 1; // current_slice * width + min_saved_blocks - 1 + sd.cleanup_old_slices(lib); + verify_directory_contents(tempdir.path(), files2); + + // now all saved blocks in current slice + lib += 1; // current_slice * width + min_saved_blocks + sd.cleanup_old_slices(lib); + std::set<bfs::path> files3; + files3.insert(trace6); + verify_directory_contents(tempdir.path(), files3); + + // moving lib into next slice, so 1 saved block still in 6th slice + lib += width - 1; + sd.cleanup_old_slices(lib); + verify_directory_contents(tempdir.path(), files3); + + // moved last saved block out of 6th slice, so 6th slice is cleaned up + lib += 1; + sd.cleanup_old_slices(lib); + verify_directory_contents(tempdir.path(), std::set<bfs::path>()); + } + + BOOST_FIXTURE_TEST_CASE(store_provider_write_read, test_fixture) + { + fc::temp_directory tempdir; + test_store_provider sp(tempdir.path(), 100); + sp.append(bt); + sp.append_lib(54); + sp.append(bt2); + const uint32_t bt_bn = bt.number; + bool found_block = false; + bool lib_seen = false; + const uint64_t first_offset = sp.scan_metadata_log_from(9, 0, 
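+ // the visitor below returns true to continue the scan and false to stop early; the trailing lambda is the yield function, which may throw to abort the scan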
[&](const metadata_log_entry& e) -> bool { + if (e.contains<block_entry_v0>()) { + const auto& block = e.get<block_entry_v0>(); + if (block.number == bt_bn) { + BOOST_REQUIRE(!found_block); + found_block = true; + } + } else if (e.contains<lib_entry_v0>()) { + auto best_lib = e.get<lib_entry_v0>(); + BOOST_REQUIRE(!lib_seen); + BOOST_REQUIRE_EQUAL(best_lib.lib, 54); + lib_seen = true; + return false; + } + return true; + }, []() {}); + BOOST_REQUIRE(found_block); + BOOST_REQUIRE(lib_seen); + + std::vector<uint32_t> block_nums; + std::vector<uint64_t> block_offsets; + lib_seen = false; + uint64_t offset = sp.scan_metadata_log_from(9, 0, [&](const metadata_log_entry& e) -> bool { + if (e.contains<block_entry_v0>()) { + const auto& block = e.get<block_entry_v0>(); + block_nums.push_back(block.number); + block_offsets.push_back(block.offset); + } else if (e.contains<lib_entry_v0>()) { + auto best_lib = e.get<lib_entry_v0>(); + BOOST_REQUIRE(!lib_seen); + BOOST_REQUIRE_EQUAL(best_lib.lib, 54); + lib_seen = true; + } + return true; + }, []() {}); + BOOST_REQUIRE(lib_seen); + BOOST_REQUIRE_EQUAL(block_nums.size(), 2); + BOOST_REQUIRE_EQUAL(block_nums[0], bt.number); + BOOST_REQUIRE_EQUAL(block_nums[1], bt2.number); + BOOST_REQUIRE_EQUAL(block_offsets.size(), 2); + BOOST_REQUIRE(block_offsets[0] < block_offsets[1]); + BOOST_REQUIRE(first_offset < offset); + + std::optional<block_trace_v0> bt_data = sp.read_data_log(block_nums[0], block_offsets[0]); + BOOST_REQUIRE(bt_data); + BOOST_REQUIRE_EQUAL(*bt_data, bt); + + bt_data = sp.read_data_log(block_nums[1], block_offsets[1]); + BOOST_REQUIRE(bt_data); + BOOST_REQUIRE_EQUAL(*bt_data, bt2); + + block_nums.clear(); + block_offsets.clear(); + lib_seen = false; + int counter = 0; + try { + offset = sp.scan_metadata_log_from(9, 0, [&](const metadata_log_entry& e) -> bool { + if (e.contains<block_entry_v0>()) { + const auto& block = e.get<block_entry_v0>(); + block_nums.push_back(block.number); + block_offsets.push_back(block.offset); + } else if (e.contains<lib_entry_v0>()) { + auto best_lib = e.get<lib_entry_v0>(); + BOOST_REQUIRE(!lib_seen); + BOOST_REQUIRE_EQUAL(best_lib.lib, 54); + lib_seen = true; + } + return true; + }, [&counter]() { + if( ++counter == 3 ) { + throw yield_exception(""); + } + }); + BOOST_FAIL("Should not have completed scan"); + } catch (const yield_exception& ex) { + } + BOOST_REQUIRE(lib_seen); + BOOST_REQUIRE_EQUAL(block_nums.size(), 1); + BOOST_REQUIRE_EQUAL(block_nums[0], bt.number); + BOOST_REQUIRE_EQUAL(block_offsets.size(), 1); + BOOST_REQUIRE(first_offset < offset); + } + + BOOST_FIXTURE_TEST_CASE(test_get_block, test_fixture) + { + fc::temp_directory tempdir; + store_provider sp(tempdir.path(), 100, std::optional<uint32_t>()); + sp.append(bt); + sp.append_lib(1); + sp.append(bt2); + int count = 0; + get_block_t block1 = sp.get_block(1,[&count]() { + if (++count >= 3) { + throw yield_exception(""); + } + }); + BOOST_REQUIRE(block1); + BOOST_REQUIRE(std::get<1>(*block1)); + const auto block1_bt = std::get<0>(*block1); + BOOST_REQUIRE_EQUAL(block1_bt, bt); + + count = 0; + get_block_t block2 = sp.get_block(5,[&count]() { + if (++count >= 4) { + throw yield_exception(""); + } + }); + BOOST_REQUIRE(block2); + BOOST_REQUIRE(!std::get<1>(*block2)); + const auto block2_bt = std::get<0>(*block2); + BOOST_REQUIRE_EQUAL(block2_bt, bt2); + + count = 0; + try { + sp.get_block(5,[&count]() { + if (++count >= 3) { + throw yield_exception(""); + } + }); + BOOST_FAIL("Should not have completed scan"); + } catch (const yield_exception& ex) { + } + + count = 0; + block2 = sp.get_block(2,[&count]() { + if (++count >= 4) { + throw yield_exception(""); + } + }); + BOOST_REQUIRE(!block2); + } + +BOOST_AUTO_TEST_SUITE_END() diff --git 
a/plugins/trace_api_plugin/trace_api.swagger.yaml b/plugins/trace_api_plugin/trace_api.swagger.yaml new file mode 100644 index 00000000000..3c6875402ff --- /dev/null +++ b/plugins/trace_api_plugin/trace_api.swagger.yaml @@ -0,0 +1,52 @@ +openapi: 3.0.0 +info: + title: Trace API + version: 1.0.0 + license: + name: MIT + url: https://opensource.org/licenses/MIT + contact: + url: https://eos.io +servers: + - url: '{protocol}://{host}:{port}/v1/' + variables: + protocol: + enum: + - http + - https + default: http + host: + default: localhost + port: + default: "8080" +components: + schemas: {} +paths: + /trace_api/get_block: + post: + description: Returns a block object containing retired actions and related metadata. + operationId: get_block + requestBody: + content: + application/json: + schema: + type: object + required: + - block_num + properties: + block_num: + type: string + description: Provide a `block number` + responses: + "200": + description: OK - valid response payload + content: + application/json: + schema: + $ref: "https://eosio.github.io/schemata/v2.0/oas/Block.yaml" + "400": + description: Error - requested block number is invalid (not a number, larger than max int) + "404": + description: Error - requested data not present on node + "500": + description: Error - exceptional condition while processing get_block; e.g. corrupt files diff --git a/plugins/trace_api_plugin/trace_api_plugin.cpp b/plugins/trace_api_plugin/trace_api_plugin.cpp new file mode 100644 index 00000000000..473b1e2c0d3 --- /dev/null +++ b/plugins/trace_api_plugin/trace_api_plugin.cpp @@ -0,0 +1,375 @@ +#include <eosio/trace_api/trace_api_plugin.hpp> + +#include <eosio/trace_api/abi_data_handler.hpp> +#include <eosio/trace_api/request_handler.hpp> +#include <eosio/trace_api/chain_extraction.hpp> +#include <eosio/trace_api/store_provider.hpp> + +#include <eosio/trace_api/configuration_utils.hpp> + +#include <boost/signals2/connection.hpp> + +using namespace eosio::trace_api; +using namespace eosio::trace_api::configuration_utils; +using boost::signals2::scoped_connection; + +namespace { + appbase::abstract_plugin& plugin_reg = app().register_plugin<trace_api_plugin>(); + + const std::string logger_name("trace_api"); + fc::logger _log; + + std::string to_detail_string(const std::exception_ptr& e) { + try { + std::rethrow_exception(e); + } catch (fc::exception& er) { + return er.to_detail_string(); + } catch (const std::exception& e) { + fc::exception fce( + FC_LOG_MESSAGE(warn, "std::exception: ${what}: ", ("what", e.what())), + fc::std_exception_code, + BOOST_CORE_TYPEID(e).name(), + e.what()); + return fce.to_detail_string(); + } catch (...) { + fc::unhandled_exception ue( + FC_LOG_MESSAGE(warn, "unknown: ",), + std::current_exception()); + return ue.to_detail_string(); + } + } + + void log_exception( const exception_with_context& e, fc::log_level level ) { + if( _log.is_enabled( level ) ) { + auto detail_string = to_detail_string(std::get<0>(e)); + auto context = fc::log_context( level, std::get<1>(e), std::get<2>(e), std::get<3>(e) ); + _log.log(fc::log_message( context, detail_string )); + } + } + + /** + * The exception_handler provided to the extraction sub-system throws `yield_exception` as a signal that + * something has gone wrong and the extraction process needs to terminate immediately. + * + * This templated method is used to wrap signal handlers for `chain_controller` so that the plugin-internal + * `yield_exception` can be translated to a `chain::controller_emit_signal_exception`. + * + * The goal is that the currently applied block will be rolled back before the shutdown takes effect, leaving + * the system in a better state for restart. 
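+ *
+ * For example, a signal handler wrapped as
+ *   emit_killer( [&](){ extraction->signal_accepted_block( p ); } );
+ * will surface a yield_exception thrown during extraction as a
+ * chain::controller_emit_signal_exception.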
+ */ + template<typename F> + void emit_killer(F&& f) { + try { + f(); + } catch (const yield_exception& ) { + EOS_THROW(chain::controller_emit_signal_exception, "Trace API encountered an Error which it cannot recover from. Please resolve the error and relaunch the process") + } + } + + template<typename StoreProvider> + struct shared_store_provider { + shared_store_provider(const std::shared_ptr<StoreProvider>& store) + :store(store) + {} + + void append( const block_trace_v0& trace ) { + store->append(trace); + } + + void append_lib( uint32_t new_lib ) { + store->append_lib(new_lib); + } + + get_block_t get_block(uint32_t height, const yield_function& yield) { + return store->get_block(height, yield); + } + + std::shared_ptr<StoreProvider> store; + }; +} + +namespace eosio { + +/** + * A common source for information shared between the extraction process and the RPC process + */ +struct trace_api_common_impl { + static void set_program_options(appbase::options_description& cli, appbase::options_description& cfg) { + auto cfg_options = cfg.add_options(); + cfg_options("trace-dir", bpo::value<boost::filesystem::path>()->default_value("traces"), + "the location of the trace directory (absolute path or relative to application data dir)"); + cfg_options("trace-slice-stride", bpo::value<uint32_t>()->default_value(10'000), + "the number of blocks each \"slice\" of trace data will contain on the filesystem"); + cfg_options("trace-minimum-irreversible-history-blocks", boost::program_options::value<int32_t>()->default_value(-1), + "Number of blocks to ensure are kept past LIB for retrieval before \"slice\" files can be automatically removed.\n" + "A value of -1 indicates that automatic removal of \"slice\" files will be turned off."); + } + + void plugin_initialize(const appbase::variables_map& options) { + auto dir_option = options.at("trace-dir").as<boost::filesystem::path>(); + if (dir_option.is_relative()) + trace_dir = app().data_dir() / dir_option; + else + trace_dir = dir_option; + + slice_stride = options.at("trace-slice-stride").as<uint32_t>(); + + const int32_t blocks = options.at("trace-minimum-irreversible-history-blocks").as<int32_t>(); + EOS_ASSERT(blocks >= -1, chain::plugin_config_exception, + "\"trace-minimum-irreversible-history-blocks\" must be greater than or equal to -1."); + if (blocks > manual_slice_file_value) { + minimum_irreversible_history_blocks = blocks; + } + + store = std::make_shared<store_provider>(trace_dir, slice_stride, minimum_irreversible_history_blocks); + } + + // common configuration parameters + boost::filesystem::path trace_dir; + uint32_t slice_stride = 0; + + std::optional<uint32_t> minimum_irreversible_history_blocks; + static constexpr int32_t manual_slice_file_value = -1; + + std::shared_ptr<store_provider> store; +}; + +/** + * Interface with the RPC process + */ +struct trace_api_rpc_plugin_impl : public std::enable_shared_from_this<trace_api_rpc_plugin_impl> +{ + trace_api_rpc_plugin_impl( const std::shared_ptr<trace_api_common_impl>& common ) + :common(common) {} + + static void set_program_options(appbase::options_description& cli, appbase::options_description& cfg) { + auto cfg_options = cfg.add_options(); + cfg_options("trace-rpc-abi", bpo::value<std::vector<std::string>>()->composing(), + "ABIs used when decoding trace RPC responses.\n" + "There must be at least one ABI specified OR the flag trace-no-abis must be used.\n" + "ABIs are specified as \"Key=Value\" pairs in the form <account-name>=<abi-def>\n" + "Where <abi-def> can be:\n" + " an absolute path to a file containing a valid JSON-encoded ABI\n" + " a relative path from `data-dir` to a file containing a valid JSON-encoded ABI\n" + ); + cfg_options("trace-no-abis", + "Use to indicate that the RPC responses will not use ABIs.\n" + "Failure to specify this option when there are no 
trace-rpc-abi configurations will result in an Error.\n" + "This option is mutually exclusive with trace-rpc-abi" + ); + } + + void plugin_initialize(const appbase::variables_map& options) { + std::shared_ptr<abi_data_handler> data_handler = std::make_shared<abi_data_handler>([](const exception_with_context& e){ + log_exception(e, fc::log_level::debug); + }); + + if( options.count("trace-rpc-abi") ) { + EOS_ASSERT(options.count("trace-no-abis") == 0, chain::plugin_config_exception, + "Trace API is configured with ABIs however trace-no-abis is set"); + const std::vector<std::string> key_value_pairs = options["trace-rpc-abi"].as<std::vector<std::string>>(); + for (const auto& entry : key_value_pairs) { + try { + auto kv = parse_kv_pairs(entry); + auto account = chain::name(kv.first); + auto abi = abi_def_from_file(kv.second, app().data_dir()); + data_handler->add_abi(account, abi); + } catch (...) { + elog("Malformed trace-rpc-abi provider: \"${val}\"", ("val", entry)); + throw; + } + } + } else { + EOS_ASSERT(options.count("trace-no-abis") != 0, chain::plugin_config_exception, + "Trace API is not configured with ABIs and trace-no-abis is not set"); + } + + req_handler = std::make_shared<request_handler_t>( + shared_store_provider<store_provider>(common->store), + abi_data_handler::shared_provider(data_handler) + ); + } + + void plugin_startup() { + auto& http = app().get_plugin<http_plugin>(); + http.add_handler("/v1/trace_api/get_block", [wthis=weak_from_this()](std::string, std::string body, url_response_callback cb){ + auto that = wthis.lock(); + if (!that) { + return; + } + + auto block_number = ([&body]() -> std::optional<uint32_t> { + if (body.empty()) { + return {}; + } + + try { + auto input = fc::json::from_string(body); + auto block_num = input.get_object()["block_num"].as_uint64(); + if (block_num > std::numeric_limits<uint32_t>::max()) { + return {}; + } + return block_num; + } catch (...) { + return {}; + } + })(); + + if (!block_number) { + error_results results{400, "Bad or missing block_num"}; + cb( 400, fc::variant( results )); + return; + } + + try { + + auto resp = that->req_handler->get_block_trace(*block_number); + if (resp.is_null()) { + error_results results{404, "Block trace missing"}; + cb( 404, fc::variant( results )); + } else { + cb( 200, std::move(resp) ); + } + } catch (...) 
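+ // any other failure is handed to the shared http_plugin exception handling below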
{ + http_plugin::handle_exception("trace_api", "get_block", body, cb); + } + }); + } + + void plugin_shutdown() { + } + + std::shared_ptr<trace_api_common_impl> common; + + using request_handler_t = request_handler<shared_store_provider<store_provider>, abi_data_handler::shared_provider>; + std::shared_ptr<request_handler_t> req_handler; +}; + +struct trace_api_plugin_impl { + trace_api_plugin_impl( const std::shared_ptr<trace_api_common_impl>& common ) + :common(common) {} + + static void set_program_options(appbase::options_description& cli, appbase::options_description& cfg) { + auto cfg_options = cfg.add_options(); + } + + void plugin_initialize(const appbase::variables_map& options) { + auto log_exceptions_and_shutdown = [](const exception_with_context& e) { + log_exception(e, fc::log_level::error); + app().quit(); + throw yield_exception("shutting down"); + }; + extraction = std::make_shared<chain_extraction_t>(shared_store_provider<store_provider>(common->store), log_exceptions_and_shutdown); + + auto& chain = app().find_plugin<chain_plugin>()->chain(); + + applied_transaction_connection.emplace( + chain.applied_transaction.connect([this](std::tuple<const chain::transaction_trace_ptr&, const chain::signed_transaction&> t) { + emit_killer([&](){ + extraction->signal_applied_transaction(std::get<0>(t), std::get<1>(t)); + }); + })); + + accepted_block_connection.emplace( + chain.accepted_block.connect([this](const chain::block_state_ptr& p) { + emit_killer([&](){ + extraction->signal_accepted_block(p); + }); + })); + + irreversible_block_connection.emplace( + chain.irreversible_block.connect([this](const chain::block_state_ptr& p) { + emit_killer([&](){ + extraction->signal_irreversible_block(p); + }); + })); + + } + + void plugin_startup() { + } + + void plugin_shutdown() { + } + + std::shared_ptr<trace_api_common_impl> common; + + using chain_extraction_t = chain_extraction_impl_type<shared_store_provider<store_provider>>; + std::shared_ptr<chain_extraction_t> extraction; + + fc::optional<scoped_connection> applied_transaction_connection; + fc::optional<scoped_connection> accepted_block_connection; + fc::optional<scoped_connection> irreversible_block_connection; +}; + +trace_api_plugin::trace_api_plugin() +{} + +trace_api_plugin::~trace_api_plugin() +{} + +void trace_api_plugin::set_program_options(appbase::options_description& cli, appbase::options_description& cfg) { + trace_api_common_impl::set_program_options(cli, cfg); + trace_api_plugin_impl::set_program_options(cli, cfg); + trace_api_rpc_plugin_impl::set_program_options(cli, cfg); +} + +void trace_api_plugin::plugin_initialize(const appbase::variables_map& options) { + auto common = std::make_shared<trace_api_common_impl>(); + common->plugin_initialize(options); + + my = std::make_shared<trace_api_plugin_impl>(common); + my->plugin_initialize(options); + + rpc = std::make_shared<trace_api_rpc_plugin_impl>(common); + rpc->plugin_initialize(options); +} + +void trace_api_plugin::plugin_startup() { + handle_sighup(); // setup logging + + my->plugin_startup(); + rpc->plugin_startup(); +} + +void trace_api_plugin::plugin_shutdown() { + my->plugin_shutdown(); + rpc->plugin_shutdown(); +} + +void trace_api_plugin::handle_sighup() { + fc::logger::update( logger_name, _log ); +} + +trace_api_rpc_plugin::trace_api_rpc_plugin() +{} + +trace_api_rpc_plugin::~trace_api_rpc_plugin() +{} + +void trace_api_rpc_plugin::set_program_options(appbase::options_description& cli, appbase::options_description& cfg) { + trace_api_common_impl::set_program_options(cli, cfg); + trace_api_rpc_plugin_impl::set_program_options(cli, cfg); +} + +void trace_api_rpc_plugin::plugin_initialize(const appbase::variables_map& options) { + auto common = std::make_shared<trace_api_common_impl>(); + common->plugin_initialize(options); + + rpc = std::make_shared<trace_api_rpc_plugin_impl>(common); + rpc->plugin_initialize(options); +} + +void trace_api_rpc_plugin::plugin_startup() { + rpc->plugin_startup(); +} + +void 
trace_api_rpc_plugin::plugin_shutdown() { + rpc->plugin_shutdown(); +} + +void trace_api_rpc_plugin::handle_sighup() { + fc::logger::update( logger_name, _log ); +} + +} \ No newline at end of file diff --git a/programs/nodeos/CMakeLists.txt b/programs/nodeos/CMakeLists.txt index 0b1b5fed649..935f459390d 100644 --- a/programs/nodeos/CMakeLists.txt +++ b/programs/nodeos/CMakeLists.txt @@ -52,6 +52,7 @@ target_link_libraries( ${NODE_EXECUTABLE_NAME} PRIVATE -Wl,${whole_archive_flag} login_plugin -Wl,${no_whole_archive_flag} PRIVATE -Wl,${whole_archive_flag} history_plugin -Wl,${no_whole_archive_flag} PRIVATE -Wl,${whole_archive_flag} state_history_plugin -Wl,${no_whole_archive_flag} + PRIVATE -Wl,${whole_archive_flag} trace_api_plugin -Wl,${no_whole_archive_flag} PRIVATE -Wl,${whole_archive_flag} history_api_plugin -Wl,${no_whole_archive_flag} PRIVATE -Wl,${whole_archive_flag} chain_api_plugin -Wl,${no_whole_archive_flag} PRIVATE -Wl,${whole_archive_flag} net_plugin -Wl,${no_whole_archive_flag} diff --git a/tests/nodeos_forked_chain_test.py b/tests/nodeos_forked_chain_test.py index 745f18c2f51..e3e6622e2b2 100755 --- a/tests/nodeos_forked_chain_test.py +++ b/tests/nodeos_forked_chain_test.py @@ -289,7 +289,8 @@ def getMinHeadAndLib(prodNodes): blockProducer=node.getBlockProducerByNum(blockNum) if producerToSlot[lastBlockProducer]["count"]!=inRowCountPerProducer: - Utils.errorExit("Producer %s, in slot %d, expected to produce %d blocks but produced %d blocks" % (blockProducer, slot, inRowCountPerProducer, producerToSlot[lastBlockProducer]["count"])) + Utils.errorExit("Producer %s, in slot %d, expected to produce %d blocks but produced %d blocks. At block number %d." % + (lastBlockProducer, slot, inRowCountPerProducer, producerToSlot[lastBlockProducer]["count"], blockNum-1)) if blockProducer==productionCycle[0]: break diff --git a/tests/nodeos_multiple_version_protocol_feature_test.py b/tests/nodeos_multiple_version_protocol_feature_test.py index 02c4bf11ea9..e45ffe45d59 100755 --- a/tests/nodeos_multiple_version_protocol_feature_test.py +++ b/tests/nodeos_multiple_version_protocol_feature_test.py @@ -91,13 +91,16 @@ def hasBlockBecomeIrr(): } Utils.Print("Alternate Version Labels File is {}".format(alternateVersionLabelsFile)) assert exists(alternateVersionLabelsFile), "Alternate version labels file does not exist" + # version 1.7 did not provide a default value for "--last-block-time-offset-us" so this is needed to + # avoid dropping late blocks assert cluster.launch(pnodes=4, totalNodes=4, prodCount=1, totalProducers=4, extraNodeosArgs=" --plugin eosio::producer_api_plugin ", useBiosBootFile=False, specificExtraNodeosArgs={ 0:"--http-max-response-time-ms 990000", 1:"--http-max-response-time-ms 990000", - 2:"--http-max-response-time-ms 990000"}, + 2:"--http-max-response-time-ms 990000", + 3:"--last-block-time-offset-us -200000"}, onlySetProds=True, pfSetupPolicy=PFSetupPolicy.NONE, alternateVersionLabelsFile=alternateVersionLabelsFile, @@ -117,10 +120,10 @@ def resumeBlockProductions(): for node in allNodes: if not node.killed: node.processCurlCmd("producer", "resume", "") - def shouldNodesBeInSync(nodes:[Node]): + def areNodesInSync(nodes:[Node]): # Pause all block production to ensure the head is not moving pauseBlockProductions() - time.sleep(1) # Wait for some time to ensure all blocks are propagated + time.sleep(2) # Wait for some time to ensure all blocks are propagated headBlockIds = [] for node in nodes: headBlockId = node.getInfo()["head_block_id"] @@ -129,7 +132,7 @@ def 
shouldNodesBeInSync(nodes:[Node]): return len(set(headBlockIds)) == 1 # Before everything starts, all nodes (new version and old version) should be in sync - assert shouldNodesBeInSync(allNodes), "Nodes are not in sync before preactivation" + assert areNodesInSync(allNodes), "Nodes are not in sync before preactivation" # First, we are going to test the case where: # - 1st node has valid earliest_allowed_activation_time @@ -147,13 +150,13 @@ def shouldNodesBeInSync(nodes:[Node]): assert shouldNodeContainPreactivateFeature(newNodes[0]), "1st node should contain PREACTIVATE FEATURE" assert not (shouldNodeContainPreactivateFeature(newNodes[1]) or shouldNodeContainPreactivateFeature(newNodes[2])), \ "2nd and 3rd node should not contain PREACTIVATE FEATURE" - assert shouldNodesBeInSync([newNodes[1], newNodes[2], oldNode]), "2nd, 3rd and 4th node should be in sync" - assert not shouldNodesBeInSync(allNodes), "1st node should be out of sync with the rest nodes" + assert areNodesInSync([newNodes[1], newNodes[2], oldNode]), "2nd, 3rd and 4th node should be in sync" + assert not areNodesInSync(allNodes), "1st node should be out of sync with the rest nodes" waitForOneRound() assert not shouldNodeContainPreactivateFeature(newNodes[0]), "PREACTIVATE_FEATURE should be dropped" - assert shouldNodesBeInSync(allNodes), "All nodes should be in sync" + assert areNodesInSync(allNodes), "All nodes should be in sync" # Then we set the earliest_allowed_activation_time of 2nd node and 3rd node with valid value # Once the 1st node activate PREACTIVATE_FEATURE, all of them should have PREACTIVATE_FEATURE activated in the next block @@ -167,8 +170,8 @@ def shouldNodesBeInSync(nodes:[Node]): libBeforePreactivation = newNodes[0].getIrreversibleBlockNum() newNodes[0].activatePreactivateFeature() - assert shouldNodesBeInSync(newNodes), "New nodes should be in sync" - assert not shouldNodesBeInSync(allNodes), "Nodes should not be in sync after preactivation" + assert areNodesInSync(newNodes), "New nodes should be in sync" + assert not areNodesInSync(allNodes), "Nodes should not be in sync after preactivation" for node in newNodes: assert shouldNodeContainPreactivateFeature(node), "New node should contain PREACTIVATE_FEATURE" activatedBlockNum = newNodes[0].getHeadBlockNum() # The PREACTIVATE_FEATURE should have been activated before or at this block num @@ -195,7 +198,7 @@ def shouldNodesBeInSync(nodes:[Node]): restartNode(oldNode, oldNodeId, chainArg="--replay", nodeosPath="programs/nodeos/nodeos") time.sleep(2) # Give some time to replay - assert shouldNodesBeInSync(allNodes), "All nodes should be in sync" + assert areNodesInSync(allNodes), "All nodes should be in sync" assert shouldNodeContainPreactivateFeature(oldNode), "4th node should contain PREACTIVATE_FEATURE" testSuccessful = True diff --git a/unittests/api_tests.cpp b/unittests/api_tests.cpp index a5e745436bd..6a236ce18f1 100644 --- a/unittests/api_tests.cpp +++ b/unittests/api_tests.cpp @@ -1091,13 +1091,36 @@ BOOST_FIXTURE_TEST_CASE(transaction_tests, TESTER) { try { auto& t = std::get<0>(x); if (t && t->receipt && t->receipt->status != transaction_receipt::executed) { trace = t; } } ); + block_state_ptr bsp; + auto c2 = control->accepted_block.connect([&](const block_state_ptr& b) { bsp = b; }); // test error handling on deferred transaction failure - CALL_TEST_FUNCTION(*this, "test_transaction", "send_transaction_trigger_error_handler", {}); + auto test_trace = CALL_TEST_FUNCTION(*this, "test_transaction", "send_transaction_trigger_error_handler", {}); 
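+ // the deferred transaction fails, so the chain runs onerror; the applied_transaction handler above captured the resulting soft_fail trace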
BOOST_REQUIRE(trace); BOOST_CHECK_EQUAL(trace->receipt->status, transaction_receipt::soft_fail); + + std::set<transaction_id_type> block_ids; + for( const auto& receipt : bsp->block->transactions ) { + transaction_id_type id; + if( receipt.trx.contains<packed_transaction>() ) { + const auto& pt = receipt.trx.get<packed_transaction>(); + id = pt.id(); + } else { + id = receipt.trx.get<transaction_id_type>(); + } + block_ids.insert( id ); + } + + BOOST_CHECK_EQUAL(2, block_ids.size() ); // originating trx and deferred + BOOST_CHECK_EQUAL(1, block_ids.count( test_trace->id ) ); // originating + BOOST_CHECK( !test_trace->failed_dtrx_trace ); + BOOST_CHECK_EQUAL(0, block_ids.count( trace->id ) ); // onerror id, not in block + BOOST_CHECK_EQUAL(1, block_ids.count( trace->failed_dtrx_trace->id ) ); // deferred id since trace moved to failed_dtrx_trace + BOOST_CHECK( trace->action_traces.at(0).act.name == N(onerror) ); + c.disconnect(); + c2.disconnect(); } // test test_transaction_size @@ -1171,7 +1194,7 @@ BOOST_FIXTURE_TEST_CASE(deferred_transaction_tests, TESTER) { try { auto dtrxs = get_scheduled_transactions(); BOOST_CHECK_EQUAL(dtrxs.size(), 1); for (const auto& trx: dtrxs) { - control->push_scheduled_transaction(trx, fc::time_point::maximum()); + control->push_scheduled_transaction(trx, fc::time_point::maximum(), 0, false); } BOOST_CHECK_EQUAL(1, count); BOOST_REQUIRE(trace); @@ -1194,10 +1217,11 @@ BOOST_FIXTURE_TEST_CASE(deferred_transaction_tests, TESTER) { try { produce_blocks( 3 ); //check that only one deferred transaction executed + auto billed_cpu_time_us = control->get_global_properties().configuration.min_transaction_cpu_usage; auto dtrxs = get_scheduled_transactions(); BOOST_CHECK_EQUAL(dtrxs.size(), 1); for (const auto& trx: dtrxs) { - control->push_scheduled_transaction(trx, fc::time_point::maximum()); + control->push_scheduled_transaction(trx, fc::time_point::maximum(), billed_cpu_time_us, true); } BOOST_CHECK_EQUAL(1, count); BOOST_CHECK(trace); diff --git a/unittests/delay_tests.cpp b/unittests/delay_tests.cpp index 8d5874c7a8f..96d24704540 100644 --- a/unittests/delay_tests.cpp +++ b/unittests/delay_tests.cpp @@ -75,7 +75,8 @@ BOOST_FIXTURE_TEST_CASE( delay_error_create_account, validating_tester) { try { auto scheduled_trxs = get_scheduled_transactions(); BOOST_REQUIRE_EQUAL(scheduled_trxs.size(), 1u); - auto dtrace = control->push_scheduled_transaction(scheduled_trxs.front(), fc::time_point::maximum()); + auto billed_cpu_time_us = control->get_global_properties().configuration.min_transaction_cpu_usage; + auto dtrace = control->push_scheduled_transaction(scheduled_trxs.front(), fc::time_point::maximum(), billed_cpu_time_us, true); BOOST_REQUIRE_EQUAL(dtrace->except.valid(), true); BOOST_REQUIRE_EQUAL(dtrace->except->code(), missing_auth_exception::code_value); diff --git a/unittests/forked_tests.cpp b/unittests/forked_tests.cpp index 4ff7b07d350..d24511babea 100644 --- a/unittests/forked_tests.cpp +++ b/unittests/forked_tests.cpp @@ -610,12 +610,17 @@ BOOST_AUTO_TEST_CASE( push_block_returns_forked_transactions ) try { wlog( "sam and pam go off on their own fork on c2 while dan produces blocks by himself in c1" ); auto fork_block_num = c.control->head_block_num(); + signed_block_ptr c2b; wlog( "c2 blocks:" ); c2.produce_blocks(12); // pam produces 12 blocks - b = c2.produce_block( fc::milliseconds(config::block_interval_ms * 13) ); // sam skips over dan's blocks + b = c2b = c2.produce_block( fc::milliseconds(config::block_interval_ms * 13) ); // sam skips over dan's blocks expected_producer = N(sam); BOOST_REQUIRE_EQUAL( b->producer.to_string(), 
expected_producer.to_string() ); - c2.produce_blocks(11 + 12); + // save blocks for verification of forking later + std::vector<signed_block_ptr> c2blocks; + for( size_t i = 0; i < 11 + 12; ++i ) { + c2blocks.emplace_back( c2.produce_block() ); + } wlog( "c1 blocks:" ); @@ -625,7 +630,7 @@ BOOST_AUTO_TEST_CASE( push_block_returns_forked_transactions ) try { // create accounts on c1 which will be forked out c.produce_block(); - transaction_trace_ptr trace1, trace2, trace3; + transaction_trace_ptr trace1, trace2, trace3, trace4; { // create account the hard way so we can set reference block and expiration signed_transaction trx; authority active_auth( get_public_key( N(test1), "active" ) ); @@ -675,9 +680,32 @@ BOOST_AUTO_TEST_CASE( push_block_returns_forked_transactions ) try { trx.sign( get_private_key( config::system_account_name, "active" ), c.control->get_chain_id() ); trace3 = c.push_transaction( trx ); } + { + signed_transaction trx; + authority active_auth( get_public_key( N(test4), "active" ) ); + authority owner_auth( get_public_key( N(test4), "owner" ) ); + trx.actions.emplace_back( vector<permission_level>{{config::system_account_name,config::active_name}}, + newaccount{ + .creator = config::system_account_name, + .name = N(test4), + .owner = owner_auth, + .active = active_auth, + }); + trx.expiration = c.control->head_block_time() + fc::seconds( 60 ); + trx.set_reference_block( b->id() ); // tapos to dan's block should be rejected on fork switch + trx.sign( get_private_key( config::system_account_name, "active" ), c.control->get_chain_id() ); + trace4 = c.push_transaction( trx ); + BOOST_CHECK( trace4->receipt->status == transaction_receipt_header::executed ); + } c.produce_block(); c.produce_blocks(9); + // test forked blocks signal accepted_block in order, required by trace_api_plugin + std::vector<signed_block_ptr> accepted_blocks; + auto conn = c.control->accepted_block.connect( [&]( const block_state_ptr& bsp) { + accepted_blocks.emplace_back( bsp->block ); + } ); + // dan on chain 1 now gets all of the blocks from chain 2 which should cause fork switch wlog( "push c2 blocks to c1" ); for( uint32_t start = fork_block_num + 1, end = c2.control->head_block_num(); start <= end; ++start ) { @@ -685,11 +713,23 @@ c.push_block( fb ); } + { // verify forked blocks were signaled in order + auto itr = std::find( accepted_blocks.begin(), accepted_blocks.end(), c2b ); + BOOST_CHECK( itr != accepted_blocks.end() ); + ++itr; + BOOST_CHECK( itr != accepted_blocks.end() ); + size_t i = 0; + for( i = 0; itr != accepted_blocks.end(); ++i, ++itr ) { + BOOST_CHECK( c2blocks.at(i) == *itr ); + } + BOOST_CHECK( i == 11 + 12 ); + } // verify transaction on fork is reported by push_block in order - BOOST_REQUIRE_EQUAL( 3, c.get_unapplied_transaction_queue().size() ); + BOOST_REQUIRE_EQUAL( 4, c.get_unapplied_transaction_queue().size() ); BOOST_REQUIRE_EQUAL( trace1->id, c.get_unapplied_transaction_queue().begin()->id() ); BOOST_REQUIRE_EQUAL( trace2->id, (++c.get_unapplied_transaction_queue().begin())->id() ); BOOST_REQUIRE_EQUAL( trace3->id, (++(++c.get_unapplied_transaction_queue().begin()))->id() ); + BOOST_REQUIRE_EQUAL( trace4->id, (++(++(++c.get_unapplied_transaction_queue().begin())))->id() ); BOOST_REQUIRE_EXCEPTION(c.control->get_account( N(test1) ), fc::exception, [a=N(test1)] (const fc::exception& e)->bool { @@ -703,15 +743,38 @@ [a=N(test3)] (const fc::exception& e)->bool { return std::string( 
e.what() ).find( a.to_string() ) != std::string::npos; }) ; + BOOST_REQUIRE_EXCEPTION(c.control->get_account( N(test4) ), fc::exception, + [a=N(test4)] (const fc::exception& e)->bool { + return std::string( e.what() ).find( a.to_string() ) != std::string::npos; + }) ; // produce block which will apply the unapplied transactions - c.produce_block(); + std::vector<transaction_trace_ptr> traces; + c.produce_block( traces ); + + BOOST_REQUIRE_EQUAL( 4, traces.size() ); + BOOST_CHECK_EQUAL( trace1->id, traces.at(0)->id ); + BOOST_CHECK_EQUAL( transaction_receipt_header::executed, traces.at(0)->receipt->status ); + BOOST_CHECK_EQUAL( trace2->id, traces.at(1)->id ); + BOOST_CHECK_EQUAL( transaction_receipt_header::executed, traces.at(1)->receipt->status ); + BOOST_CHECK_EQUAL( trace3->id, traces.at(2)->id ); + BOOST_CHECK_EQUAL( transaction_receipt_header::executed, traces.at(2)->receipt->status ); + // test4 failed because it was tapos to a forked out block + BOOST_CHECK_EQUAL( trace4->id, traces.at(3)->id ); + BOOST_CHECK( !traces.at(3)->receipt ); + BOOST_CHECK( !!traces.at(3)->except ); // verify unapplied transactions ran BOOST_REQUIRE_EQUAL( c.control->get_account( N(test1) ).name, N(test1) ); BOOST_REQUIRE_EQUAL( c.control->get_account( N(test2) ).name, N(test2) ); BOOST_REQUIRE_EQUAL( c.control->get_account( N(test3) ).name, N(test3) ); + // failed because of tapos to forked out block + BOOST_REQUIRE_EXCEPTION(c.control->get_account( N(test4) ), fc::exception, + [a=N(test4)] (const fc::exception& e)->bool { + return std::string( e.what() ).find( a.to_string() ) != std::string::npos; + }) ; + } FC_LOG_AND_RETHROW() diff --git a/unittests/wasm_tests.cpp b/unittests/wasm_tests.cpp index da482058b07..9c4bd985834 100644 --- a/unittests/wasm_tests.cpp +++ b/unittests/wasm_tests.cpp @@ -3,6 +3,7 @@ #include #include +#include #include #include #include @@ -1893,6 +1894,146 @@ BOOST_AUTO_TEST_CASE( code_size ) try { } FC_LOG_AND_RETHROW() + +BOOST_AUTO_TEST_CASE( billed_cpu_test ) try { + + fc::temp_directory tempdir; + tester chain( tempdir, true ); + chain.execute_setup_policy( setup_policy::full ); + + const resource_limits_manager& mgr = chain.control->get_resource_limits_manager(); + + account_name acc = N( asserter ); + account_name user = N( user ); + chain.create_accounts( {acc, user} ); + chain.produce_block(); + + auto create_trx = [&](auto trx_max_ms) { + signed_transaction trx; + trx.actions.emplace_back( vector<permission_level>{{acc, config::active_name}}, + assertdef {1, "Should Not Assert!"} ); + static int num_secs = 1; + chain.set_transaction_headers( trx, ++num_secs ); // num_secs provides nonce + trx.max_cpu_usage_ms = trx_max_ms; + trx.sign( chain.get_private_key( acc, "active" ), chain.control->get_chain_id() ); + auto ptrx = std::make_shared<packed_transaction>(trx); + auto fut = transaction_metadata::start_recover_keys( ptrx, chain.control->get_thread_pool(), chain.control->get_chain_id(), fc::microseconds::maximum() ); + return fut.get(); + }; + + auto push_trx = [&]( const transaction_metadata_ptr& trx, fc::time_point deadline, + uint32_t billed_cpu_time_us, bool explicit_billed_cpu_time ) { + auto r = chain.control->push_transaction( trx, deadline, billed_cpu_time_us, explicit_billed_cpu_time ); + if( r->except_ptr ) std::rethrow_exception( r->except_ptr ); + if( r->except ) throw *r->except; + return r; + }; + + auto ptrx = create_trx(0); + // no limits, just verifying trx works + push_trx( ptrx, fc::time_point::maximum(), 0, false ); // non-explicit billing + + // setup account acc with large limits + chain.push_action( 
config::system_account_name, N(setalimits), config::system_account_name, fc::mutable_variant_object() + ("account", user) + ("ram_bytes", -1) + ("net_weight", 19'999'999) + ("cpu_weight", 19'999'999) + ); + chain.push_action( config::system_account_name, N(setalimits), config::system_account_name, fc::mutable_variant_object() + ("account", acc) + ("ram_bytes", -1) + ("net_weight", 9'999) + ("cpu_weight", 9'999) + ); + + chain.produce_block(); + + auto max_cpu_time_us = chain.control->get_global_properties().configuration.max_transaction_cpu_usage; + auto min_cpu_time_us = chain.control->get_global_properties().configuration.min_transaction_cpu_usage; + + auto cpu_limit = mgr.get_account_cpu_limit(acc).first; // huge limit ~17s + + ptrx = create_trx(0); + BOOST_CHECK_LT( max_cpu_time_us, cpu_limit ); // max_cpu_time_us has to be less than cpu_limit to actually test max and not account + // indicate explicit billing at transaction max, max_cpu_time_us has to be greater than account cpu time + push_trx( ptrx, fc::time_point::maximum(), max_cpu_time_us, true ); + chain.produce_block(); + + cpu_limit = mgr.get_account_cpu_limit(acc).first; + + // do not allow to bill greater than chain configured max, objective failure even with explicit billing for over max + ptrx = create_trx(0); + BOOST_CHECK_LT( max_cpu_time_us + 1, cpu_limit ); // max_cpu_time_us+1 has to be less than cpu_limit to actually test max and not account + // indicate explicit billing at max + 1 + BOOST_CHECK_EXCEPTION( push_trx( ptrx, fc::time_point::maximum(), max_cpu_time_us + 1, true ), tx_cpu_usage_exceeded, + fc_exception_message_starts_with( "billed") ); + + // allow to bill at trx configured max + ptrx = create_trx(5); // set trx max at 5ms + BOOST_CHECK_LT( 5 * 1000, cpu_limit ); // 5ms has to be less than cpu_limit to actually test trx max and not account + // indicate explicit billing at max + push_trx( ptrx, fc::time_point::maximum(), 5 * 1000, true ); + chain.produce_block(); + + cpu_limit = mgr.get_account_cpu_limit(acc).first; // update after last trx + + // do not allow to bill greater than trx configured max, objective failure even with explicit billing for over max + ptrx = create_trx(5); // set trx max at 5ms + BOOST_CHECK_LT( 5 * 1000 + 1, cpu_limit ); // 5ms has to be less than cpu_limit to actually test trx max and not account + // indicate explicit billing at max + 1 + BOOST_CHECK_EXCEPTION( push_trx( ptrx, fc::time_point::maximum(), 5 * 1000 + 1, true ), tx_cpu_usage_exceeded, + fc_exception_message_starts_with("billed") ); + + // bill at minimum + ptrx = create_trx(0); + // indicate explicit billing at transaction minimum + push_trx( ptrx, fc::time_point::maximum(), min_cpu_time_us, true ); + chain.produce_block(); + + // do not allow to bill less than minimum + ptrx = create_trx(0); + // indicate explicit billing at minimum-1, objective failure even with explicit billing for under min + BOOST_CHECK_EXCEPTION( push_trx( ptrx, fc::time_point::maximum(), min_cpu_time_us - 1, true ), transaction_exception, + fc_exception_message_starts_with("cannot bill CPU time less than the minimum") ); + + chain.push_action( config::system_account_name, N(setalimits), config::system_account_name, fc::mutable_variant_object() + ("account", acc) + ("ram_bytes", -1) + ("net_weight", 75) + ("cpu_weight", 75) // ~130ms + ); + + chain.produce_block(); + chain.produce_block( fc::days(1) ); // produce for one day to reset account cpu + + cpu_limit = mgr.get_account_cpu_limit_ex(acc).first.max; + + ptrx = create_trx(0); + 
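+ // the account's cpu limit (~130ms per the cpu_weight set above) is now well below the chain max, so the account limit binds first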
BOOST_CHECK_LT( cpu_limit+1, max_cpu_time_us ); // needs to be less or this just tests the same thing as max_cpu_time_us test above + // indicate non-explicit billing with 1 more than our account cpu limit, triggers optimization check #8638 and fails trx + BOOST_CHECK_EXCEPTION( push_trx( ptrx, fc::time_point::maximum(), cpu_limit+1, false ), tx_cpu_usage_exceeded, + fc_exception_message_starts_with("estimated") ); + + ptrx = create_trx(0); + BOOST_CHECK_LT( cpu_limit, max_cpu_time_us ); + // indicate non-explicit billing at our account cpu limit, will allow this trx to run, but only bills for actual use + auto r = push_trx( ptrx, fc::time_point::maximum(), cpu_limit, false ); + BOOST_CHECK_LT( r->receipt->cpu_usage_us, cpu_limit ); // verify not billed at provided bill amount when explicit_billed_cpu_time=false + + chain.produce_block(); + chain.produce_block( fc::days(1) ); // produce for one day to reset account cpu + + ptrx = create_trx(0); + BOOST_CHECK_LT( cpu_limit+1, max_cpu_time_us ); // needs to be less or this just tests the same thing as max_cpu_time_us test above + // indicate explicit billing at over our account cpu limit, not allowed + cpu_limit = mgr.get_account_cpu_limit_ex(acc).first.max; + BOOST_CHECK_EXCEPTION( push_trx( ptrx, fc::time_point::maximum(), cpu_limit+1, true ), tx_cpu_usage_exceeded, + fc_exception_message_starts_with("billed") ); + +} FC_LOG_AND_RETHROW() + + + // TODO: restore net_usage_tests #if 0 BOOST_FIXTURE_TEST_CASE(net_usage_tests, tester ) try {