diff --git a/.cicd/base-images.yml b/.cicd/base-images.yml
new file mode 100644
index 00000000000..045f51e1643
--- /dev/null
+++ b/.cicd/base-images.yml
@@ -0,0 +1,121 @@
+steps:
+ - wait
+
+ - label: ":aws: Amazon_Linux 2 - Base Image Pinned"
+ command:
+ - "./.cicd/generate-base-images.sh"
+ env:
+ FORCE_BASE_IMAGE: true
+ IMAGE_TAG: amazon_linux-2-pinned
+ PLATFORM_TYPE: pinned
+ agents:
+ queue: "automation-eks-eos-builder-fleet"
+ timeout: 180
+
+ - label: ":centos: CentOS 7.7 - Base Image Pinned"
+ command:
+ - "./.cicd/generate-base-images.sh"
+ env:
+ FORCE_BASE_IMAGE: true
+ IMAGE_TAG: centos-7.7-pinned
+ PLATFORM_TYPE: pinned
+ agents:
+ queue: "automation-eks-eos-builder-fleet"
+ timeout: 180
+
+ - label: ":darwin: macOS 10.14 - Base Image Pinned"
+ command:
+ - "git clone git@github.com:EOSIO/eos.git eos && cd eos && git checkout -f $BUILDKITE_BRANCH"
+ - "cd eos && ./.cicd/platforms/pinned/macos-10.14-pinned.sh"
+ plugins:
+ - EOSIO/anka#v0.6.0:
+ debug: true
+ vm-name: "10.14.6_6C_14G_40G"
+ no-volume: true
+ always-pull: true
+ wait-network: true
+ vm-registry-tag: "clean::cicd::git-ssh::nas::brew::buildkite-agent"
+ failover-registries:
+ - "registry_1"
+ - "registry_2"
+ inherit-environment-vars: true
+ - EOSIO/skip-checkout#v0.1.1:
+ cd: ~
+ agents: "queue=mac-anka-node-fleet"
+ timeout: 180
+
+ - label: ":ubuntu: Ubuntu 16.04 - Base Image Pinned"
+ command:
+ - "./.cicd/generate-base-images.sh"
+ env:
+ FORCE_BASE_IMAGE: true
+ IMAGE_TAG: ubuntu-16.04-pinned
+ PLATFORM_TYPE: pinned
+ agents:
+ queue: "automation-eks-eos-builder-fleet"
+ timeout: 180
+
+ - label: ":ubuntu: Ubuntu 18.04 - Base Image Pinned"
+ command:
+ - "./.cicd/generate-base-images.sh"
+ env:
+ FORCE_BASE_IMAGE: true
+ IMAGE_TAG: ubuntu-18.04-pinned
+ PLATFORM_TYPE: pinned
+ agents:
+ queue: "automation-eks-eos-builder-fleet"
+ timeout: 180
+
+ - label: ":aws: Amazon_Linux 2 - Base Image Unpinned"
+ command:
+ - "./.cicd/generate-base-images.sh"
+ env:
+ FORCE_BASE_IMAGE: true
+ IMAGE_TAG: amazon_linux-2-unpinned
+ PLATFORM_TYPE: unpinned
+ agents:
+ queue: "automation-eks-eos-builder-fleet"
+ timeout: 180
+
+ - label: ":centos: CentOS 7.7 - Base Image Unpinned"
+ command:
+ - "./.cicd/generate-base-images.sh"
+ env:
+ FORCE_BASE_IMAGE: true
+ IMAGE_TAG: centos-7.7-unpinned
+ PLATFORM_TYPE: unpinned
+ agents:
+ queue: "automation-eks-eos-builder-fleet"
+ timeout: 180
+
+ - label: ":darwin: macOS 10.14 - Base Image Unpinned"
+ command:
+ - "git clone git@github.com:EOSIO/eos.git eos && cd eos && git checkout -f $BUILDKITE_BRANCH"
+ - "cd eos && ./.cicd/platforms/unpinned/macos-10.14-unpinned.sh"
+ plugins:
+ - EOSIO/anka#v0.6.0:
+ debug: true
+ vm-name: "10.14.6_6C_14G_40G"
+ no-volume: true
+ always-pull: true
+ wait-network: true
+ vm-registry-tag: "clean::cicd::git-ssh::nas::brew::buildkite-agent"
+ failover-registries:
+ - "registry_1"
+ - "registry_2"
+ inherit-environment-vars: true
+ - EOSIO/skip-checkout#v0.1.1:
+ cd: ~
+ agents: "queue=mac-anka-node-fleet"
+ timeout: 180
+
+ - label: ":ubuntu: Ubuntu 18.04 - Base Image Unpinned"
+ command:
+ - "./.cicd/generate-base-images.sh"
+ env:
+ FORCE_BASE_IMAGE: true
+ IMAGE_TAG: ubuntu-18.04-unpinned
+ PLATFORM_TYPE: unpinned
+ agents:
+ queue: "automation-eks-eos-builder-fleet"
+ timeout: 180
\ No newline at end of file
diff --git a/.cicd/generate-base-images.sh b/.cicd/generate-base-images.sh
index d05a4a25b99..d4d52233f0b 100755
--- a/.cicd/generate-base-images.sh
+++ b/.cicd/generate-base-images.sh
@@ -8,9 +8,14 @@ ORG_REPO=$(echo $FULL_TAG | cut -d: -f1)
TAG=$(echo $FULL_TAG | cut -d: -f2)
EXISTS=$(curl -s -H "Authorization: Bearer $(curl -sSL "https://auth.docker.io/token?service=registry.docker.io&scope=repository:${ORG_REPO}:pull" | jq --raw-output .token)" "https://registry.hub.docker.com/v2/${ORG_REPO}/manifests/$TAG")
# build, if neccessary
-if [[ $EXISTS =~ '404 page not found' || $EXISTS =~ 'manifest unknown' || $FORCE_BASE_IMAGE == 'true' ]]; then # if we cannot pull the image, we build and push it first
- docker build -t $FULL_TAG -f $CICD_DIR/platforms/$PLATFORM_TYPE/$IMAGE_TAG.dockerfile .
- docker push $FULL_TAG
+if [[ $EXISTS =~ '404 page not found' || $EXISTS =~ 'manifest unknown' || $FORCE_BASE_IMAGE == true ]]; then # if we cannot pull the image, we build and push it first
+ docker build --no-cache -t $FULL_TAG -f $CICD_DIR/platforms/$PLATFORM_TYPE/$IMAGE_TAG.dockerfile .
+ if [[ $FORCE_BASE_IMAGE != true ]]; then
+ docker push $FULL_TAG
+ else
+        echo "Base image creation successful. Not pushing..."
+ exit 0
+ fi
else
echo "$FULL_TAG already exists."
fi
\ No newline at end of file
diff --git a/.cicd/submodule-regression-check.sh b/.cicd/submodule-regression-check.sh
index 80999067204..9392ebb43b1 100755
--- a/.cicd/submodule-regression-check.sh
+++ b/.cicd/submodule-regression-check.sh
@@ -10,7 +10,7 @@ if [[ $BUILDKITE == true ]]; then
else
[[ -z $GITHUB_BASE_REF ]] && echo "Cannot find \$GITHUB_BASE_REF, so we have nothing to compare submodules to. Skipping submodule regression check." && exit 0
BASE_BRANCH=$GITHUB_BASE_REF
- CURRENT_BRANCH=$GITHUB_SHA
+ CURRENT_BRANCH="refs/remotes/pull/$PR_NUMBER/merge"
fi
echo "getting submodule info for $CURRENT_BRANCH"
@@ -25,12 +25,6 @@ while read -r a b; do
BASE_MAP[$a]=$b
done < <(git submodule --quiet foreach --recursive 'echo $path `git log -1 --format=%ct`')
-# We need to switch back to the PR ref/head so we can git log properly
-if [[ $BUILDKITE != true ]]; then
- echo "git fetch origin +$GITHUB_REF:"
- git fetch origin +${GITHUB_REF}: 1> /dev/null
-fi
-
echo "switching back to $CURRENT_BRANCH..."
echo "git checkout -qf $CURRENT_BRANCH"
git checkout -qf $CURRENT_BRANCH 1> /dev/null
diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index 19fc006353c..20c04810660 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -1,25 +1,22 @@
name: Pull Request
on: [pull_request]
-jobs:
- start-job:
- name: Start Job
- runs-on: ubuntu-latest
- steps:
- - name: Start Job.
- run: echo "PR created. Builds will be triggered here for forked PRs or Buildkite for internal PRs."
-
+env:
+ PR_NUMBER: ${{ toJson(github.event.number) }}
+jobs:
submodule_regression_check:
if: github.event.pull_request.base.repo.id != github.event.pull_request.head.repo.id
name: Submodule Regression Check
runs-on: ubuntu-latest
- needs: start-job
steps:
- name: Checkout
- uses: actions/checkout@50fbc622fc4ef5163becd7fab6573eac35f8462e
- with:
- submodules: recursive
+ run: |
+ git clone https://github.com/${GITHUB_REPOSITORY} .
+ git fetch -v --prune origin +refs/pull/${PR_NUMBER}/merge:refs/remotes/pull/${PR_NUMBER}/merge
+ git checkout --force --progress refs/remotes/pull/${PR_NUMBER}/merge
+ git submodule sync --recursive
+ git submodule update --init --force --recursive
- name: Submodule Regression Check
run: ./.cicd/submodule-regression-check.sh
@@ -28,12 +25,14 @@ jobs:
if: github.event.pull_request.base.repo.id != github.event.pull_request.head.repo.id
name: Amazon_Linux 2 | Build
runs-on: ubuntu-latest
- needs: start-job
steps:
- name: Checkout
- uses: actions/checkout@50fbc622fc4ef5163becd7fab6573eac35f8462e
- with:
- submodules: recursive
+ run: |
+ git clone https://github.com/${GITHUB_REPOSITORY} .
+ git fetch -v --prune origin +refs/pull/${PR_NUMBER}/merge:refs/remotes/pull/${PR_NUMBER}/merge
+ git checkout --force --progress refs/remotes/pull/${PR_NUMBER}/merge
+ git submodule sync --recursive
+ git submodule update --init --force --recursive
- name: Build
run: |
./.cicd/build.sh
@@ -52,9 +51,12 @@ jobs:
needs: amazon_linux-2-build
steps:
- name: Checkout
- uses: actions/checkout@50fbc622fc4ef5163becd7fab6573eac35f8462e
- with:
- submodules: recursive
+ run: |
+ git clone https://github.com/${GITHUB_REPOSITORY} .
+ git fetch -v --prune origin +refs/pull/${PR_NUMBER}/merge:refs/remotes/pull/${PR_NUMBER}/merge
+ git checkout --force --progress refs/remotes/pull/${PR_NUMBER}/merge
+ git submodule sync --recursive
+ git submodule update --init --force --recursive
- name: Download Build Artifact
uses: actions/download-artifact@v1
with:
@@ -72,9 +74,12 @@ jobs:
needs: amazon_linux-2-build
steps:
- name: Checkout
- uses: actions/checkout@50fbc622fc4ef5163becd7fab6573eac35f8462e
- with:
- submodules: recursive
+ run: |
+ git clone https://github.com/${GITHUB_REPOSITORY} .
+ git fetch -v --prune origin +refs/pull/${PR_NUMBER}/merge:refs/remotes/pull/${PR_NUMBER}/merge
+ git checkout --force --progress refs/remotes/pull/${PR_NUMBER}/merge
+ git submodule sync --recursive
+ git submodule update --init --force --recursive
- name: Download Build Artifact
uses: actions/download-artifact@v1
with:
@@ -92,9 +97,12 @@ jobs:
needs: amazon_linux-2-build
steps:
- name: Checkout
- uses: actions/checkout@50fbc622fc4ef5163becd7fab6573eac35f8462e
- with:
- submodules: recursive
+ run: |
+ git clone https://github.com/${GITHUB_REPOSITORY} .
+ git fetch -v --prune origin +refs/pull/${PR_NUMBER}/merge:refs/remotes/pull/${PR_NUMBER}/merge
+ git checkout --force --progress refs/remotes/pull/${PR_NUMBER}/merge
+ git submodule sync --recursive
+ git submodule update --init --force --recursive
- name: Download Build Artifact
uses: actions/download-artifact@v1
with:
@@ -112,12 +120,14 @@ jobs:
if: github.event.pull_request.base.repo.id != github.event.pull_request.head.repo.id
name: CentOS 7.7 | Build
runs-on: ubuntu-latest
- needs: start-job
steps:
- name: Checkout
- uses: actions/checkout@50fbc622fc4ef5163becd7fab6573eac35f8462e
- with:
- submodules: recursive
+ run: |
+ git clone https://github.com/${GITHUB_REPOSITORY} .
+ git fetch -v --prune origin +refs/pull/${PR_NUMBER}/merge:refs/remotes/pull/${PR_NUMBER}/merge
+ git checkout --force --progress refs/remotes/pull/${PR_NUMBER}/merge
+ git submodule sync --recursive
+ git submodule update --init --force --recursive
- name: Build
run: |
./.cicd/build.sh
@@ -136,9 +146,12 @@ jobs:
needs: centos-77-build
steps:
- name: Checkout
- uses: actions/checkout@50fbc622fc4ef5163becd7fab6573eac35f8462e
- with:
- submodules: recursive
+ run: |
+ git clone https://github.com/${GITHUB_REPOSITORY} .
+ git fetch -v --prune origin +refs/pull/${PR_NUMBER}/merge:refs/remotes/pull/${PR_NUMBER}/merge
+ git checkout --force --progress refs/remotes/pull/${PR_NUMBER}/merge
+ git submodule sync --recursive
+ git submodule update --init --force --recursive
- name: Download Build Artifact
uses: actions/download-artifact@v1
with:
@@ -156,9 +169,12 @@ jobs:
needs: centos-77-build
steps:
- name: Checkout
- uses: actions/checkout@50fbc622fc4ef5163becd7fab6573eac35f8462e
- with:
- submodules: recursive
+ run: |
+ git clone https://github.com/${GITHUB_REPOSITORY} .
+ git fetch -v --prune origin +refs/pull/${PR_NUMBER}/merge:refs/remotes/pull/${PR_NUMBER}/merge
+ git checkout --force --progress refs/remotes/pull/${PR_NUMBER}/merge
+ git submodule sync --recursive
+ git submodule update --init --force --recursive
- name: Download Build Artifact
uses: actions/download-artifact@v1
with:
@@ -176,9 +192,12 @@ jobs:
needs: centos-77-build
steps:
- name: Checkout
- uses: actions/checkout@50fbc622fc4ef5163becd7fab6573eac35f8462e
- with:
- submodules: recursive
+ run: |
+ git clone https://github.com/${GITHUB_REPOSITORY} .
+ git fetch -v --prune origin +refs/pull/${PR_NUMBER}/merge:refs/remotes/pull/${PR_NUMBER}/merge
+ git checkout --force --progress refs/remotes/pull/${PR_NUMBER}/merge
+ git submodule sync --recursive
+ git submodule update --init --force --recursive
- name: Download Build Artifact
uses: actions/download-artifact@v1
with:
@@ -196,12 +215,14 @@ jobs:
if: github.event.pull_request.base.repo.id != github.event.pull_request.head.repo.id
name: Ubuntu 16.04 | Build
runs-on: ubuntu-latest
- needs: start-job
steps:
- name: Checkout
- uses: actions/checkout@50fbc622fc4ef5163becd7fab6573eac35f8462e
- with:
- submodules: recursive
+ run: |
+ git clone https://github.com/${GITHUB_REPOSITORY} .
+ git fetch -v --prune origin +refs/pull/${PR_NUMBER}/merge:refs/remotes/pull/${PR_NUMBER}/merge
+ git checkout --force --progress refs/remotes/pull/${PR_NUMBER}/merge
+ git submodule sync --recursive
+ git submodule update --init --force --recursive
- name: Build
run: |
./.cicd/build.sh
@@ -220,9 +241,12 @@ jobs:
needs: ubuntu-1604-build
steps:
- name: Checkout
- uses: actions/checkout@50fbc622fc4ef5163becd7fab6573eac35f8462e
- with:
- submodules: recursive
+ run: |
+ git clone https://github.com/${GITHUB_REPOSITORY} .
+ git fetch -v --prune origin +refs/pull/${PR_NUMBER}/merge:refs/remotes/pull/${PR_NUMBER}/merge
+ git checkout --force --progress refs/remotes/pull/${PR_NUMBER}/merge
+ git submodule sync --recursive
+ git submodule update --init --force --recursive
- name: Download Build Artifact
uses: actions/download-artifact@v1
with:
@@ -240,9 +264,12 @@ jobs:
needs: ubuntu-1604-build
steps:
- name: Checkout
- uses: actions/checkout@50fbc622fc4ef5163becd7fab6573eac35f8462e
- with:
- submodules: recursive
+ run: |
+ git clone https://github.com/${GITHUB_REPOSITORY} .
+ git fetch -v --prune origin +refs/pull/${PR_NUMBER}/merge:refs/remotes/pull/${PR_NUMBER}/merge
+ git checkout --force --progress refs/remotes/pull/${PR_NUMBER}/merge
+ git submodule sync --recursive
+ git submodule update --init --force --recursive
- name: Download Build Artifact
uses: actions/download-artifact@v1
with:
@@ -260,9 +287,12 @@ jobs:
needs: ubuntu-1604-build
steps:
- name: Checkout
- uses: actions/checkout@50fbc622fc4ef5163becd7fab6573eac35f8462e
- with:
- submodules: recursive
+ run: |
+ git clone https://github.com/${GITHUB_REPOSITORY} .
+ git fetch -v --prune origin +refs/pull/${PR_NUMBER}/merge:refs/remotes/pull/${PR_NUMBER}/merge
+ git checkout --force --progress refs/remotes/pull/${PR_NUMBER}/merge
+ git submodule sync --recursive
+ git submodule update --init --force --recursive
- name: Download Build Artifact
uses: actions/download-artifact@v1
with:
@@ -280,12 +310,14 @@ jobs:
if: github.event.pull_request.base.repo.id != github.event.pull_request.head.repo.id
name: Ubuntu 18.04 | Build
runs-on: ubuntu-latest
- needs: start-job
steps:
- name: Checkout
- uses: actions/checkout@50fbc622fc4ef5163becd7fab6573eac35f8462e
- with:
- submodules: recursive
+ run: |
+ git clone https://github.com/${GITHUB_REPOSITORY} .
+ git fetch -v --prune origin +refs/pull/${PR_NUMBER}/merge:refs/remotes/pull/${PR_NUMBER}/merge
+ git checkout --force --progress refs/remotes/pull/${PR_NUMBER}/merge
+ git submodule sync --recursive
+ git submodule update --init --force --recursive
- name: Build
run: |
./.cicd/build.sh
@@ -304,9 +336,12 @@ jobs:
needs: ubuntu-1804-build
steps:
- name: Checkout
- uses: actions/checkout@50fbc622fc4ef5163becd7fab6573eac35f8462e
- with:
- submodules: recursive
+ run: |
+ git clone https://github.com/${GITHUB_REPOSITORY} .
+ git fetch -v --prune origin +refs/pull/${PR_NUMBER}/merge:refs/remotes/pull/${PR_NUMBER}/merge
+ git checkout --force --progress refs/remotes/pull/${PR_NUMBER}/merge
+ git submodule sync --recursive
+ git submodule update --init --force --recursive
- name: Download Build Artifact
uses: actions/download-artifact@v1
with:
@@ -324,9 +359,12 @@ jobs:
needs: ubuntu-1804-build
steps:
- name: Checkout
- uses: actions/checkout@50fbc622fc4ef5163becd7fab6573eac35f8462e
- with:
- submodules: recursive
+ run: |
+ git clone https://github.com/${GITHUB_REPOSITORY} .
+ git fetch -v --prune origin +refs/pull/${PR_NUMBER}/merge:refs/remotes/pull/${PR_NUMBER}/merge
+ git checkout --force --progress refs/remotes/pull/${PR_NUMBER}/merge
+ git submodule sync --recursive
+ git submodule update --init --force --recursive
- name: Download Build Artifact
uses: actions/download-artifact@v1
with:
@@ -344,9 +382,12 @@ jobs:
needs: ubuntu-1804-build
steps:
- name: Checkout
- uses: actions/checkout@50fbc622fc4ef5163becd7fab6573eac35f8462e
- with:
- submodules: recursive
+ run: |
+ git clone https://github.com/${GITHUB_REPOSITORY} .
+ git fetch -v --prune origin +refs/pull/${PR_NUMBER}/merge:refs/remotes/pull/${PR_NUMBER}/merge
+ git checkout --force --progress refs/remotes/pull/${PR_NUMBER}/merge
+ git submodule sync --recursive
+ git submodule update --init --force --recursive
- name: Download Build Artifact
uses: actions/download-artifact@v1
with:
@@ -364,12 +405,14 @@ jobs:
if: github.event.pull_request.base.repo.id != github.event.pull_request.head.repo.id
name: MacOS 10.15 | Build
runs-on: macos-latest
- needs: start-job
steps:
- name: Checkout
- uses: actions/checkout@50fbc622fc4ef5163becd7fab6573eac35f8462e
- with:
- submodules: recursive
+ run: |
+ git clone https://github.com/${GITHUB_REPOSITORY} .
+ git fetch -v --prune origin +refs/pull/${PR_NUMBER}/merge:refs/remotes/pull/${PR_NUMBER}/merge
+ git checkout --force --progress refs/remotes/pull/${PR_NUMBER}/merge
+ git submodule sync --recursive
+ git submodule update --init --force --recursive
- name: Build
run: |
./.cicd/platforms/unpinned/macos-10.14-unpinned.sh
@@ -386,9 +429,12 @@ jobs:
needs: macos-1015-build
steps:
- name: Checkout
- uses: actions/checkout@50fbc622fc4ef5163becd7fab6573eac35f8462e
- with:
- submodules: recursive
+ run: |
+ git clone https://github.com/${GITHUB_REPOSITORY} .
+ git fetch -v --prune origin +refs/pull/${PR_NUMBER}/merge:refs/remotes/pull/${PR_NUMBER}/merge
+ git checkout --force --progress refs/remotes/pull/${PR_NUMBER}/merge
+ git submodule sync --recursive
+ git submodule update --init --force --recursive
- name: Download Build Artifact
uses: actions/download-artifact@v1
with:
@@ -404,9 +450,12 @@ jobs:
needs: macos-1015-build
steps:
- name: Checkout
- uses: actions/checkout@50fbc622fc4ef5163becd7fab6573eac35f8462e
- with:
- submodules: recursive
+ run: |
+ git clone https://github.com/${GITHUB_REPOSITORY} .
+ git fetch -v --prune origin +refs/pull/${PR_NUMBER}/merge:refs/remotes/pull/${PR_NUMBER}/merge
+ git checkout --force --progress refs/remotes/pull/${PR_NUMBER}/merge
+ git submodule sync --recursive
+ git submodule update --init --force --recursive
- name: Download Build Artifact
uses: actions/download-artifact@v1
with:
@@ -422,9 +471,12 @@ jobs:
needs: macos-1015-build
steps:
- name: Checkout
- uses: actions/checkout@50fbc622fc4ef5163becd7fab6573eac35f8462e
- with:
- submodules: recursive
+ run: |
+ git clone https://github.com/${GITHUB_REPOSITORY} .
+ git fetch -v --prune origin +refs/pull/${PR_NUMBER}/merge:refs/remotes/pull/${PR_NUMBER}/merge
+ git checkout --force --progress refs/remotes/pull/${PR_NUMBER}/merge
+ git submodule sync --recursive
+ git submodule update --init --force --recursive
- name: Download Build Artifact
uses: actions/download-artifact@v1
with:
diff --git a/.gitignore b/.gitignore
index 6dd6c1ca492..4c6f4f1a0f8 100644
--- a/.gitignore
+++ b/.gitignore
@@ -85,3 +85,5 @@ var/lib/node_*
.idea/
*.iws
.DS_Store
+
+!*.swagger.*
diff --git a/CMakeLists.txt b/CMakeLists.txt
index a3c0709b636..57894e10c8b 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -25,7 +25,7 @@ set( CXX_STANDARD_REQUIRED ON)
set(VERSION_MAJOR 2)
set(VERSION_MINOR 0)
-set(VERSION_PATCH 3)
+set(VERSION_PATCH 4)
#set(VERSION_SUFFIX rc3)
if(VERSION_SUFFIX)
@@ -80,9 +80,8 @@ if(CMAKE_SIZEOF_VOID_P EQUAL 8 AND NOT WIN32)
endif()
if(CMAKE_SIZEOF_VOID_P EQUAL 8 AND NOT WIN32)
- list(APPEND EOSIO_WASM_RUNTIMES eos-vm)
if(CMAKE_SYSTEM_PROCESSOR STREQUAL x86_64)
- list(APPEND EOSIO_WASM_RUNTIMES eos-vm-jit)
+ list(APPEND EOSIO_WASM_RUNTIMES eos-vm eos-vm-jit)
endif()
endif()
diff --git a/README.md b/README.md
index a71d2b2b160..0e445ca5360 100644
--- a/README.md
+++ b/README.md
@@ -74,13 +74,13 @@ $ brew remove eosio
#### Ubuntu 18.04 Package Install
```sh
-$ wget https://github.com/eosio/eos/releases/download/v2.0.3/eosio_2.0.3-1-ubuntu-18.04_amd64.deb
-$ sudo apt install ./eosio_2.0.3-1-ubuntu-18.04_amd64.deb
+$ wget https://github.com/eosio/eos/releases/download/v2.0.4/eosio_2.0.4-1-ubuntu-18.04_amd64.deb
+$ sudo apt install ./eosio_2.0.4-1-ubuntu-18.04_amd64.deb
```
#### Ubuntu 16.04 Package Install
```sh
-$ wget https://github.com/eosio/eos/releases/download/v2.0.3/eosio_2.0.3-1-ubuntu-16.04_amd64.deb
-$ sudo apt install ./eosio_2.0.3-1-ubuntu-16.04_amd64.deb
+$ wget https://github.com/eosio/eos/releases/download/v2.0.4/eosio_2.0.4-1-ubuntu-16.04_amd64.deb
+$ sudo apt install ./eosio_2.0.4-1-ubuntu-16.04_amd64.deb
```
#### Ubuntu Package Uninstall
```sh
@@ -91,8 +91,8 @@ $ sudo apt remove eosio
#### RPM Package Install
```sh
-$ wget https://github.com/eosio/eos/releases/download/v2.0.3/eosio-2.0.3-1.el7.x86_64.rpm
-$ sudo yum install ./eosio-2.0.3-1.el7.x86_64.rpm
+$ wget https://github.com/eosio/eos/releases/download/v2.0.4/eosio-2.0.4-1.el7.x86_64.rpm
+$ sudo yum install ./eosio-2.0.4-1.el7.x86_64.rpm
```
#### RPM Package Uninstall
```sh
@@ -126,7 +126,7 @@ To uninstall the EOSIO built/installed binaries and dependencies, run:
## Getting Started
-Instructions detailing the process of getting the software, building it, running a simple test network that produces blocks, account creation and uploading a sample contract to the blockchain can be found in the [Getting Started](https://developers.eos.io/eosio-home/docs) walkthrough.
+Instructions detailing the process of getting the software, building it, running a simple test network that produces blocks, account creation and uploading a sample contract to the blockchain can be found in the [Getting Started](https://developers.eos.io/welcome/latest/getting-started) walkthrough.
## Contributing
diff --git a/docs.json b/docs.json
new file mode 100644
index 00000000000..698c666cecb
--- /dev/null
+++ b/docs.json
@@ -0,0 +1,65 @@
+{
+ "name": "eos",
+ "generators": [
+ {
+ "name": "collate_markdown",
+ "options": {
+ "docs_dir": "docs"
+ }
+ },
+ {
+ "name": "swagger",
+ "options": {
+ "swagger_path": "plugins/chain_api_plugin/chain.swagger.yaml",
+ "swagger_dest_path": "nodeos/plugins/chain_api_plugin/api-reference",
+ "disable_filters": true,
+ "disable_summary_gen": true
+ }
+ },
+ {
+ "name": "swagger",
+ "options": {
+ "swagger_path": "plugins/db_size_api_plugin/db_size.swagger.yaml",
+ "swagger_dest_path": "nodeos/plugins/db_size_api_plugin/api-reference",
+ "disable_filters": true,
+ "disable_summary_gen": true
+ }
+ },
+ {
+ "name": "swagger",
+ "options": {
+ "swagger_path": "plugins/producer_api_plugin/producer.swagger.yaml",
+ "swagger_dest_path": "nodeos/plugins/producer_api_plugin/api-reference",
+ "disable_filters": true,
+ "disable_summary_gen": true
+ }
+ },
+ {
+ "name": "swagger",
+ "options": {
+ "swagger_path": "plugins/net_api_plugin/net.swagger.yaml",
+ "swagger_dest_path": "nodeos/plugins/net_api_plugin/api-reference",
+ "disable_filters": true,
+ "disable_summary_gen": true
+ }
+ },
+ {
+ "name": "swagger",
+ "options": {
+ "swagger_path": "plugins/test_control_api_plugin/test_control.swagger.yaml",
+ "swagger_dest_path": "nodeos/plugins/test_control_api_plugin/api-reference",
+ "disable_filters": true,
+ "disable_summary_gen": true
+ }
+ },
+ {
+ "name": "swagger",
+ "options": {
+ "swagger_path": "plugins/trace_api_plugin/trace_api.swagger.yaml",
+ "swagger_dest_path": "nodeos/plugins/trace_api_plugin/api-reference",
+ "disable_filters": true,
+ "disable_summary_gen": true
+ }
+ }
+ ]
+}
diff --git a/docs/00_install/00_install-prebuilt-binaries.md b/docs/00_install/00_install-prebuilt-binaries.md
index ae121a22fb5..9c62df641ed 100644
--- a/docs/00_install/00_install-prebuilt-binaries.md
+++ b/docs/00_install/00_install-prebuilt-binaries.md
@@ -25,13 +25,13 @@ brew remove eosio
#### Ubuntu 18.04 Package Install
```sh
-wget https://github.com/eosio/eos/releases/download/v2.0.3/eosio_2.0.3-1-ubuntu-18.04_amd64.deb
-sudo apt install ./eosio_2.0.3-1-ubuntu-18.04_amd64.deb
+wget https://github.com/eosio/eos/releases/download/v2.0.4/eosio_2.0.4-1-ubuntu-18.04_amd64.deb
+sudo apt install ./eosio_2.0.4-1-ubuntu-18.04_amd64.deb
```
#### Ubuntu 16.04 Package Install
```sh
-wget https://github.com/eosio/eos/releases/download/v2.0.3/eosio_2.0.3-1-ubuntu-16.04_amd64.deb
-sudo apt install ./eosio_2.0.3-1-ubuntu-16.04_amd64.deb
+wget https://github.com/eosio/eos/releases/download/v2.0.4/eosio_2.0.4-1-ubuntu-16.04_amd64.deb
+sudo apt install ./eosio_2.0.4-1-ubuntu-16.04_amd64.deb
```
#### Ubuntu Package Uninstall
```sh
@@ -42,8 +42,8 @@ sudo apt remove eosio
#### RPM Package Install
```sh
-wget https://github.com/eosio/eos/releases/download/v2.0.3/eosio-2.0.3-1.el7.x86_64.rpm
-sudo yum install ./eosio-2.0.3-1.el7.x86_64.rpm
+wget https://github.com/eosio/eos/releases/download/v2.0.4/eosio-2.0.4-1.el7.x86_64.rpm
+sudo yum install ./eosio-2.0.4-1.el7.x86_64.rpm
```
#### RPM Package Uninstall
```sh
diff --git a/docs/01_nodeos/02_usage/00_nodeos-options.md b/docs/01_nodeos/02_usage/00_nodeos-options.md
index 3d4d0a79300..b2ab1561c33 100644
--- a/docs/01_nodeos/02_usage/00_nodeos-options.md
+++ b/docs/01_nodeos/02_usage/00_nodeos-options.md
@@ -30,422 +30,6 @@ Application Command Line Options:
## Plugin-specific Options
-Plugin-specific options control the behavior of the nodeos plugins. Every plugin-specific option has a unique name, so it can be specified in any order within the command line or `config.ini` file. When specifying one or more plugin-specific option(s), the applicable plugin(s) must also be enabled using the `--plugin` option or else the corresponding option(s) will be ignored. A sample output from running `nodeos --help` is displayed below, showing an excerpt from the plugin-specific options:
-
-```console
-Config Options for eosio::chain_plugin:
- --blocks-dir arg (="blocks") the location of the blocks directory
- (absolute path or relative to
- application data dir)
- --protocol-features-dir arg (="protocol_features")
- the location of the protocol_features
- directory (absolute path or relative to
- application config dir)
- --checkpoint arg Pairs of [BLOCK_NUM,BLOCK_ID] that
- should be enforced as checkpoints.
- --wasm-runtime runtime Override default WASM runtime
- --abi-serializer-max-time-ms arg (=15000)
- Override default maximum ABI
- serialization time allowed in ms
- --chain-state-db-size-mb arg (=1024) Maximum size (in MiB) of the chain
- state database
- --chain-state-db-guard-size-mb arg (=128)
- Safely shut down node when free space
- remaining in the chain state database
- drops below this size (in MiB).
- --reversible-blocks-db-size-mb arg (=340)
- Maximum size (in MiB) of the reversible
- blocks database
- --reversible-blocks-db-guard-size-mb arg (=2)
- Safely shut down node when free space
- remaining in the reverseible blocks
- database drops below this size (in
- MiB).
- --signature-cpu-billable-pct arg (=50)
- Percentage of actual signature recovery
- cpu to bill. Whole number percentages,
- e.g. 50 for 50%
- --chain-threads arg (=2) Number of worker threads in controller
- thread pool
- --contracts-console print contract's output to console
- --actor-whitelist arg Account added to actor whitelist (may
- specify multiple times)
- --actor-blacklist arg Account added to actor blacklist (may
- specify multiple times)
- --contract-whitelist arg Contract account added to contract
- whitelist (may specify multiple times)
- --contract-blacklist arg Contract account added to contract
- blacklist (may specify multiple times)
- --action-blacklist arg Action (in the form code::action) added
- to action blacklist (may specify
- multiple times)
- --key-blacklist arg Public key added to blacklist of keys
- that should not be included in
- authorities (may specify multiple
- times)
- --sender-bypass-whiteblacklist arg Deferred transactions sent by accounts
- in this list do not have any of the
- subjective whitelist/blacklist checks
- applied to them (may specify multiple
- times)
- --read-mode arg (=speculative) Database read mode ("speculative",
- "head", "read-only", "irreversible").
- In "speculative" mode database contains
- changes done up to the head block plus
- changes made by transactions not yet
- included to the blockchain.
- In "head" mode database contains
- changes done up to the current head
- block.
- In "read-only" mode database contains
- changes done up to the current head
- block and transactions cannot be pushed
- to the chain API.
- In "irreversible" mode database
- contains changes done up to the last
- irreversible block and transactions
- cannot be pushed to the chain API.
-
- --validation-mode arg (=full) Chain validation mode ("full" or
- "light").
- In "full" mode all incoming blocks will
- be fully validated.
- In "light" mode all incoming blocks
- headers will be fully validated;
- transactions in those validated blocks
- will be trusted
-
- --disable-ram-billing-notify-checks Disable the check which subjectively
- fails a transaction if a contract bills
- more RAM to another account within the
- context of a notification handler (i.e.
- when the receiver is not the code of
- the action).
- --maximum-variable-signature-length arg (=16384)
- Subjectively limit the maximum length
- of variable components in a variable
- legnth signature to this size in bytes
- --trusted-producer arg Indicate a producer whose blocks
- headers signed by it will be fully
- validated, but transactions in those
- validated blocks will be trusted.
- --database-map-mode arg (=mapped) Database map mode ("mapped", "heap", or
- "locked").
- In "mapped" mode database is memory
- mapped as a file.
- In "heap" mode database is preloaded in
- to swappable memory.
- In "locked" mode database is preloaded
- and locked in to memory.
-
-
-Command Line Options for eosio::chain_plugin:
- --genesis-json arg File to read Genesis State from
- --genesis-timestamp arg override the initial timestamp in the
- Genesis State file
- --print-genesis-json extract genesis_state from blocks.log
- as JSON, print to console, and exit
- --extract-genesis-json arg extract genesis_state from blocks.log
- as JSON, write into specified file, and
- exit
- --print-build-info print build environment information to
- console as JSON and exit
- --extract-build-info arg extract build environment information
- as JSON, write into specified file, and
- exit
- --fix-reversible-blocks recovers reversible block database if
- that database is in a bad state
- --force-all-checks do not skip any checks that can be
- skipped while replaying irreversible
- blocks
- --disable-replay-opts disable optimizations that specifically
- target replay
- --replay-blockchain clear chain state database and replay
- all blocks
- --hard-replay-blockchain clear chain state database, recover as
- many blocks as possible from the block
- log, and then replay those blocks
- --delete-all-blocks clear chain state database and block
- log
- --truncate-at-block arg (=0) stop hard replay / block log recovery
- at this block number (if set to
- non-zero number)
- --import-reversible-blocks arg replace reversible block database with
- blocks imported from specified file and
- then exit
- --export-reversible-blocks arg export reversible block database in
- portable format into specified file and
- then exit
- --snapshot arg File to read Snapshot State from
-
-Config Options for eosio::history_plugin:
- -f [ --filter-on ] arg Track actions which match
- receiver:action:actor. Actor may be
- blank to include all. Action and Actor
- both blank allows all from Recieiver.
- Receiver may not be blank.
- -F [ --filter-out ] arg Do not track actions which match
- receiver:action:actor. Action and Actor
- both blank excludes all from Reciever.
- Actor blank excludes all from
- reciever:action. Receiver may not be
- blank.
-
-Config Options for eosio::http_client_plugin:
- --https-client-root-cert arg PEM encoded trusted root certificate
- (or path to file containing one) used
- to validate any TLS connections made.
- (may specify multiple times)
-
- --https-client-validate-peers arg (=1)
- true: validate that the peer
- certificates are valid and trusted,
- false: ignore cert errors
-
-Config Options for eosio::http_plugin:
- --unix-socket-path arg The filename (relative to data-dir) to
- create a unix socket for HTTP RPC; set
- blank to disable.
- --http-server-address arg (=127.0.0.1:8888)
- The local IP and port to listen for
- incoming http connections; set blank to
- disable.
- --https-server-address arg The local IP and port to listen for
- incoming https connections; leave blank
- to disable.
- --https-certificate-chain-file arg Filename with the certificate chain to
- present on https connections. PEM
- format. Required for https.
- --https-private-key-file arg Filename with https private key in PEM
- format. Required for https
- --https-ecdh-curve arg (=secp384r1) Configure https ECDH curve to use:
- secp384r1 or prime256v1
- --access-control-allow-origin arg Specify the Access-Control-Allow-Origin
- to be returned on each request.
- --access-control-allow-headers arg Specify the Access-Control-Allow-Header
- s to be returned on each request.
- --access-control-max-age arg Specify the Access-Control-Max-Age to
- be returned on each request.
- --access-control-allow-credentials Specify if Access-Control-Allow-Credent
- ials: true should be returned on each
- request.
- --max-body-size arg (=1048576) The maximum body size in bytes allowed
- for incoming RPC requests
- --http-max-bytes-in-flight-mb arg (=500)
- Maximum size in megabytes http_plugin
- should use for processing http
- requests. 503 error response when
- exceeded.
- --verbose-http-errors Append the error log to HTTP responses
- --http-validate-host arg (=1) If set to false, then any incoming
- "Host" header is considered valid
- --http-alias arg Additionaly acceptable values for the
- "Host" header of incoming HTTP
- requests, can be specified multiple
- times. Includes http/s_server_address
- by default.
- --http-threads arg (=2) Number of worker threads in http thread
- pool
-
-Config Options for eosio::login_plugin:
- --max-login-requests arg (=1000000) The maximum number of pending login
- requests
- --max-login-timeout arg (=60) The maximum timeout for pending login
- requests (in seconds)
-
-Config Options for eosio::net_plugin:
- --p2p-listen-endpoint arg (=0.0.0.0:9876)
- The actual host:port used to listen for
- incoming p2p connections.
- --p2p-server-address arg An externally accessible host:port for
- identifying this node. Defaults to
- p2p-listen-endpoint.
- --p2p-peer-address arg The public endpoint of a peer node to
- connect to. Use multiple
- p2p-peer-address options as needed to
- compose a network.
- Syntax: host:port[:|]
- The optional 'trx' and 'blk'
- indicates to node that only
- transactions 'trx' or blocks 'blk'
- should be sent. Examples:
- p2p.eos.io:9876
- p2p.trx.eos.io:9876:trx
- p2p.blk.eos.io:9876:blk
-
- --p2p-max-nodes-per-host arg (=1) Maximum number of client nodes from any
- single IP address
- --agent-name arg (="EOS Test Agent") The name supplied to identify this node
- amongst the peers.
- --allowed-connection arg (=any) Can be 'any' or 'producers' or
- 'specified' or 'none'. If 'specified',
- peer-key must be specified at least
- once. If only 'producers', peer-key is
- not required. 'producers' and
- 'specified' may be combined.
- --peer-key arg Optional public key of peer allowed to
- connect. May be used multiple times.
- --peer-private-key arg Tuple of [PublicKey, WIF private key]
- (may specify multiple times)
- --max-clients arg (=25) Maximum number of clients from which
- connections are accepted, use 0 for no
- limit
- --connection-cleanup-period arg (=30) number of seconds to wait before
- cleaning up dead connections
- --max-cleanup-time-msec arg (=10) max connection cleanup time per cleanup
- call in millisec
- --net-threads arg (=2) Number of worker threads in net_plugin
- thread pool
- --sync-fetch-span arg (=100) number of blocks to retrieve in a chunk
- from any individual peer during
- synchronization
- --use-socket-read-watermark arg (=0) Enable expirimental socket read
- watermark optimization
- --peer-log-format arg (=["${_name}" ${_ip}:${_port}])
- The string used to format peers when
- logging messages about them. Variables
- are escaped with ${}.
- Available Variables:
- _name self-reported name
-
- _id self-reported ID (64 hex
- characters)
-
- _sid first 8 characters of
- _peer.id
-
- _ip remote IP address of peer
-
- _port remote port number of peer
-
- _lip local IP address connected to
- peer
-
- _lport local port number connected
- to peer
-
-
-
-Config Options for eosio::producer_plugin:
-
- -e [ --enable-stale-production ] Enable block production, even if the
- chain is stale.
- -x [ --pause-on-startup ] Start this node in a state where
- production is paused
- --max-transaction-time arg (=30) Limits the maximum time (in
- milliseconds) that is allowed a pushed
- transaction's code to execute before
- being considered invalid
- --max-irreversible-block-age arg (=-1)
- Limits the maximum age (in seconds) of
- the DPOS Irreversible Block for a chain
- this node will produce blocks on (use
- negative value to indicate unlimited)
- -p [ --producer-name ] arg ID of producer controlled by this node
- (e.g. inita; may specify multiple
- times)
- --private-key arg (DEPRECATED - Use signature-provider
- instead) Tuple of [public key, WIF
- private key] (may specify multiple
- times)
- --signature-provider arg (=EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV=KEY:5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3)
- Key=Value pairs in the form
- =
- Where:
- is a string form of
- a vaild EOSIO public
- key
-
- is a string in the
- form
- :
-
- is KEY, or KEOSD
-
- KEY: is a string form of
- a valid EOSIO
- private key which
- maps to the provided
- public key
-
- KEOSD: is the URL where
- keosd is available
- and the approptiate
- wallet(s) are
- unlocked
- --keosd-provider-timeout arg (=5) Limits the maximum time (in
- milliseconds) that is allowed for
- sending blocks to a keosd provider for
- signing
- --greylist-account arg account that can not access to extended
- CPU/NET virtual resources
- --greylist-limit arg (=1000) Limit (between 1 and 1000) on the
- multiple that CPU/NET virtual resources
- can extend during low usage (only
- enforced subjectively; use 1000 to not
- enforce any limit)
- --produce-time-offset-us arg (=0) offset of non last block producing time
- in microseconds. Negative number
- results in blocks to go out sooner, and
- positive number results in blocks to go
- out later
- --last-block-time-offset-us arg (=0) offset of last block producing time in
- microseconds. Negative number results
- in blocks to go out sooner, and
- positive number results in blocks to go
- out later
- --max-scheduled-transaction-time-per-block-ms arg (=100)
- Maximum wall-clock time, in
- milliseconds, spent retiring scheduled
- transactions in any block before
- returning to normal transaction
- processing.
- --subjective-cpu-leeway-us arg (=31000)
- Time in microseconds allowed for a
- transaction that starts with
- insufficient CPU quota to complete and
- cover its CPU usage.
- --incoming-defer-ratio arg (=1) ratio between incoming transations and
- deferred transactions when both are
- exhausted
- --incoming-transaction-queue-size-mb arg (=1024)
- Maximum size (in MiB) of the incoming
- transaction queue. Exceeding this value
- will subjectively drop transaction with
- resource exhaustion.
- --producer-threads arg (=2) Number of worker threads in producer
- thread pool
- --snapshots-dir arg (="snapshots") the location of the snapshots directory
- (absolute path or relative to
- application data dir)
-
-Config Options for eosio::state_history_plugin:
- --state-history-dir arg (="state-history")
- the location of the state-history
- directory (absolute path or relative to
- application data dir)
- --trace-history enable trace history
- --chain-state-history enable chain state history
- --state-history-endpoint arg (=127.0.0.1:8080)
- the endpoint upon which to listen for
- incoming connections. Caution: only
- expose this port to your internal
- network.
- --trace-history-debug-mode enable debug mode for trace history
-
-Command Line Options for eosio::state_history_plugin:
- --delete-state-history clear state history files
-
-Config Options for eosio::txn_test_gen_plugin:
- --txn-reference-block-lag arg (=0) Lag in number of blocks from the head
- block when selecting the reference
- block for transactions (-1 means Last
- Irreversible Block)
- --txn-test-gen-threads arg (=2) Number of worker threads in
- txn_test_gen thread pool
- --txn-test-gen-account-prefix arg (=txn.test.)
- Prefix to use for accounts generated
- and used by this plugin
-```
+Plugin-specific options control the behavior of the nodeos plugins. Every plugin-specific option has a unique name, so it can be specified in any order within the command line or `config.ini` file. When specifying one or more plugin-specific option(s), the applicable plugin(s) must also be enabled using the `--plugin` option or else the corresponding option(s) will be ignored.
For more information on each plugin-specific option, just visit the [Plugins](../03_plugins/index.md) section.
diff --git a/docs/01_nodeos/03_plugins/chain_api_plugin/api-reference/index.md b/docs/01_nodeos/03_plugins/chain_api_plugin/api-reference/index.md
new file mode 100644
index 00000000000..6451c708686
--- /dev/null
+++ b/docs/01_nodeos/03_plugins/chain_api_plugin/api-reference/index.md
@@ -0,0 +1 @@
+
diff --git a/docs/01_nodeos/03_plugins/chain_plugin/index.md b/docs/01_nodeos/03_plugins/chain_plugin/index.md
index c9934991125..b820df3775f 100644
--- a/docs/01_nodeos/03_plugins/chain_plugin/index.md
+++ b/docs/01_nodeos/03_plugins/chain_plugin/index.md
@@ -73,7 +73,8 @@ Config Options for eosio::chain_plugin:
application config dir)
--checkpoint arg Pairs of [BLOCK_NUM,BLOCK_ID] that
should be enforced as checkpoints.
- --wasm-runtime wavm/wabt Override default WASM runtime
+ --wasm-runtime eos-vm|eos-vm-jit Override default WASM runtime (wabt)
+ --eos-vm-oc-enable Enable optimized compilation in WASM
--abi-serializer-max-time-ms arg (=15000)
Override default maximum ABI
serialization time allowed in ms
diff --git a/docs/01_nodeos/03_plugins/db_size_api_plugin/api-reference/index.md b/docs/01_nodeos/03_plugins/db_size_api_plugin/api-reference/index.md
new file mode 100644
index 00000000000..6451c708686
--- /dev/null
+++ b/docs/01_nodeos/03_plugins/db_size_api_plugin/api-reference/index.md
@@ -0,0 +1 @@
+
diff --git a/docs/01_nodeos/03_plugins/net_api_plugin/api-reference/index.md b/docs/01_nodeos/03_plugins/net_api_plugin/api-reference/index.md
new file mode 100644
index 00000000000..6451c708686
--- /dev/null
+++ b/docs/01_nodeos/03_plugins/net_api_plugin/api-reference/index.md
@@ -0,0 +1 @@
+
diff --git a/docs/01_nodeos/03_plugins/producer_api_plugin/api-reference/index.md b/docs/01_nodeos/03_plugins/producer_api_plugin/api-reference/index.md
new file mode 100644
index 00000000000..6451c708686
--- /dev/null
+++ b/docs/01_nodeos/03_plugins/producer_api_plugin/api-reference/index.md
@@ -0,0 +1 @@
+
diff --git a/docs/01_nodeos/03_plugins/producer_plugin/10_block-producing-explained.md b/docs/01_nodeos/03_plugins/producer_plugin/10_block-producing-explained.md
new file mode 100644
index 00000000000..1c3d56ef14a
--- /dev/null
+++ b/docs/01_nodeos/03_plugins/producer_plugin/10_block-producing-explained.md
@@ -0,0 +1,90 @@
+---
+content_title: Block Production Explained
+---
+
+For simplicity of the explanation let's consider the following notations:
+
+m = max_block_cpu_usage
+
+t = block-time
+
+e = last-block-cpu-effort-percent
+
+w = block_time_interval = 500ms
+
+a = produce-block-early-amount = (w - w*e/100) ms
+
+p = produce-block-time; p = t - a
+
+c = billed_cpu_in_block = minimum(m, w - a)
+
+n = network tcp/ip latency
+
+peer validation for similar hardware/eosio-version/config will be <= m
+
+**Let's consider for exemplification the following four BPs and their network topology as depicted in below diagram**
+
+
+```dot-svg
+#p2p_local_chain_prunning.dot - local chain pruning
+#
+#notes: * to see image copy/paste to https://dreampuf.github.io/GraphvizOnline
+# * image will be rendered by gatsby-remark-graphviz plugin in eosio docs.
+
+digraph {
+ newrank=true #allows ranks inside subgraphs (important!)
+ compound=true #allows edges connecting nodes with subgraphs
+ graph [rankdir=LR]
+ node [style=filled, fillcolor=lightgray, shape=square, fixedsize=true, width=.55, fontsize=10]
+ edge [dir=both, arrowsize=.6, weight=100]
+ splines=false
+
+ subgraph cluster_chain {
+ label="Block Producers Peers"; labelloc="b"
+ graph [color=invis]
+ b0 [label="...", color=invis, style=""]
+ b1 [label="BP-A"]; b2 [label="BP-A\nPeer"]; b3 [label="BP-B\nPeer"]; b4 [label="BP-B"]
+ b5 [label="...", color=invis, style=""]
+ b0 -> b1 -> b2 -> b3 -> b4 -> b5
+ } //cluster_chain
+
+} //digraph
+```
+
+`BP-A` will send block at `p` and,
+
+`BP-B` needs block at time `t` or otherwise will drop it.
+
+If `BP-A` is producing 12 blocks as follows `b(lock) at t(ime) 1`, `bt 1.5`, `bt 2`, `bt 2.5`, `bt 3`, `bt 3.5`, `bt 4`, `bt 4.5`, `bt 5`, `bt 5.5`, `bt 6`, `bt 6.5` then `BP-B` needs `bt 6.5` by time `6.5` so it has `.5` to produce `bt 7`.
+
+Please notice that the time of `bt 7` minus `.5` equals the time of `bt 6.5` therefore time `t` is the last block time of `BP-A` and when `BP-B` needs to start its first block.
+
+## Example 1
+`BP-A` has 50% e, m = 200ms, c = 200ms, n = 0ms, a = 250ms:
+`BP-A` sends at (t-250ms) <-> `BP-A-Peer` processes for 200ms and sends at (t - 50ms) <-> `BP-B-Peer` processes for 200ms and sends at (t + 150ms) <-> arrive at `BP-B` 150ms too late.
+
+## Example 2
+`BP-A` has 40% e and m = 200ms, c = 200ms, n = 0ms, a = 300ms:
+(t-300ms) <-> (+200ms) <-> (+200ms) <-> arrive at `BP-B` 100ms too late.
+
+## Example 3
+`BP-A` has 30% e and m = 200ms, c = 150ms, n = 0ms, a = 350ms:
+(t-350ms) <-> (+150ms) <-> (+150ms) <-> arrive at `BP-B` with 50ms to spare.
+
+## Example 4
+`BP-A` has 25% e and m = 200ms, c = 125ms, n = 0ms, a = 375ms:
+(t-375ms) <-> (+125ms) <-> (+125ms) <-> arrive at `BP-B` with 125ms to spare.
+
+## Example 5
+`BP-A` has 10% e and m = 200ms, c = 50ms, n = 0ms, a = 450ms:
+(t-450ms) <-> (+50ms) <-> (+50ms) <-> arrive at `BP-B` with 350ms to spare.
+
+## Example 6
+`BP-A` has 10% e and m = 200ms, c = 50ms, n = 15ms, a = 450ms:
+(t-450ms) <- +15ms -> (+50ms) <- +15ms -> (+50ms) <- +15ms -> `BP-B` <-> arrive with 305ms to spare.
+
+## Example 7
+Example world-wide network: `BP-A` has 10% e and m = 200ms, c = 50ms, n = 15ms/250ms, a = 450ms:
+(t-450ms) <- +15ms -> (+50ms) <- +250ms -> (+50ms) <- +15ms -> `BP-B` <-> arrive with 70ms to spare.
+
+Running `wasm-runtime=eos-vm-jit eos-vm-oc-enable` on a relay node will reduce the validation time.
diff --git a/docs/01_nodeos/03_plugins/producer_plugin/index.md b/docs/01_nodeos/03_plugins/producer_plugin/index.md
index 8a50fbeb23d..23204885236 100644
--- a/docs/01_nodeos/03_plugins/producer_plugin/index.md
+++ b/docs/01_nodeos/03_plugins/producer_plugin/index.md
@@ -5,7 +5,7 @@
The `producer_plugin` loads functionality required for a node to produce blocks.
[[info]]
-| Additional configuration is required to produce blocks. Please read [Configuring Block Producing Node](https://developers.eos.io/eosio-nodeos/docs/environment-producing-node).
+| Additional configuration is required to produce blocks. Please read [Configuring Block Producing Node](../../02_usage/02_node-setups/00_producing-node.md).
## Usage
@@ -38,6 +38,14 @@ Config Options for eosio::producer_plugin:
the DPOS Irreversible Block for a chain
this node will produce blocks on (use
negative value to indicate unlimited)
+ --max-block-cpu-usage-threshold-us Threshold of CPU block production to
+ consider block full; when within threshold
+ of max-block-cpu-usage block can be
+ produced immediately. Default value 5000
+ --max-block-net-usage-threshold-bytes Threshold of NET block production to
+ consider block full; when within threshold
+ of max-block-net-usage block can be produced
+ immediately. Default value 1024
-p [ --producer-name ] arg ID of producer controlled by this node
(e.g. inita; may specify multiple
times)
@@ -92,9 +100,10 @@ Config Options for eosio::producer_plugin:
transactions in any block before
returning to normal transaction
processing.
- --incoming-defer-ratio arg (=1) ratio between incoming transations and
+ --incoming-defer-ratio arg (=1) ratio between incoming transactions and
deferred transactions when both are
- exhausted
+ queued for execution
+
--producer-threads arg (=2) Number of worker threads in producer
thread pool
--snapshots-dir arg (="snapshots") the location of the snapshots directory
@@ -106,6 +115,21 @@ Config Options for eosio::producer_plugin:
* [`chain_plugin`](../chain_plugin/index.md)
+## Transaction Priority
+
+You can give one of the transaction types priority over another when the producer plugin has a queue of transactions pending.
+
+The option below sets the ratio between the incoming transaction and the deferred transaction:
+
+```console
+ --incoming-defer-ratio arg (=1)
+```
+
+With the default value of `1`, the `producer` plugin processes one incoming transaction per deferred transaction. When `arg` is set to `10`, the `producer` plugin processes 10 incoming transactions per deferred transaction.
+
+If the `arg` is set to a sufficiently large number, the plugin always processes the incoming transactions first until the queue of incoming transactions is empty. Conversely, if the `arg` is 0, the `producer` plugin processes the deferred transactions queue first.
+
+
### Load Dependency Examples
```console
@@ -116,3 +140,5 @@ plugin = eosio::chain_plugin [operations] [options]
# command-line
nodeos ... --plugin eosio::chain_plugin [operations] [options]
```
+
+For details about how blocks are produced please read the following [block producing explainer](10_block-producing-explained.md).
diff --git a/docs/01_nodeos/03_plugins/test_control_api_plugin/api-reference/index.md b/docs/01_nodeos/03_plugins/test_control_api_plugin/api-reference/index.md
new file mode 100644
index 00000000000..6451c708686
--- /dev/null
+++ b/docs/01_nodeos/03_plugins/test_control_api_plugin/api-reference/index.md
@@ -0,0 +1 @@
+
diff --git a/docs/01_nodeos/03_plugins/trace_api_plugin/api-reference/index.md b/docs/01_nodeos/03_plugins/trace_api_plugin/api-reference/index.md
new file mode 100644
index 00000000000..6451c708686
--- /dev/null
+++ b/docs/01_nodeos/03_plugins/trace_api_plugin/api-reference/index.md
@@ -0,0 +1 @@
+
diff --git a/docs/01_nodeos/03_plugins/trace_api_plugin/index.md b/docs/01_nodeos/03_plugins/trace_api_plugin/index.md
new file mode 100644
index 00000000000..18deaa23ce7
--- /dev/null
+++ b/docs/01_nodeos/03_plugins/trace_api_plugin/index.md
@@ -0,0 +1,121 @@
+# trace_api_plugin
+
+## Description
+
+The `trace_api_plugin` provides a consumer-focused long-term API for retrieving retired actions and related metadata from a specified block. The plugin defines a new HTTP endpoint (see the [API reference](api-reference/index.md) for more information).
+
+## Usage
+
+```console
+# config.ini
+plugin = eosio::trace_api_plugin
+[options]
+```
+```sh
+# command-line
+nodeos ... --plugin eosio::trace_api_plugin [options]
+```
+
+## Options
+
+These can be specified from both the `nodeos` command-line or the `config.ini` file:
+
+```console
+Config Options for eosio::trace_api_plugin:
+
+ --trace-dir (="traces") the location of the trace directory
+ (absolute path or relative to
+ application data dir)
+ --trace-slice-stride (=10000) the number of blocks each "slice" of
+ trace data will contain on the
+ filesystem
+ --trace-minimum-irreversible-history-blocks (=-1)
+ Number of blocks to ensure are kept
+ past LIB for retrieval before "slice"
+ files can be automatically removed.
+ A value of -1 indicates that automatic
+ removal of "slice" files will be
+ turned off.
+ --trace-rpc-abi ABIs used when decoding trace RPC
+ responses.
+ There must be at least one ABI
+ specified OR the flag trace-no-abis
+ must be used.
+ ABIs are specified as "Key=Value" pairs
+ in the form =
+ Where can be:
+ an absolute path to a file
+ containing a valid JSON-encoded ABI
+ a relative path from `data-dir` to a
+ file containing valid JSON-encoded ABI.
+ --trace-no-abis Use to indicate that the RPC responses
+ will not use ABIs.
+ Failure to specify this option when
+ there are no trace-rpc-abi
+ configurations will result in an Error.
+ This option is mutually exclusive with
+ trace-rpc-api
+```
+
+## Dependencies
+
+* [`chain_plugin`](../chain_plugin/index.md)
+* [`http_plugin`](../http_plugin/index.md)
+
+### Load Dependency Examples
+
+The following plugins are loaded with default settings if not specified on the command line or `config.ini`:
+
+```console
+# config.ini
+plugin = eosio::chain_plugin
+[options]
+plugin = eosio::http_plugin
+[options]
+```
+```sh
+# command-line
+nodeos ... --plugin eosio::chain_plugin [options] \
+ --plugin eosio::http_plugin [options]
+```
+
+## Purpose
+
+While integrating applications such as block explorers and exchanges with an EOSIO blockchain, the user might require a complete transcript of actions that are processed by the blockchain, including those spawned from the execution of smart contracts and scheduled transactions. The `trace_api_plugin` aims to serve this need. The purpose of the plugin is to provide:
+
+* A transcript of retired actions and related metadata
+* A consumer focused long-term API to retrieve blocks
+* Maintainable resource commitments at the EOSIO nodes
+
+Therefore, one crucial goal of the `trace_api_plugin` is to have better maintenance of node resources (file system, disk, memory, etc.). This goal is different from the existing `history_plugin` which provides far more configurable filtering and querying capabilities, or the existing `state_history_plugin` which provides a binary streaming interface to access structural chain data, action data, as well as state deltas.
+
+## Examples
+
+Below is a `nodeos` configuration example for the `trace_api_plugin` when tracing some EOSIO reference contracts:
+
+```sh
+nodeos --data-dir data_dir --config-dir config_dir --trace-dir traces_dir
+--plugin eosio::trace_api_plugin
+--trace-rpc-abi=eosio=abis/eosio.abi
+--trace-rpc-abi=eosio.token=abis/eosio.token.abi
+--trace-rpc-abi=eosio.msig=abis/eosio.msig.abi
+--trace-rpc-abi=eosio.wrap=abis/eosio.wrap.abi
+```
+
+## Maintenance Note
+
+To reduce the disk space consumed by the `trace_api_plugin`, you can configure the following option:
+
+```console
+ --trace-minimum-irreversible-history-blocks N (=-1)
+```
+
+Once the value is no longer `-1`, only `N` number of blocks before the current LIB block will be kept on disk.
+
+If resource usage cannot be effectively managed via the `trace-minimum-irreversible-history-blocks` configuration option, then there might be a need for ongoing maintenance. In that case, the user may prefer to manage resources with an external system or process.
+
+### Manual Filesystem Management
+
+The `trace-dir` configuration option defines a location on the filesystem where all artefacts created by the `trace_api_plugin` are stored. These files are stable once the LIB block has progressed past that slice and then can be deleted at any time to reclaim filesystem space. The conventions regarding these files are to-be-determined. However, the remainder of the system will tolerate any out-of-process management system that removes some or all of these files in this directory regardless of what data they represent, or whether there is a running `nodeos` instance accessing them or not. Data which would nominally be available, but is no longer so due to manual maintenance, will result in a HTTP 404 response from the appropriate API endpoint(s).
+
+In conjunction with the `trace-minimum-irreversible-history-blocks=-1` option, administrators can take full control over the lifetime of the data available via the `trace-api-plugin` and the associated filesystem resources.
diff --git a/docs/01_nodeos/04_replays/how-to-generate-a-snapshot.md b/docs/01_nodeos/04_replays/how-to-generate-a-snapshot.md
index b9ecb8e9bf8..09f44db9c92 100644
--- a/docs/01_nodeos/04_replays/how-to-generate-a-snapshot.md
+++ b/docs/01_nodeos/04_replays/how-to-generate-a-snapshot.md
@@ -15,5 +15,5 @@ curl --request POST \
--header 'content-type: application/x-www-form-urlencoded; charset=UTF-8'
```
-[[info | Other `blocks.log` files]]
+[[info | Getting other `blocks.log` files]]
| You can also download a `blocks.log` file from third party providers.
diff --git a/docs/01_nodeos/08_troubleshooting/index.md b/docs/01_nodeos/08_troubleshooting/index.md
index 77462fb4bd6..bb4a0d7edcb 100644
--- a/docs/01_nodeos/08_troubleshooting/index.md
+++ b/docs/01_nodeos/08_troubleshooting/index.md
@@ -43,3 +43,7 @@ To focus only on the version line within the block:
```sh
cleos --url http://localhost:8888 get info | grep server_version
```
+
+### Error 3070000: WASM Exception Error
+
+If you try to deploy the `eosio.bios` contract or `eosio.system` contract in an attempt to boot an EOSIO-based blockchain and you get the following error or similar: `Publishing contract... Error 3070000: WASM Exception Error Details: env.set_proposed_producers_ex unresolveable`, it is because you have to activate the `PREACTIVATE_FEATURE` protocol first. More details about it and how to enable it can be found in the [Bios Boot Sequence Tutorial](https://developers.eos.io/welcome/latest/tutorials/bios-boot-sequence/#112-set-the-eosiosystem-contract). For more information, you may also visit the [Nodeos Upgrade Guides](https://developers.eos.io/manuals/eos/latest/nodeos/upgrade-guides/).
diff --git a/docs/02_cleos/03_command-reference/create/account.md b/docs/02_cleos/03_command-reference/create/account.md
index 4a1edc32c46..976c986f561 100755
--- a/docs/02_cleos/03_command-reference/create/account.md
+++ b/docs/02_cleos/03_command-reference/create/account.md
@@ -30,7 +30,7 @@ Options:
```
## Command
-A set of EOS keys is required to create an account. A set of EOS keys can be generated by using `cleos create key`.
+A set of EOSIO keys is required to create an account. The EOSIO keys can be generated by using `cleos create key`.
```sh
cleos create account inita tester EOS4toFS3YXEQCkuuw1aqDLrtHim86Gz9u3hBdcBw5KNPZcursVHq EOS7d9A3uLe6As66jzN8j44TXJUqJSK3bFjjEEqR4oTvNAB3iM9SA
diff --git a/docs/02_cleos/03_command-reference/net/status.md b/docs/02_cleos/03_command-reference/net/status.md
index d24f42ef07c..318e21ce35b 100755
--- a/docs/02_cleos/03_command-reference/net/status.md
+++ b/docs/02_cleos/03_command-reference/net/status.md
@@ -14,3 +14,18 @@ Usage: cleos net status host
Positionals:
host TEXT The hostname:port to query status of connection
```
+
+Given a valid, existing `hostname:port` parameter, the above command returns a JSON response similar to the one below:
+
+```
+{
+ "peer": "hostname:port",
+ "connecting": false/true,
+ "syncing": false/true,
+ "last_handshake": {
+ ...
+ }
+}
+```
+
+The `last_handshake` structure is explained in detail in the [Network Peer Protocol](https://developers.eos.io/welcome/latest/protocol/network_peer_protocol#421-handshake-message) documentation section.
\ No newline at end of file
diff --git a/libraries/appbase b/libraries/appbase
index 72e93b39672..c51544732e3 160000
--- a/libraries/appbase
+++ b/libraries/appbase
@@ -1 +1 @@
-Subproject commit 72e93b396726916a596482897ab13f99a8197379
+Subproject commit c51544732e305207a4ba064cb3dd3cfd61e8af30
diff --git a/libraries/chain/CMakeLists.txt b/libraries/chain/CMakeLists.txt
index a2bc309a8fb..e5f9c9ef988 100644
--- a/libraries/chain/CMakeLists.txt
+++ b/libraries/chain/CMakeLists.txt
@@ -11,6 +11,7 @@ else()
try_run(POSIX_TIMER_TEST_RUN_RESULT POSIX_TIMER_TEST_COMPILE_RESULT ${CMAKE_CURRENT_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/platform_timer_posix_test.c)
if(POSIX_TIMER_TEST_RUN_RESULT EQUAL 0)
set(PLATFORM_TIMER_IMPL platform_timer_posix.cpp)
+ set(CHAIN_RT_LINKAGE rt)
else()
set(PLATFORM_TIMER_IMPL platform_timer_asio_fallback.cpp)
endif()
@@ -105,7 +106,7 @@ add_library( eosio_chain
)
target_link_libraries( eosio_chain fc chainbase Logging IR WAST WASM Runtime
- softfloat builtins wabt ${CHAIN_EOSVM_LIBRARIES} ${LLVM_LIBS}
+ softfloat builtins wabt ${CHAIN_EOSVM_LIBRARIES} ${LLVM_LIBS} ${CHAIN_RT_LINKAGE}
)
target_include_directories( eosio_chain
PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" "${CMAKE_CURRENT_BINARY_DIR}/include"
diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp
index 0d40e0425b2..b69b57cbf55 100644
--- a/libraries/chain/controller.cpp
+++ b/libraries/chain/controller.cpp
@@ -1203,6 +1203,10 @@ struct controller_impl {
transaction_trace_ptr push_scheduled_transaction( const generated_transaction_object& gto, fc::time_point deadline, uint32_t billed_cpu_time_us, bool explicit_billed_cpu_time = false )
{ try {
+
+ const bool validating = !self.is_producing_block();
+ EOS_ASSERT( !validating || explicit_billed_cpu_time, transaction_exception, "validating requires explicit billing" );
+
maybe_session undo_session;
if ( !self.skip_db_sessions() )
undo_session = maybe_session(db);
@@ -1309,7 +1313,7 @@ struct controller_impl {
// Only subjective OR soft OR hard failure logic below:
- if( gtrx.sender != account_name() && !(explicit_billed_cpu_time ? failure_is_subjective(*trace->except) : scheduled_failure_is_subjective(*trace->except))) {
+ if( gtrx.sender != account_name() && !(validating ? failure_is_subjective(*trace->except) : scheduled_failure_is_subjective(*trace->except))) {
// Attempt error handling for the generated transaction.
auto error_trace = apply_onerror( gtrx, deadline, trx_context.pseudo_start,
@@ -1331,7 +1335,7 @@ struct controller_impl {
// subjectivity changes based on producing vs validating
bool subjective = false;
- if (explicit_billed_cpu_time) {
+ if (validating) {
subjective = failure_is_subjective(*trace->except);
} else {
subjective = scheduled_failure_is_subjective(*trace->except);
@@ -1340,15 +1344,18 @@ struct controller_impl {
if ( !subjective ) {
// hard failure logic
- if( !explicit_billed_cpu_time ) {
+ if( !validating ) {
auto& rl = self.get_mutable_resource_limits_manager();
rl.update_account_usage( trx_context.bill_to_accounts, block_timestamp_type(self.pending_block_time()).slot );
int64_t account_cpu_limit = 0;
std::tie( std::ignore, account_cpu_limit, std::ignore, std::ignore ) = trx_context.max_bandwidth_billed_accounts_can_pay( true );
- cpu_time_to_bill_us = static_cast( std::min( std::min( static_cast(cpu_time_to_bill_us),
- account_cpu_limit ),
- trx_context.initial_objective_duration_limit.count() ) );
+ uint32_t limited_cpu_time_to_bill_us = static_cast( std::min(
+ std::min( static_cast(cpu_time_to_bill_us), account_cpu_limit ),
+ trx_context.initial_objective_duration_limit.count() ) );
+ EOS_ASSERT( !explicit_billed_cpu_time || (cpu_time_to_bill_us == limited_cpu_time_to_bill_us),
+ transaction_exception, "cpu to bill ${cpu} != limited ${limit}", ("cpu", cpu_time_to_bill_us)("limit", limited_cpu_time_to_bill_us) );
+ cpu_time_to_bill_us = limited_cpu_time_to_bill_us;
}
resource_limits.add_transaction_usage( trx_context.bill_to_accounts, cpu_time_to_bill_us, 0,
@@ -1395,7 +1402,7 @@ struct controller_impl {
transaction_trace_ptr push_transaction( const transaction_metadata_ptr& trx,
fc::time_point deadline,
uint32_t billed_cpu_time_us,
- bool explicit_billed_cpu_time = false )
+ bool explicit_billed_cpu_time )
{
EOS_ASSERT(deadline != fc::time_point(), transaction_exception, "deadline cannot be uninitialized");
@@ -1458,6 +1465,7 @@ struct controller_impl {
? transaction_receipt::executed
: transaction_receipt::delayed;
trace->receipt = push_receipt(*trx->packed_trx(), s, trx_context.billed_cpu_time_us, trace->net_usage);
+ trx->billed_cpu_time_us = trx_context.billed_cpu_time_us;
pending->_block_stage.get()._pending_trx_metas.emplace_back(trx);
} else {
transaction_receipt_header r;
@@ -2000,7 +2008,7 @@ struct controller_impl {
fork_db.add( bsp );
- if (conf.trusted_producers.count(b->producer)) {
+ if (self.is_trusted_producer(b->producer)) {
trusted_producer_light_validation = true;
};
@@ -2670,22 +2678,20 @@ void controller::push_block( std::future& block_state_future,
my->push_block( block_state_future, forked_branch_cb, trx_lookup );
}
-bool controller::in_immutable_mode()const{
- return (db_mode_is_immutable(get_read_mode()));
-}
-
-transaction_trace_ptr controller::push_transaction( const transaction_metadata_ptr& trx, fc::time_point deadline, uint32_t billed_cpu_time_us ) {
+transaction_trace_ptr controller::push_transaction( const transaction_metadata_ptr& trx, fc::time_point deadline,
+ uint32_t billed_cpu_time_us, bool explicit_billed_cpu_time ) {
validate_db_available_size();
- EOS_ASSERT( !in_immutable_mode(), transaction_type_exception, "push transaction not allowed in read-only mode" );
+ EOS_ASSERT( get_read_mode() != db_read_mode::IRREVERSIBLE, transaction_type_exception, "push transaction not allowed in irreversible mode" );
EOS_ASSERT( trx && !trx->implicit && !trx->scheduled, transaction_type_exception, "Implicit/Scheduled transaction not allowed" );
- return my->push_transaction(trx, deadline, billed_cpu_time_us, billed_cpu_time_us > 0 );
+ return my->push_transaction(trx, deadline, billed_cpu_time_us, explicit_billed_cpu_time );
}
-transaction_trace_ptr controller::push_scheduled_transaction( const transaction_id_type& trxid, fc::time_point deadline, uint32_t billed_cpu_time_us )
+transaction_trace_ptr controller::push_scheduled_transaction( const transaction_id_type& trxid, fc::time_point deadline,
+ uint32_t billed_cpu_time_us, bool explicit_billed_cpu_time )
{
- EOS_ASSERT( !in_immutable_mode(), transaction_type_exception, "push scheduled transaction not allowed in read-only mode" );
+ EOS_ASSERT( get_read_mode() != db_read_mode::IRREVERSIBLE, transaction_type_exception, "push scheduled transaction not allowed in irreversible mode" );
validate_db_available_size();
- return my->push_scheduled_transaction( trxid, deadline, billed_cpu_time_us, billed_cpu_time_us > 0 );
+ return my->push_scheduled_transaction( trxid, deadline, billed_cpu_time_us, explicit_billed_cpu_time );
}
const flat_set& controller::get_actor_whitelist() const {
@@ -2828,6 +2834,11 @@ block_id_type controller::last_irreversible_block_id() const {
return get_block_id_for_num( lib_num );
}
+time_point controller::last_irreversible_block_time() const {
+ return my->fork_db.root()->header.timestamp.to_time_point();
+}
+
+
const dynamic_global_property_object& controller::get_dynamic_global_properties()const {
return my->db.get();
}
@@ -3045,6 +3056,10 @@ bool controller::skip_trx_checks() const {
return light_validation_allowed(my->conf.disable_replay_opts);
}
+bool controller::is_trusted_producer( const account_name& producer) const {
+ return get_validation_mode() == chain::validation_mode::LIGHT || my->conf.trusted_producers.count(producer);
+}
+
bool controller::contracts_console()const {
return my->conf.contracts_console;
}
diff --git a/libraries/chain/include/eosio/chain/controller.hpp b/libraries/chain/include/eosio/chain/controller.hpp
index c262f5446be..805bef4c412 100644
--- a/libraries/chain/include/eosio/chain/controller.hpp
+++ b/libraries/chain/include/eosio/chain/controller.hpp
@@ -50,8 +50,6 @@ namespace eosio { namespace chain {
IRREVERSIBLE
};
- inline bool db_mode_is_immutable(db_read_mode m) {return db_read_mode::READ_ONLY == m || db_read_mode::IRREVERSIBLE ==m;}
-
enum class validation_mode {
FULL,
LIGHT
@@ -144,13 +142,15 @@ namespace eosio { namespace chain {
/**
*
*/
- transaction_trace_ptr push_transaction( const transaction_metadata_ptr& trx, fc::time_point deadline, uint32_t billed_cpu_time_us = 0 );
+ transaction_trace_ptr push_transaction( const transaction_metadata_ptr& trx, fc::time_point deadline,
+ uint32_t billed_cpu_time_us, bool explicit_billed_cpu_time );
/**
* Attempt to execute a specific transaction in our deferred trx database
*
*/
- transaction_trace_ptr push_scheduled_transaction( const transaction_id_type& scheduled, fc::time_point deadline, uint32_t billed_cpu_time_us = 0 );
+ transaction_trace_ptr push_scheduled_transaction( const transaction_id_type& scheduled, fc::time_point deadline,
+ uint32_t billed_cpu_time_us, bool explicit_billed_cpu_time );
block_state_ptr finalize_block( const signer_callback_type& signer_callback );
void sign_block( const signer_callback_type& signer_callback );
@@ -226,6 +226,7 @@ namespace eosio { namespace chain {
uint32_t last_irreversible_block_num() const;
block_id_type last_irreversible_block_id() const;
+ time_point last_irreversible_block_time() const;
signed_block_ptr fetch_block_by_number( uint32_t block_num )const;
signed_block_ptr fetch_block_by_id( block_id_type id )const;
@@ -275,6 +276,7 @@ namespace eosio { namespace chain {
bool skip_db_sessions( )const;
bool skip_db_sessions( block_status bs )const;
bool skip_trx_checks()const;
+ bool is_trusted_producer( const account_name& producer) const;
bool contracts_console()const;
@@ -282,7 +284,6 @@ namespace eosio { namespace chain {
db_read_mode get_read_mode()const;
validation_mode get_validation_mode()const;
- bool in_immutable_mode()const;
void set_subjective_cpu_leeway(fc::microseconds leeway);
fc::optional get_subjective_cpu_leeway() const;
diff --git a/libraries/chain/include/eosio/chain/transaction_context.hpp b/libraries/chain/include/eosio/chain/transaction_context.hpp
index 01cdb7d732f..a10c7ba9e9a 100644
--- a/libraries/chain/include/eosio/chain/transaction_context.hpp
+++ b/libraries/chain/include/eosio/chain/transaction_context.hpp
@@ -96,7 +96,8 @@ namespace eosio { namespace chain {
void schedule_transaction();
void record_transaction( const transaction_id_type& id, fc::time_point_sec expire );
- void validate_cpu_usage_to_bill( int64_t u, bool check_minimum = true )const;
+ void validate_cpu_usage_to_bill( int64_t billed_us, int64_t account_cpu_limit, bool check_minimum )const;
+ void validate_account_cpu_usage( int64_t billed_us, int64_t account_cpu_limit, bool estimate )const;
void disallow_transaction_extensions( const char* error_msg )const;
diff --git a/libraries/chain/include/eosio/chain/transaction_metadata.hpp b/libraries/chain/include/eosio/chain/transaction_metadata.hpp
index 945dd9af5e7..bb48c71e9b4 100644
--- a/libraries/chain/include/eosio/chain/transaction_metadata.hpp
+++ b/libraries/chain/include/eosio/chain/transaction_metadata.hpp
@@ -34,7 +34,8 @@ class transaction_metadata {
public:
const bool implicit;
const bool scheduled;
- bool accepted = false; // not thread safe
+ bool accepted = false; // not thread safe
+ uint32_t billed_cpu_time_us = 0; // not thread safe
private:
struct private_type{};
diff --git a/libraries/chain/include/eosio/chain/wasm_interface_private.hpp b/libraries/chain/include/eosio/chain/wasm_interface_private.hpp
index f891602bed4..793a8a28a86 100644
--- a/libraries/chain/include/eosio/chain/wasm_interface_private.hpp
+++ b/libraries/chain/include/eosio/chain/wasm_interface_private.hpp
@@ -8,7 +8,6 @@
#else
#define _REGISTER_EOSVMOC_INTRINSIC(CLS, MOD, METHOD, WASM_SIG, NAME, SIG)
#endif
-#include
#include
#include
#include
@@ -23,7 +22,10 @@
#include "IR/Validate.h"
#if defined(EOSIO_EOS_VM_RUNTIME_ENABLED) || defined(EOSIO_EOS_VM_JIT_RUNTIME_ENABLED)
+#include
#include
+#else
+#define _REGISTER_EOS_VM_INTRINSIC(CLS, MOD, METHOD, WASM_SIG, NAME, SIG)
#endif
using namespace fc;
diff --git a/libraries/chain/resource_limits.cpp b/libraries/chain/resource_limits.cpp
index 10edf69117d..e9ec714fabf 100644
--- a/libraries/chain/resource_limits.cpp
+++ b/libraries/chain/resource_limits.cpp
@@ -386,13 +386,17 @@ std::pair resource_limits_manager::get_account_cpu
account_resource_limit arl;
uint128_t window_size = config.account_cpu_usage_average_window;
- uint64_t greylisted_virtual_cpu_limit = config.cpu_limit_parameters.max * greylist_limit;
bool greylisted = false;
uint128_t virtual_cpu_capacity_in_window = window_size;
- if( greylisted_virtual_cpu_limit < state.virtual_cpu_limit ) {
- virtual_cpu_capacity_in_window *= greylisted_virtual_cpu_limit;
- greylisted = true;
+ if( greylist_limit < config::maximum_elastic_resource_multiplier ) {
+ uint64_t greylisted_virtual_cpu_limit = config.cpu_limit_parameters.max * greylist_limit;
+ if( greylisted_virtual_cpu_limit < state.virtual_cpu_limit ) {
+ virtual_cpu_capacity_in_window *= greylisted_virtual_cpu_limit;
+ greylisted = true;
+ } else {
+ virtual_cpu_capacity_in_window *= state.virtual_cpu_limit;
+ }
} else {
virtual_cpu_capacity_in_window *= state.virtual_cpu_limit;
}
@@ -433,13 +437,17 @@ std::pair resource_limits_manager::get_account_net
account_resource_limit arl;
uint128_t window_size = config.account_net_usage_average_window;
- uint64_t greylisted_virtual_net_limit = config.net_limit_parameters.max * greylist_limit;
bool greylisted = false;
uint128_t virtual_network_capacity_in_window = window_size;
- if( greylisted_virtual_net_limit < state.virtual_net_limit ) {
- virtual_network_capacity_in_window *= greylisted_virtual_net_limit;
- greylisted = true;
+ if( greylist_limit < config::maximum_elastic_resource_multiplier ) {
+ uint64_t greylisted_virtual_net_limit = config.net_limit_parameters.max * greylist_limit;
+ if( greylisted_virtual_net_limit < state.virtual_net_limit ) {
+ virtual_network_capacity_in_window *= greylisted_virtual_net_limit;
+ greylisted = true;
+ } else {
+ virtual_network_capacity_in_window *= state.virtual_net_limit;
+ }
} else {
virtual_network_capacity_in_window *= state.virtual_net_limit;
}
diff --git a/libraries/chain/transaction_context.cpp b/libraries/chain/transaction_context.cpp
index 49a67dff882..9f814e02d00 100644
--- a/libraries/chain/transaction_context.cpp
+++ b/libraries/chain/transaction_context.cpp
@@ -121,8 +121,8 @@ namespace eosio { namespace chain {
initial_objective_duration_limit = objective_duration_limit;
- if( billed_cpu_time_us > 0 ) // could also call on explicit_billed_cpu_time but it would be redundant
- validate_cpu_usage_to_bill( billed_cpu_time_us, false ); // Fail early if the amount to be billed is too high
+ if( explicit_billed_cpu_time )
+ validate_cpu_usage_to_bill( billed_cpu_time_us, std::numeric_limits::max(), false ); // Fail early if the amount to be billed is too high
// Record accounts to be billed for network and CPU usage
if( control.is_builtin_activated(builtin_protocol_feature_t::only_bill_first_authorizer) ) {
@@ -172,6 +172,11 @@ namespace eosio { namespace chain {
deadline_exception_code = billing_timer_exception_code;
}
+ if( !explicit_billed_cpu_time ) {
+ // if account no longer has enough cpu to exec trx, don't try
+ validate_account_cpu_usage( billed_cpu_time_us, account_cpu_limit, true );
+ }
+
eager_net_limit = (eager_net_limit/8)*8; // Round down to nearest multiple of word size (8 bytes) so check_net_usage can be efficient
if( initial_net_usage > 0 )
@@ -330,7 +335,7 @@ namespace eosio { namespace chain {
update_billed_cpu_time( now );
- validate_cpu_usage_to_bill( billed_cpu_time_us );
+ validate_cpu_usage_to_bill( billed_cpu_time_us, account_cpu_limit, true );
rl.add_transaction_usage( bill_to_accounts, static_cast(billed_cpu_time_us), net_usage,
block_timestamp_type(control.pending_block_time()).slot ); // Should never fail
@@ -420,7 +425,7 @@ namespace eosio { namespace chain {
transaction_timer.start(_deadline);
}
- void transaction_context::validate_cpu_usage_to_bill( int64_t billed_us, bool check_minimum )const {
+ void transaction_context::validate_cpu_usage_to_bill( int64_t billed_us, int64_t account_cpu_limit, bool check_minimum )const {
if (!control.skip_trx_checks()) {
if( check_minimum ) {
const auto& cfg = control.get_global_properties().configuration;
@@ -430,25 +435,35 @@ namespace eosio { namespace chain {
);
}
- if( billing_timer_exception_code == block_cpu_usage_exceeded::code_value ) {
+ validate_account_cpu_usage( billed_us, account_cpu_limit, false );
+ }
+ }
+
+ void transaction_context::validate_account_cpu_usage( int64_t billed_us, int64_t account_cpu_limit, bool estimate )const {
+ if( (billed_us > 0) && !control.skip_trx_checks() ) {
+ const bool cpu_limited_by_account = (account_cpu_limit <= objective_duration_limit.count());
+
+ if( !cpu_limited_by_account && (billing_timer_exception_code == block_cpu_usage_exceeded::code_value) ) {
EOS_ASSERT( billed_us <= objective_duration_limit.count(),
block_cpu_usage_exceeded,
- "billed CPU time (${billed} us) is greater than the billable CPU time left in the block (${billable} us)",
- ("billed", billed_us)("billable", objective_duration_limit.count())
- );
+ "${desc} CPU time (${billed} us) is greater than the billable CPU time left in the block (${billable} us)",
+ ("desc", (estimate ? "estimated" : "billed"))("billed", billed_us)( "billable", objective_duration_limit.count() )
+ );
} else {
- if (cpu_limit_due_to_greylist) {
- EOS_ASSERT( billed_us <= objective_duration_limit.count(),
+ if( cpu_limit_due_to_greylist && cpu_limited_by_account ) {
+ EOS_ASSERT( billed_us <= account_cpu_limit,
greylist_cpu_usage_exceeded,
- "billed CPU time (${billed} us) is greater than the maximum greylisted billable CPU time for the transaction (${billable} us)",
- ("billed", billed_us)("billable", objective_duration_limit.count())
+ "${desc} CPU time (${billed} us) is greater than the maximum greylisted billable CPU time for the transaction (${billable} us)",
+ ("desc", (estimate ? "estimated" : "billed"))("billed", billed_us)( "billable", account_cpu_limit )
);
} else {
- EOS_ASSERT( billed_us <= objective_duration_limit.count(),
+ // exceeds trx.max_cpu_usage_ms or cfg.max_transaction_cpu_usage if objective_duration_limit is greater
+ const int64_t cpu_limit = (cpu_limited_by_account ? account_cpu_limit : objective_duration_limit.count());
+ EOS_ASSERT( billed_us <= cpu_limit,
tx_cpu_usage_exceeded,
- "billed CPU time (${billed} us) is greater than the maximum billable CPU time for the transaction (${billable} us)",
- ("billed", billed_us)("billable", objective_duration_limit.count())
- );
+ "${desc} CPU time (${billed} us) is greater than the maximum billable CPU time for the transaction (${billable} us)",
+ ("desc", (estimate ? "estimated" : "billed"))("billed", billed_us)( "billable", cpu_limit )
+ );
}
}
}
@@ -504,6 +519,9 @@ namespace eosio { namespace chain {
}
}
+ EOS_ASSERT( (!force_elastic_limits && control.is_producing_block()) || (!greylisted_cpu && !greylisted_net),
+ transaction_exception, "greylisted when not producing block" );
+
return std::make_tuple(account_net_limit, account_cpu_limit, greylisted_net, greylisted_cpu);
}
@@ -643,7 +661,7 @@ namespace eosio { namespace chain {
EOS_ASSERT( actor != nullptr, transaction_exception,
"action's authorizing actor '${account}' does not exist", ("account", auth.actor) );
EOS_ASSERT( auth_manager.find_permission(auth) != nullptr, transaction_exception,
- "action's authorizations include a non-existent permission: {permission}",
+ "action's authorizations include a non-existent permission: ${permission}",
("permission", auth) );
if( enforce_actor_whitelist_blacklist )
actors.insert( auth.actor );
diff --git a/libraries/chain/webassembly/eos-vm-oc/code_cache.cpp b/libraries/chain/webassembly/eos-vm-oc/code_cache.cpp
index b12388eb3da..9d93b8a2111 100644
--- a/libraries/chain/webassembly/eos-vm-oc/code_cache.cpp
+++ b/libraries/chain/webassembly/eos-vm-oc/code_cache.cpp
@@ -262,8 +262,11 @@ code_cache_base::code_cache_base(const boost::filesystem::path data_dir, const e
for(unsigned i = 0; i < number_entries; ++i) {
code_descriptor cd;
fc::raw::unpack(ds, cd);
- if(cd.codegen_version != 0)
+ if(cd.codegen_version != 0) {
+ allocator->deallocate(code_mapping + cd.code_begin);
+ allocator->deallocate(code_mapping + cd.initdata_begin);
continue;
+ }
_cache_index.push_back(std::move(cd));
}
allocator->deallocate(code_mapping + cache_header.serialized_descriptor_index);
diff --git a/libraries/fc b/libraries/fc
index 5c3740a5efe..89905627463 160000
--- a/libraries/fc
+++ b/libraries/fc
@@ -1 +1 @@
-Subproject commit 5c3740a5efec1e1d592e0f8ce092872df46c92d2
+Subproject commit 89905627463081c15bfa708a04cc7e68edb25dd0
diff --git a/libraries/testing/include/eosio/testing/tester.hpp b/libraries/testing/include/eosio/testing/tester.hpp
index 6df94c8862b..3b779dc97af 100644
--- a/libraries/testing/include/eosio/testing/tester.hpp
+++ b/libraries/testing/include/eosio/testing/tester.hpp
@@ -175,6 +175,8 @@ namespace eosio { namespace testing {
virtual signed_block_ptr produce_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms) ) = 0;
virtual signed_block_ptr produce_empty_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms) ) = 0;
virtual signed_block_ptr finish_block() = 0;
+ // produce one block and return traces for all applied transactions, both failed and executed
+ signed_block_ptr produce_block( std::vector& traces );
void produce_blocks( uint32_t n = 1, bool empty = false );
void produce_blocks_until_end_of_round();
void produce_blocks_for_n_rounds(const uint32_t num_of_rounds = 1);
@@ -413,7 +415,10 @@ namespace eosio { namespace testing {
}
protected:
- signed_block_ptr _produce_block( fc::microseconds skip_time, bool skip_pending_trxs = false );
+ signed_block_ptr _produce_block( fc::microseconds skip_time, bool skip_pending_trxs );
+ signed_block_ptr _produce_block( fc::microseconds skip_time, bool skip_pending_trxs,
+ bool no_throw, std::vector& traces );
+
void _start_block(fc::time_point block_time);
signed_block_ptr _finish_block();
@@ -478,6 +483,8 @@ namespace eosio { namespace testing {
}
}
+ using base_tester::produce_block;
+
signed_block_ptr produce_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms) )override {
return _produce_block(skip_time, false);
}
diff --git a/libraries/testing/tester.cpp b/libraries/testing/tester.cpp
index 074de397217..06204dbf62c 100644
--- a/libraries/testing/tester.cpp
+++ b/libraries/testing/tester.cpp
@@ -312,7 +312,13 @@ namespace eosio { namespace testing {
}
}
- signed_block_ptr base_tester::_produce_block( fc::microseconds skip_time, bool skip_pending_trxs) {
+ signed_block_ptr base_tester::_produce_block( fc::microseconds skip_time, bool skip_pending_trxs ) {
+ std::vector traces;
+ return _produce_block( skip_time, skip_pending_trxs, false, traces );
+ }
+
+ signed_block_ptr base_tester::_produce_block( fc::microseconds skip_time, bool skip_pending_trxs,
+ bool no_throw, std::vector& traces ) {
auto head = control->head_block_state();
auto head_time = control->head_block_time();
auto next_time = head_time + skip_time;
@@ -323,8 +329,9 @@ namespace eosio { namespace testing {
if( !skip_pending_trxs ) {
for( auto itr = unapplied_transactions.begin(); itr != unapplied_transactions.end(); ) {
- auto trace = control->push_transaction( itr->trx_meta, fc::time_point::maximum(), DEFAULT_BILLED_CPU_TIME_US );
- if(trace->except) {
+ auto trace = control->push_transaction( itr->trx_meta, fc::time_point::maximum(), DEFAULT_BILLED_CPU_TIME_US, true );
+ traces.emplace_back( trace );
+ if(!no_throw && trace->except) {
trace->except->dynamic_rethrow_exception();
}
itr = unapplied_transactions.erase( itr );
@@ -333,8 +340,9 @@ namespace eosio { namespace testing {
vector scheduled_trxs;
while ((scheduled_trxs = get_scheduled_transactions()).size() > 0 ) {
for( const auto& trx : scheduled_trxs ) {
- auto trace = control->push_scheduled_transaction( trx, fc::time_point::maximum(), DEFAULT_BILLED_CPU_TIME_US );
- if( trace->except ) {
+ auto trace = control->push_scheduled_transaction( trx, fc::time_point::maximum(), DEFAULT_BILLED_CPU_TIME_US, true );
+ traces.emplace_back( trace );
+ if( !no_throw && trace->except ) {
trace->except->dynamic_rethrow_exception();
}
}
@@ -411,6 +419,10 @@ namespace eosio { namespace testing {
return control->head_block_state()->block;
}
+ signed_block_ptr base_tester::produce_block( std::vector& traces ) {
+ return _produce_block( fc::milliseconds(config::block_interval_ms), false, true, traces );
+ }
+
void base_tester::produce_blocks( uint32_t n, bool empty ) {
if( empty ) {
for( uint32_t i = 0; i < n; ++i )
@@ -535,7 +547,7 @@ namespace eosio { namespace testing {
fc::microseconds::maximum() :
fc::microseconds( deadline - fc::time_point::now() );
auto fut = transaction_metadata::start_recover_keys( ptrx, control->get_thread_pool(), control->get_chain_id(), time_limit );
- auto r = control->push_transaction( fut.get(), deadline, billed_cpu_time_us );
+ auto r = control->push_transaction( fut.get(), deadline, billed_cpu_time_us, billed_cpu_time_us > 0 );
if( r->except_ptr ) std::rethrow_exception( r->except_ptr );
if( r->except ) throw *r->except;
return r;
@@ -560,7 +572,7 @@ namespace eosio { namespace testing {
fc::microseconds( deadline - fc::time_point::now() );
auto ptrx = std::make_shared( trx, c );
auto fut = transaction_metadata::start_recover_keys( ptrx, control->get_thread_pool(), control->get_chain_id(), time_limit );
- auto r = control->push_transaction( fut.get(), deadline, billed_cpu_time_us );
+ auto r = control->push_transaction( fut.get(), deadline, billed_cpu_time_us, billed_cpu_time_us > 0 );
if (no_throw) return r;
if( r->except_ptr ) std::rethrow_exception( r->except_ptr );
if( r->except) throw *r->except;
diff --git a/plugins/CMakeLists.txt b/plugins/CMakeLists.txt
index e07a10c5b8d..5f196cc8c4f 100644
--- a/plugins/CMakeLists.txt
+++ b/plugins/CMakeLists.txt
@@ -9,6 +9,7 @@ add_subdirectory(producer_api_plugin)
add_subdirectory(history_plugin)
add_subdirectory(history_api_plugin)
add_subdirectory(state_history_plugin)
+add_subdirectory(trace_api_plugin)
add_subdirectory(wallet_plugin)
add_subdirectory(wallet_api_plugin)
diff --git a/plugins/chain_api_plugin/chain.swagger.yaml b/plugins/chain_api_plugin/chain.swagger.yaml
new file mode 100644
index 00000000000..f52db0cd9ee
--- /dev/null
+++ b/plugins/chain_api_plugin/chain.swagger.yaml
@@ -0,0 +1,682 @@
+openapi: 3.0.0
+info:
+ title: Chain API
+ description: "OAS 3.0 Nodeos [chain_api_plugin](https://eosio.github.io/eos/latest/nodeos/plugins/chain_api_plugin/index) API Specification\r"
+ version: 1.0.0
+ license:
+ name: MIT
+ url: https://opensource.org/licenses/MIT
+ contact:
+ url: https://eos.io
+servers:
+ - url: "{protocol}://{host}:{port}/v1/chain"
+ variables:
+ protocol:
+ enum:
+ - http
+ - https
+ default: http
+ host:
+ default: localhost
+ port:
+ default: "8080"
+components:
+ schemas: {}
+paths:
+ /get_account:
+ post:
+ description: Returns an object containing various details about a specific account on the blockchain.
+ operationId: get_account
+ requestBody:
+ description: JSON Object with single member "account_name"
+ content:
+ application/json:
+ schema:
+ type: object
+ required:
+ - account_name
+ properties:
+ account_name:
+ $ref: "https://eosio.github.io/schemata/v2.0/oas/Name.yaml"
+ responses:
+ "200":
+ description: OK
+ content:
+ application/json:
+ schema:
+ $ref: "https://eosio.github.io/schemata/v2.0/oas/Account.yaml"
+ /get_block:
+ post:
+ description: Returns an object containing various details about a specific block on the blockchain.
+ operationId: get_block
+ requestBody:
+ content:
+ application/json:
+ schema:
+ type: object
+ required:
+ - block_num_or_id
+ properties:
+ block_num_or_id:
+ type: string
+ description: Provide a `block number` or a `block id`
+ responses:
+ "200":
+ description: OK
+ content:
+ application/json:
+ schema:
+ $ref: "https://eosio.github.io/schemata/v2.0/oas/Block.yaml"
+ /get_info:
+ post:
+ description: Returns an object containing various details about the blockchain.
+ operationId: get_info
+ security: []
+ responses:
+ "200":
+ description: OK
+ content:
+ application/json:
+ schema:
+ $ref: "https://eosio.github.io/schemata/v2.0/oas/Info.yaml"
+
+ /push_transaction:
+ post:
+ description: This method expects a transaction in JSON format and will attempt to apply it to the blockchain.
+ operationId: push_transaction
+ requestBody:
+ content:
+ application/json:
+ schema:
+ type: object
+ properties:
+ signatures:
+ type: array
+ description: array of signatures required to authorize transaction
+ items:
+ $ref: "https://eosio.github.io/schemata/v2.0/oas/Signature.yaml"
+ compression:
+ type: boolean
+ description: Compression used, usually false
+ packed_context_free_data:
+ type: string
+ description: json to hex
+ packed_trx:
+ type: string
+ description: Transaction object json to hex
+ responses:
+ "200":
+ description: OK
+ content:
+ application/json:
+ schema:
+ description: Returns Nothing
+
+ /send_transaction:
+ post:
+ description: This method expects a transaction in JSON format and will attempt to apply it to the blockchain.
+ operationId: send_transaction
+ requestBody:
+ content:
+ application/json:
+ schema:
+ type: object
+ properties:
+ signatures:
+ type: array
+ description: array of signatures required to authorize transaction
+ items:
+ $ref: "https://eosio.github.io/schemata/v2.0/oas/Signature.yaml"
+ compression:
+ type: boolean
+ description: Compression used, usually false
+ packed_context_free_data:
+ type: string
+ description: json to hex
+ packed_trx:
+ type: string
+ description: Transaction object json to hex
+
+ responses:
+ "200":
+ description: OK
+ content:
+ application/json:
+ schema:
+ description: Returns Nothing
+
+ /push_transactions:
+ post:
+ description: This method expects a list of transactions in JSON format and will attempt to apply them to the blockchain.
+ operationId: push_transactions
+ requestBody:
+ content:
+ application/json:
+ schema:
+ type: array
+ items:
+ $ref: "https://eosio.github.io/schemata/v2.0/oas/Transaction.yaml"
+ responses:
+ "200":
+ description: OK
+ content:
+ application/json:
+ schema:
+ description: Returns Nothing
+
+ /get_block_header_state:
+ post:
+ description: Retrieves the block header state
+ operationId: get_block_header_state
+ requestBody:
+ content:
+ application/json:
+ schema:
+ type: object
+ required:
+ - block_num_or_id
+ properties:
+ block_num_or_id:
+ type: string
+ description: Provide a block_number or a block_id
+
+ responses:
+ "200":
+ description: OK
+ content:
+ application/json:
+ schema:
+ $ref: "https://eosio.github.io/schemata/v2.0/oas/BlockHeaderState.yaml"
+
+ /get_abi:
+ post:
+ description: Retrieves the ABI for a contract based on its account name
+ operationId: get_abi
+ requestBody:
+ content:
+ application/json:
+ schema:
+ type: object
+ required:
+ - account_name
+ properties:
+ account_name:
+ $ref: "https://eosio.github.io/schemata/v2.0/oas/Name.yaml"
+ responses:
+ "200":
+ description: OK
+ content:
+ application/json:
+ schema:
+ $ref: "https://eosio.github.io/schemata/v2.0/oas/Abi.yaml"
+ /get_currency_balance:
+ post:
+ description: Retrieves the current balance
+ operationId: get_currency_balance
+ requestBody:
+ content:
+ application/json:
+ schema:
+ type: object
+ required:
+ - code
+ - account
+ - symbol
+ properties:
+ code:
+ $ref: "https://eosio.github.io/schemata/v2.0/oas/Name.yaml"
+ account:
+ $ref: "https://eosio.github.io/schemata/v2.0/oas/Name.yaml"
+ symbol:
+ $ref: "https://eosio.github.io/schemata/v2.0/oas/Symbol.yaml"
+
+ responses:
+ "200":
+ description: OK
+ content:
+ application/json:
+ schema:
+ type: array
+ items:
+ $ref: "https://eosio.github.io/schemata/v2.0/oas/Symbol.yaml"
+
+ /get_currency_stats:
+ post:
+ description: Retrieves currency stats
+ operationId: get_currency_stats
+ requestBody:
+ content:
+ application/json:
+ schema:
+ type: object
+ properties:
+ code:
+ $ref: "https://eosio.github.io/schemata/v2.0/oas/Name.yaml"
+ symbol:
+ $ref: "https://eosio.github.io/schemata/v2.0/oas/Symbol.yaml"
+
+ responses:
+ "200":
+ description: OK
+ content:
+ application/json:
+ schema:
+ description: "Returns an object with one member labeled as the symbol you requested, the object has three members: supply (Symbol), max_supply (Symbol) and issuer (Name)"
+
+ /get_required_keys:
+ post:
+ description: Returns the required keys needed to sign a transaction.
+ operationId: get_required_keys
+ requestBody:
+ content:
+ application/json:
+ schema:
+ type: object
+ required:
+ - transaction
+ - available_keys
+ properties:
+ transaction:
+ $ref: "https://eosio.github.io/schemata/v2.0/oas/Transaction.yaml"
+ available_keys:
+ type: array
+ description: Provide the available keys
+ items:
+ $ref: "https://eosio.github.io/schemata/v2.0/oas/PublicKey.yaml"
+ responses:
+ "200":
+ description: OK
+ content:
+ application/json:
+ schema:
+ {}
+
+ /get_producers:
+ post:
+ description: Retrieves producers list
+ operationId: get_producers
+ requestBody:
+ content:
+ application/json:
+ schema:
+ title: "GetProducersRequest"
+ type: object
+ required:
+ - limit
+ - lower_bound
+ properties:
+ limit:
+ type: string
+ description: total number of producers to retrieve
+ lower_bound:
+ type: string
+ description: In conjunction with limit can be used to paginate through the results. For example, limit=10 and lower_bound=10 would be page 2
+ json:
+ type: boolean
+ description: return result in JSON format
+
+ responses:
+ "200":
+ description: OK
+ content:
+ application/json:
+ schema:
+ title: "GetProducersResponse"
+ type: object
+ additionalProperties: false
+ minProperties: 3
+ required:
+ - active
+ - pending
+ - proposed
+ properties:
+ active:
+ type: array
+ nullable: true
+ items:
+ $ref: "https://eosio.github.io/schemata/v2.0/oas/ProducerSchedule.yaml"
+ pending:
+ type: array
+ nullable: true
+ items:
+ $ref: "https://eosio.github.io/schemata/v2.0/oas/ProducerSchedule.yaml"
+ proposed:
+ type: array
+ nullable: true
+ items:
+ $ref: "https://eosio.github.io/schemata/v2.0/oas/ProducerSchedule.yaml"
+
+
+ /get_raw_code_and_abi:
+ post:
+ description: Retrieves raw code and ABI for a contract based on account name
+ operationId: get_raw_code_and_abi
+ requestBody:
+ content:
+ application/json:
+ schema:
+ type: object
+ required:
+ - account_name
+ properties:
+ account_name:
+ $ref: "https://eosio.github.io/schemata/v2.0/oas/Name.yaml"
+
+ responses:
+ "200":
+ description: OK
+ content:
+ application/json:
+ schema:
+ type: object
+ properties:
+ account_name:
+ $ref: "https://eosio.github.io/schemata/v2.0/oas/Name.yaml"
+ wasm:
+ type: string
+ description: base64 encoded wasm
+ abi:
+ type: string
+ description: base64 encoded ABI
+
+ /get_scheduled_transaction:
+ post:
+ description: Retrieves scheduled transactions
+ operationId: get_scheduled_transaction
+ requestBody:
+ content:
+ application/json:
+ schema:
+ type: object
+ properties:
+ lower_bound:
+ $ref: "https://eosio.github.io/schemata/v2.0/oas/DateTimeSeconds.yaml"
+ limit:
+ description: The maximum number of transactions to return
+ type: integer
+ json:
+ description: true/false whether the packed transaction is converted to json
+ type: boolean
+ responses:
+ "200":
+ description: OK
+ content:
+ application/json:
+ schema:
+ type: object
+ properties:
+ transactions:
+ type: array
+ items:
+ $ref: "https://eosio.github.io/schemata/v2.0/oas/Transaction.yaml"
+
+
+ /get_table_by_scope:
+ post:
+ description: Retrieves table scope
+ operationId: get_table_by_scope
+ requestBody:
+ content:
+ application/json:
+ schema:
+ type: object
+ required:
+ - code
+ properties:
+ code:
+ type: string
+ description: "`name` of the contract to return table data for"
+ table:
+ type: string
+ description: Filter results by table
+ lower_bound:
+ type: string
+ description: Filters results to return the first element that is not less than provided value in set
+ upper_bound:
+ type: string
+ description: Filters results to return the first element that is greater than provided value in set
+ limit:
+ type: integer
+ description: Limit number of results returned.
+ format: int32
+ reverse:
+ type: boolean
+ description: Reverse the order of returned results
+ responses:
+ "200":
+ description: OK
+ content:
+ application/json:
+ schema:
+ type: object
+ properties:
+ rows:
+ type: array
+ items:
+ $ref: "https://eosio.github.io/schemata/v2.0/oas/TableScope.yaml"
+ more:
+ $ref: "https://eosio.github.io/schemata/v2.0/oas/Name.yaml"
+
+ /get_table_rows:
+ post:
+ description: Returns an object containing rows from the specified table.
+ operationId: get_table_rows
+ requestBody:
+ content:
+ application/json:
+ schema:
+ type: object
+ required:
+ - code
+ - table
+ - scope
+ properties:
+ code:
+ type: string
+ description: The name of the smart contract that controls the provided table
+ table:
+ type: string
+ description: The name of the table to query
+ scope:
+ type: string
+ description: The account to which this data belongs
+ index_position:
+ type: string
+ description: Position of the index used, accepted parameters `primary`, `secondary`, `tertiary`, `fourth`, `fifth`, `sixth`, `seventh`, `eighth`, `ninth` , `tenth`
+ key_type:
+ type: string
+ description: Type of key specified by index_position (for example - `uint64_t` or `name`)
+ encode_type:
+ type: string
+ upper_bound:
+ type: string
+ lower_bound:
+ type: string
+
+ responses:
+ "200":
+ description: OK
+ content:
+ application/json:
+ schema:
+ type: object
+ properties:
+ rows:
+ type: array
+ items: {}
+
+ /abi_json_to_bin:
+ post:
+ description: Converts JSON action data to packed binary format using the contract ABI.
+ operationId: abi_json_to_bin
+ requestBody:
+ content:
+ application/json:
+ schema:
+ type: object
+ title: AbiJsonToBinRequest
+ properties:
+ binargs:
+ $ref: "https://eosio.github.io/schemata/v2.0/oas/Hex.yaml"
+
+ responses:
+ "200":
+ description: OK
+ content:
+ application/json:
+ schema:
+ type: object
+ properties:
+ binargs:
+ $ref: "https://eosio.github.io/schemata/v2.0/oas/Hex.yaml"
+
+ /abi_bin_to_json:
+ post:
+ description: Converts packed binary action data to JSON format using the contract ABI.
+ operationId: abi_bin_to_json
+ requestBody:
+ content:
+ application/json:
+ schema:
+ type: object
+ title: AbiBinToJsonRequest
+ properties:
+ code:
+ $ref: "https://eosio.github.io/schemata/v2.0/oas/Name.yaml"
+ action:
+ $ref: "https://eosio.github.io/schemata/v2.0/oas/Name.yaml"
+ binargs:
+ $ref: "https://eosio.github.io/schemata/v2.0/oas/Hex.yaml"
+ responses:
+ "200":
+ description: OK
+ content:
+ application/json:
+ schema:
+ type: string
+
+ /get_code:
+ post:
+ description: Retrieves the smart contract code and ABI for an account.
+ operationId: get_code
+ requestBody:
+ content:
+ application/json:
+ schema:
+ type: object
+ required:
+ - account_name
+ - code_as_wasm
+ properties:
+ account_name:
+ $ref: "https://eosio.github.io/schemata/v2.0/oas/Name.yaml"
+ code_as_wasm:
+ type: integer
+ default: 1
+ description: This must be 1 (true)
+ responses:
+ "200":
+ description: OK
+ content:
+ application/json:
+ schema:
+ type: object
+ title: GetCodeResponse.yaml
+ properties:
+ name:
+ $ref: "https://eosio.github.io/schemata/v2.0/oas/Name.yaml"
+ code_hash:
+ $ref: "https://eosio.github.io/schemata/v2.0/oas/Sha256.yaml"
+ wast:
+ type: string
+ wasm:
+ type: string
+ abi:
+ $ref: "https://eosio.github.io/schemata/v2.0/oas/Abi.yaml"
+
+ /get_raw_abi:
+ post:
+ description: Retrieves the raw ABI for a contract based on its account name.
+ operationId: get_raw_abi
+ requestBody:
+ content:
+ application/json:
+ schema:
+ type: object
+ required:
+ - account_name
+ properties:
+ account_name:
+ $ref: "https://eosio.github.io/schemata/v2.0/oas/Name.yaml"
+ responses:
+ "200":
+ description: OK
+ content:
+ application/json:
+ schema:
+ type: object
+ properties:
+ account_name:
+ $ref: "https://eosio.github.io/schemata/v2.0/oas/Name.yaml"
+ code_hash:
+ $ref: "https://eosio.github.io/schemata/v2.0/oas/Sha256.yaml"
+ abi_hash:
+ allOf:
+ - $ref: "https://eosio.github.io/schemata/v2.0/oas/Sha256.yaml"
+ abi:
+ type: string
+
+
+ /get_activated_protocol_features:
+ post:
+ description: Retrieves the activated protocol features for producer node
+ operationId: get_activated_protocol_features
+ requestBody:
+ content:
+ application/json:
+ schema:
+ type: object
+ required:
+ - params
+ properties:
+ params:
+ type: object
+ description: Defines the filters to retrieve the protocol features by
+ required:
+ - search_by_block_num
+ - reverse
+ properties:
+ lower_bound:
+ type: integer
+ description: Lower bound
+ upper_bound:
+ type: integer
+ description: Upper bound
+ limit:
+ type: integer
+ description: The limit, default is 10
+ search_by_block_num:
+ type: boolean
+ description: Flag to indicate it has to search by block number
+ reverse:
+ type: boolean
+ description: Flag to indicate it has to search in reverse
+ responses:
+ "200":
+ description: OK
+ content:
+ application/json:
+ schema:
+ type: object
+ description: Returns activated protocol features
+ required:
+ - activated_protocol_features
+ properties:
+ activated_protocol_features:
+ type: array
+ description: Variant type, an array of strings with the activated protocol features
+ items:
+ type: string
+ more:
+ type: integer
+ description: "In case there's more activated protocol features than the input parameter `limit` requested, returns the ordinal of the next activated protocol feature which was not returned, otherwise zero."
diff --git a/plugins/chain_api_plugin/chain_api_plugin.cpp b/plugins/chain_api_plugin/chain_api_plugin.cpp
index 334d560baae..7b2be3bf414 100644
--- a/plugins/chain_api_plugin/chain_api_plugin.cpp
+++ b/plugins/chain_api_plugin/chain_api_plugin.cpp
@@ -79,7 +79,8 @@ void chain_api_plugin::plugin_startup() {
ro_api.set_shorten_abi_errors( !_http_plugin.verbose_errors() );
_http_plugin.add_api({
- CHAIN_RO_CALL(get_info, 200l),
+ CHAIN_RO_CALL(get_info, 200)}, appbase::priority::medium);
+ _http_plugin.add_api({
CHAIN_RO_CALL(get_activated_protocol_features, 200),
CHAIN_RO_CALL(get_block, 200),
CHAIN_RO_CALL(get_block_header_state, 200),
diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp
index 4400c1d69d1..b36d7fbabca 100644
--- a/plugins/chain_plugin/chain_plugin.cpp
+++ b/plugins/chain_plugin/chain_plugin.cpp
@@ -43,7 +43,7 @@ std::ostream& operator<<(std::ostream& osm, eosio::chain::db_read_mode m) {
osm << "speculative";
} else if ( m == eosio::chain::db_read_mode::HEAD ) {
osm << "head";
- } else if ( m == eosio::chain::db_read_mode::READ_ONLY ) {
+ } else if ( m == eosio::chain::db_read_mode::READ_ONLY ) { // deprecated
osm << "read-only";
} else if ( m == eosio::chain::db_read_mode::IRREVERSIBLE ) {
osm << "irreversible";
@@ -140,6 +140,9 @@ class chain_plugin_impl {
bfs::path blocks_dir;
bool readonly = false;
flat_map loaded_checkpoints;
+ bool accept_transactions = false;
+ bool api_accept_transactions = true;
+
fc::optional fork_db;
fc::optional block_logger;
@@ -235,11 +238,12 @@ void chain_plugin::set_program_options(options_description& cli, options_descrip
"Deferred transactions sent by accounts in this list do not have any of the subjective whitelist/blacklist checks applied to them (may specify multiple times)")
("read-mode", boost::program_options::value()->default_value(eosio::chain::db_read_mode::SPECULATIVE),
"Database read mode (\"speculative\", \"head\", \"read-only\", \"irreversible\").\n"
- "In \"speculative\" mode database contains changes done up to the head block plus changes made by transactions not yet included to the blockchain.\n"
- "In \"head\" mode database contains changes done up to the current head block.\n"
- "In \"read-only\" mode database contains changes done up to the current head block and transactions cannot be pushed to the chain API.\n"
- "In \"irreversible\" mode database contains changes done up to the last irreversible block and transactions cannot be pushed to the chain API.\n"
+ "In \"speculative\" mode: database contains state changes by transactions in the blockchain up to the head block as well as some transactions not yet included in the blockchain.\n"
+ "In \"head\" mode: database contains state changes by only transactions in the blockchain up to the head block; transactions received by the node are relayed if valid.\n"
+ "In \"read-only\" mode: (DEPRECATED: see p2p-accept-transactions & api-accept-transactions) database contains state changes by only transactions in the blockchain up to the head block; transactions received via the P2P network are not relayed and transactions cannot be pushed via the chain API.\n"
+ "In \"irreversible\" mode: database contains state changes by only transactions in the blockchain up to the last irreversible block; transactions received via the P2P network are not relayed and transactions cannot be pushed via the chain API.\n"
)
+ ( "api-accept-transactions", bpo::value()->default_value(true), "Allow API transactions to be evaluated and relayed if valid.")
("validation-mode", boost::program_options::value()->default_value(eosio::chain::validation_mode::FULL),
"Chain validation mode (\"full\" or \"light\").\n"
"In \"full\" mode all incoming blocks will be fully validated.\n"
@@ -991,6 +995,21 @@ void chain_plugin::plugin_initialize(const variables_map& options) {
if ( options.count("read-mode") ) {
my->chain_config->read_mode = options.at("read-mode").as();
}
+ my->api_accept_transactions = options.at( "api-accept-transactions" ).as();
+
+ if( my->chain_config->read_mode == db_read_mode::IRREVERSIBLE || my->chain_config->read_mode == db_read_mode::READ_ONLY ) {
+ if( my->chain_config->read_mode == db_read_mode::READ_ONLY ) {
+ wlog( "read-mode = read-only is deprecated use p2p-accept-transactions = false, api-accept-transactions = false instead." );
+ }
+ if( my->api_accept_transactions ) {
+ my->api_accept_transactions = false;
+ std::stringstream ss; ss << my->chain_config->read_mode;
+ wlog( "api-accept-transactions set to false due to read-mode: ${m}", ("m", ss.str()) );
+ }
+ }
+ if( my->api_accept_transactions ) {
+ enable_accept_transactions();
+ }
if ( options.count("validation-mode") ) {
my->chain_config->block_validation_mode = options.at("validation-mode").as();
@@ -1077,6 +1096,8 @@ void chain_plugin::plugin_initialize(const variables_map& options) {
void chain_plugin::plugin_startup()
{ try {
+ EOS_ASSERT( my->chain_config->read_mode != db_read_mode::IRREVERSIBLE || !accept_transactions(), plugin_config_exception,
+ "read-mode = irreversible. transactions should not be enabled by enable_accept_transactions" );
try {
auto shutdown = [](){ return app().is_quiting(); };
if (my->snapshot_path) {
@@ -1123,14 +1144,16 @@ void chain_plugin::plugin_shutdown() {
my->chain.reset();
}
-chain_apis::read_write::read_write(controller& db, const fc::microseconds& abi_serializer_max_time)
+chain_apis::read_write::read_write(controller& db, const fc::microseconds& abi_serializer_max_time, bool api_accept_transactions)
: db(db)
, abi_serializer_max_time(abi_serializer_max_time)
+, api_accept_transactions(api_accept_transactions)
{
}
void chain_apis::read_write::validate() const {
- EOS_ASSERT( !db.in_immutable_mode(), missing_chain_api_plugin_exception, "Not allowed, node in read-only mode" );
+ EOS_ASSERT( api_accept_transactions, missing_chain_api_plugin_exception,
+ "Not allowed, node has api-accept-transactions = false" );
}
bool chain_plugin::accept_block(const signed_block_ptr& block, const block_id_type& id ) {
@@ -1366,6 +1389,19 @@ fc::microseconds chain_plugin::get_abi_serializer_max_time() const {
return my->abi_serializer_max_time_us;
}
+bool chain_plugin::api_accept_transactions() const{
+ return my->api_accept_transactions;
+}
+
+bool chain_plugin::accept_transactions() const {
+ return my->accept_transactions;
+}
+
+void chain_plugin::enable_accept_transactions() {
+ my->accept_transactions = true;
+}
+
+
void chain_plugin::log_guard_exception(const chain::guard_exception&e ) {
if (e.code() == chain::database_guard_exception::code_value) {
elog("Database has reached an unsafe level of usage, shutting down to avoid corrupting the database. "
diff --git a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp
index 2b608c4e4a8..1bc4c52dc86 100644
--- a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp
+++ b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp
@@ -592,8 +592,9 @@ class read_only {
class read_write {
controller& db;
const fc::microseconds abi_serializer_max_time;
+ const bool api_accept_transactions;
public:
- read_write(controller& db, const fc::microseconds& abi_serializer_max_time);
+ read_write(controller& db, const fc::microseconds& abi_serializer_max_time, bool api_accept_transactions);
void validate() const;
using push_block_params = chain::signed_block;
@@ -704,7 +705,7 @@ class chain_plugin : public plugin {
void plugin_shutdown();
chain_apis::read_only get_read_only_api() const { return chain_apis::read_only(chain(), get_abi_serializer_max_time()); }
- chain_apis::read_write get_read_write_api() { return chain_apis::read_write(chain(), get_abi_serializer_max_time()); }
+ chain_apis::read_write get_read_write_api() { return chain_apis::read_write(chain(), get_abi_serializer_max_time(), api_accept_transactions()); }
bool accept_block( const chain::signed_block_ptr& block, const chain::block_id_type& id );
void accept_transaction(const chain::packed_transaction_ptr& trx, chain::plugin_interface::next_function next);
@@ -733,6 +734,10 @@ class chain_plugin : public plugin {
chain::chain_id_type get_chain_id() const;
fc::microseconds get_abi_serializer_max_time() const;
+ bool api_accept_transactions() const;
+ // set true by other plugins if any plugin allows transactions
+ bool accept_transactions() const;
+ void enable_accept_transactions();
static void handle_guard_exception(const chain::guard_exception& e);
void do_hard_replay(const variables_map& options);
diff --git a/plugins/db_size_api_plugin/db_size.swagger.yaml b/plugins/db_size_api_plugin/db_size.swagger.yaml
new file mode 100644
index 00000000000..95e39183450
--- /dev/null
+++ b/plugins/db_size_api_plugin/db_size.swagger.yaml
@@ -0,0 +1,60 @@
+openapi: 3.0.0
+info:
+ title: DB Size API
+ version: 1.0.0
+ license:
+ name: MIT
+ url: https://opensource.org/licenses/MIT
+ contact:
+ url: https://eos.io
+servers:
+ - url: '{protocol}://{host}:{port}/v1/'
+ variables:
+ protocol:
+ enum:
+ - http
+ - https
+ default: http
+ host:
+ default: localhost
+ port:
+ default: "8080"
+components:
+ schemas: {}
+paths:
+ /db_size/get:
+ post:
+ summary: get
+ description: Retrieves database stats
+ operationId: get
+ parameters: []
+ requestBody:
+ content:
+ application/json:
+ schema:
+ type: object
+ properties: {}
+ responses:
+ '200':
+ description: OK
+ content:
+ application/json:
+ schema:
+ type: object
+ description: Defines the database stats
+ properties:
+ free_bytes:
+ type: integer
+ used_bytes:
+ type: integer
+ size:
+ type: integer
+ indices:
+ type: array
+ items:
+ type: object
+ properties:
+ index:
+ type: string
+ row_count:
+ type: integer
diff --git a/plugins/http_plugin/http_plugin.cpp b/plugins/http_plugin/http_plugin.cpp
index 887a2958c92..e4ef905479c 100644
--- a/plugins/http_plugin/http_plugin.cpp
+++ b/plugins/http_plugin/http_plugin.cpp
@@ -140,7 +140,8 @@ namespace eosio {
class http_plugin_impl {
public:
- map url_handlers;
+ // key -> priority, url_handler
+ map> url_handlers;
optional listen_endpoint;
string access_control_allow_origin;
string access_control_allow_headers;
@@ -327,7 +328,7 @@ namespace eosio {
if( handler_itr != url_handlers.end()) {
con->defer_http_response();
bytes_in_flight += body.size();
- app().post( appbase::priority::low,
+ app().post( handler_itr->second.first,
[&ioc = thread_pool->get_executor(), &bytes_in_flight = this->bytes_in_flight,
handler_itr, this, resource{std::move( resource )}, body{std::move( body )}, con]() mutable {
const size_t body_size = body.size();
@@ -337,7 +338,7 @@ namespace eosio {
return;
}
try {
- handler_itr->second( std::move( resource ), std::move( body ),
+ handler_itr->second.second( std::move( resource ), std::move( body ),
[&ioc, &bytes_in_flight, con, this]( int code, fc::variant response_body ) {
size_t response_size = 0;
try {
@@ -695,9 +696,9 @@ namespace eosio {
app().post( 0, [me = my](){} ); // keep my pointer alive until queue is drained
}
- void http_plugin::add_handler(const string& url, const url_handler& handler) {
+ void http_plugin::add_handler(const string& url, const url_handler& handler, int priority) {
fc_ilog( logger, "add api url: ${c}", ("c", url) );
- my->url_handlers.insert(std::make_pair(url,handler));
+ my->url_handlers.insert(std::make_pair(url,std::make_pair(priority, handler)));
}
void http_plugin::handle_exception( const char *api_name, const char *call_name, const string& body, url_response_callback cb ) {
diff --git a/plugins/http_plugin/include/eosio/http_plugin/http_plugin.hpp b/plugins/http_plugin/include/eosio/http_plugin/http_plugin.hpp
index 29c31474fec..515ae6c796a 100644
--- a/plugins/http_plugin/include/eosio/http_plugin/http_plugin.hpp
+++ b/plugins/http_plugin/include/eosio/http_plugin/http_plugin.hpp
@@ -77,10 +77,10 @@ namespace eosio {
void plugin_shutdown();
void handle_sighup() override;
- void add_handler(const string& url, const url_handler&);
- void add_api(const api_description& api) {
+ void add_handler(const string& url, const url_handler&, int priority = appbase::priority::medium_low);
+ void add_api(const api_description& api, int priority = appbase::priority::medium_low) {
for (const auto& call : api)
- add_handler(call.first, call.second);
+ add_handler(call.first, call.second, priority);
}
// standard exception handling for api handlers
diff --git a/plugins/net_api_plugin/net.swagger.yaml b/plugins/net_api_plugin/net.swagger.yaml
new file mode 100644
index 00000000000..4bba46ef39e
--- /dev/null
+++ b/plugins/net_api_plugin/net.swagger.yaml
@@ -0,0 +1,225 @@
+openapi: 3.0.0
+info:
+ title: Net API
+ version: 1.0.0
+ license:
+ name: MIT
+ url: https://opensource.org/licenses/MIT
+ contact:
+ url: https://eos.io
+servers:
+ - url: '{protocol}://{host}:{port}/v1/'
+ variables:
+ protocol:
+ enum:
+ - http
+ - https
+ default: http
+ host:
+ default: localhost
+ port:
+ default: "8080"
+components:
+ schemas: {}
+paths:
+ /net/connections:
+ post:
+ summary: connections
+ description: Returns an array of all peer connection statuses.
+ operationId: connections
+ parameters: []
+ requestBody:
+ content:
+ application/json:
+ schema:
+ type: object
+ properties: {}
+ responses:
+ '200':
+ description: OK
+ content:
+ application/json:
+ schema:
+ type: array
+ items:
+ type: object
+ properties:
+ peer:
+ description: The IP address or URL of the peer
+ type: string
+ connecting:
+ description: True if the peer is connecting, otherwise false
+ type: boolean
+ syncing:
+ description: True if the peer is syncing, otherwise false
+ type: boolean
+ last_handshake:
+ description: Structure holding detailed information about the connection
+ type: object
+ properties:
+ network_version:
+ description: Incremental value above a computed base
+ type: integer
+ chain_id:
+ $ref: 'https://eosio.github.io/schemata/v2.0/oas/Sha256.yaml'
+ node_id:
+ $ref: 'https://eosio.github.io/schemata/v2.0/oas/Sha256.yaml'
+ key:
+ $ref: 'https://eosio.github.io/schemata/v2.0/oas/PublicKey.yaml'
+ time:
+ $ref: 'https://eosio.github.io/schemata/v2.0/oas/DateTimeSeconds.yaml'
+ token:
+ $ref: 'https://eosio.github.io/schemata/v2.0/oas/Sha256.yaml'
+ sig:
+ $ref: 'https://eosio.github.io/schemata/v2.0/oas/Signature.yaml'
+ p2p_address:
+ description: IP address or URL of the peer
+ type: string
+ last_irreversible_block_num:
+ description: Last irreversible block number
+ type: integer
+ last_irreversible_block_id:
+ $ref: 'https://eosio.github.io/schemata/v2.0/oas/Sha256.yaml'
+ head_num:
+ description: Head number
+ type: integer
+ head_id:
+ $ref: 'https://eosio.github.io/schemata/v2.0/oas/Sha256.yaml'
+ os:
+ description: Operating system name
+ type: string
+ agent:
+ description: Agent name
+ type: string
+ generation:
+ description: Generation number
+ type: integer
+
+ /net/connect:
+ post:
+ summary: connect
+ description: Initiate a connection to a specified peer.
+ operationId: connect
+ parameters: []
+ requestBody:
+ content:
+ application/json:
+ schema:
+ type: object
+ required:
+ - endpoint
+ properties:
+ endpoint:
+ type: string
+ description: the endpoint to connect to expressed as either IP address or URL
+
+ responses:
+ '200':
+ description: OK
+ content:
+ application/json:
+ schema:
+ type: string
+ description: '"already connected" or "added connection"'
+ /net/disconnect:
+ post:
+ summary: disconnect
+ description: Initiate disconnection from a specified peer.
+ operationId: disconnect
+ parameters: []
+ requestBody:
+ content:
+ application/json:
+ schema:
+ type: object
+ required:
+ - endpoint
+ properties:
+ endpoint:
+ type: string
+ description: the endpoint to disconnect from, expressed as either IP address or URL
+
+ responses:
+ '200':
+ description: OK
+ content:
+ application/json:
+ schema:
+ type: string
+ description: '"connection removed" or "no known connection for host"'
+ /net/status:
+ post:
+ summary: status
+ description: Retrieves the connection status for a specified peer.
+ operationId: status
+ parameters: []
+ requestBody:
+ content:
+ application/json:
+ schema:
+ type: object
+ required:
+ - endpoint
+ properties:
+ endpoint:
+ type: string
+ description: the endpoint to get the status for, expressed as either IP address or URL
+
+ responses:
+ '200':
+ description: OK
+ content:
+ application/json:
+ schema:
+ type: object
+ properties:
+ peer:
+ description: The IP address or URL of the peer
+ type: string
+ connecting:
+ description: True if the peer is connecting, otherwise false
+ type: boolean
+ syncing:
+ description: True if the peer is syncing, otherwise false
+ type: boolean
+ last_handshake:
+ description: Structure holding detailed information about the connection
+ type: object
+ properties:
+ network_version:
+ description: Incremental value above a computed base
+ type: integer
+ chain_id:
+ $ref: 'https://eosio.github.io/schemata/v2.0/oas/Sha256.yaml'
+ node_id:
+ $ref: 'https://eosio.github.io/schemata/v2.0/oas/Sha256.yaml'
+ key:
+ $ref: 'https://eosio.github.io/schemata/v2.0/oas/PublicKey.yaml'
+ time:
+ $ref: 'https://eosio.github.io/schemata/v2.0/oas/DateTimeSeconds.yaml'
+ token:
+ $ref: 'https://eosio.github.io/schemata/v2.0/oas/Sha256.yaml'
+ sig:
+ $ref: 'https://eosio.github.io/schemata/v2.0/oas/Signature.yaml'
+ p2p_address:
+ description: IP address or URL of the peer
+ type: string
+ last_irreversible_block_num:
+ description: Last irreversible block number
+ type: integer
+ last_irreversible_block_id:
+ $ref: 'https://eosio.github.io/schemata/v2.0/oas/Sha256.yaml'
+ head_num:
+ description: Head number
+ type: integer
+ head_id:
+ $ref: 'https://eosio.github.io/schemata/v2.0/oas/Sha256.yaml'
+ os:
+ description: Operating system name
+ type: string
+ agent:
+ description: Agent name
+ type: string
+ generation:
+ description: Generation number
+ type: integer
diff --git a/plugins/net_api_plugin/net_api_plugin.cpp b/plugins/net_api_plugin/net_api_plugin.cpp
index 17d6af921d5..948b79d4187 100644
--- a/plugins/net_api_plugin/net_api_plugin.cpp
+++ b/plugins/net_api_plugin/net_api_plugin.cpp
@@ -75,7 +75,7 @@ void net_api_plugin::plugin_startup() {
INVOKE_R_V(net_mgr, connections), 201),
// CALL(net, net_mgr, open,
// INVOKE_V_R(net_mgr, open, std::string), 200),
- });
+ }, appbase::priority::medium);
}
void net_api_plugin::plugin_initialize(const variables_map& options) {
diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp
index a2226f67824..19107c7ebf0 100644
--- a/plugins/net_plugin/net_plugin.cpp
+++ b/plugins/net_plugin/net_plugin.cpp
@@ -180,7 +180,7 @@ namespace eosio {
void bcast_transaction(const packed_transaction& trx);
void rejected_transaction(const packed_transaction_ptr& trx, uint32_t head_blk_num);
- void bcast_block(const block_state_ptr& bs);
+ void bcast_block( const signed_block_ptr& b, const block_id_type& id );
void bcast_notice( const block_id_type& id );
void rejected_block(const block_id_type& id);
@@ -237,6 +237,7 @@ namespace eosio {
int max_cleanup_time_ms = 0;
uint32_t max_client_count = 0;
uint32_t max_nodes_per_host = 1;
+ bool p2p_accept_transactions = true;
/// Peer clock may be no more than 1 second skewed from our clock, including network latency.
const std::chrono::system_clock::duration peer_authentication_interval{std::chrono::seconds{1}};
@@ -245,7 +246,6 @@ namespace eosio {
fc::sha256 node_id;
string user_agent_name;
- eosio::db_read_mode db_read_mode = eosio::db_read_mode::SPECULATIVE;
chain_plugin* chain_plug = nullptr;
producer_plugin* producer_plug = nullptr;
bool use_socket_read_watermark = false;
@@ -267,7 +267,6 @@ namespace eosio {
std::atomic in_shutdown{false};
compat::channels::transaction_ack::channel_type::handle incoming_transaction_ack_subscription;
- channels::irreversible_block::channel_type::handle incoming_irreversible_block_subscription;
uint16_t thread_pool_size = 2;
optional thread_pool;
@@ -289,6 +288,7 @@ namespace eosio {
void start_listen_loop();
void on_accepted_block( const block_state_ptr& bs );
+ void on_pre_accepted_block( const signed_block_ptr& bs );
void transaction_ack(const std::pair&);
void on_irreversible_block( const block_state_ptr& blk );
@@ -1919,8 +1919,8 @@ namespace eosio {
}
// thread safe
- void dispatch_manager::bcast_block(const block_state_ptr& bs) {
- fc_dlog( logger, "bcast block ${b}", ("b", bs->block_num) );
+ void dispatch_manager::bcast_block(const signed_block_ptr& b, const block_id_type& id) {
+ fc_dlog( logger, "bcast block ${b}", ("b", b->block_num()) );
if( my_impl->sync_master->syncing_with_peer() ) return;
@@ -1937,19 +1937,18 @@ namespace eosio {
} );
if( !have_connection ) return;
- std::shared_ptr> send_buffer = create_send_buffer( bs->block );
+ std::shared_ptr> send_buffer = create_send_buffer( b );
- for_each_block_connection( [this, bs, send_buffer]( auto& cp ) {
+ for_each_block_connection( [this, &id, bnum = b->block_num(), &send_buffer]( auto& cp ) {
if( !cp->current() ) {
return true;
}
- cp->strand.post( [this, cp, bs, send_buffer]() {
- uint32_t bnum = bs->block_num;
+ cp->strand.post( [this, cp, id, bnum, send_buffer]() {
std::unique_lock g_conn( cp->conn_mtx );
bool has_block = cp->last_handshake_recv.last_irreversible_block_num >= bnum;
g_conn.unlock();
if( !has_block ) {
- if( !add_peer_block( bs->id, cp->connection_id ) ) {
+ if( !add_peer_block( id, cp->connection_id ) ) {
fc_dlog( logger, "not bcast block ${b} to ${p}", ("b", bnum)("p", cp->peer_name()) );
return;
}
@@ -2433,6 +2432,12 @@ namespace eosio {
handle_message( blk_id, std::move( ptr ) );
} else if( which == packed_transaction_which ) {
+ if( !my_impl->p2p_accept_transactions ) {
+ fc_dlog( logger, "p2p-accept-transaction=false - dropping txn" );
+ pending_message_buffer.advance_read_ptr( message_length );
+ return true;
+ }
+
auto ds = pending_message_buffer.create_datastream();
fc::raw::unpack( ds, which ); // throw away
shared_ptr ptr = std::make_shared();
@@ -2606,7 +2611,7 @@ namespace eosio {
uint32_t peer_lib = msg.last_irreversible_block_num;
connection_wptr weak = shared_from_this();
- app().post( priority::low, [peer_lib, chain_plug = my_impl->chain_plug, weak{std::move(weak)},
+ app().post( priority::medium, [peer_lib, chain_plug = my_impl->chain_plug, weak{std::move(weak)},
msg_lib_id = msg.last_irreversible_block_id]() {
connection_ptr c = weak.lock();
if( !c ) return;
@@ -2832,11 +2837,6 @@ namespace eosio {
}
void connection::handle_message( packed_transaction_ptr trx ) {
- if( db_mode_is_immutable(my_impl->db_read_mode) ) {
- fc_dlog( logger, "got a txn in read-only mode - dropping" );
- return;
- }
-
const auto& tid = trx->id();
peer_dlog( this, "received packed_transaction ${id}", ("id", tid) );
@@ -2882,7 +2882,8 @@ namespace eosio {
// called from connection strand
void connection::handle_message( const block_id_type& id, signed_block_ptr ptr ) {
peer_dlog( this, "received signed_block ${id}", ("id", ptr->block_num() ) );
- app().post(priority::high, [ptr{std::move(ptr)}, id, c = shared_from_this()]() mutable {
+ auto priority = my_impl->sync_master->syncing_with_peer() ? priority::medium : priority::high;
+ app().post(priority, [ptr{std::move(ptr)}, id, c = shared_from_this()]() mutable {
c->process_signed_block( id, std::move( ptr ) );
});
}
@@ -2923,21 +2924,17 @@ namespace eosio {
if( !accepted ) return;
reason = no_reason;
} catch( const unlinkable_block_exception &ex) {
- peer_elog(c, "bad signed_block ${n} ${id}...: ${m}", ("n", blk_num)("id", blk_id.str().substr(8,16))("m",ex.what()));
+ peer_elog(c, "unlinkable_block_exception #${n} ${id}...: ${m}", ("n", blk_num)("id", blk_id.str().substr(8,16))("m",ex.to_string()));
reason = unlinkable;
} catch( const block_validate_exception &ex) {
- peer_elog(c, "bad signed_block ${n} ${id}...: ${m}", ("n", blk_num)("id", blk_id.str().substr(8,16))("m",ex.what()));
- fc_elog( logger, "block_validate_exception accept block #${n} syncing from ${p}",("n",blk_num)("p",c->peer_name()) );
+ peer_elog(c, "block_validate_exception #${n} ${id}...: ${m}", ("n", blk_num)("id", blk_id.str().substr(8,16))("m",ex.to_string()));
reason = validation;
} catch( const assert_exception &ex) {
- peer_elog(c, "bad signed_block ${n} ${id}...: ${m}", ("n", blk_num)("id", blk_id.str().substr(8,16))("m",ex.what()));
- fc_elog( logger, "unable to accept block on assert exception ${n} from ${p}",("n",ex.to_string())("p",c->peer_name()));
+ peer_elog(c, "block assert_exception #${n} ${id}...: ${m}", ("n", blk_num)("id", blk_id.str().substr(8,16))("m",ex.to_string()));
} catch( const fc::exception &ex) {
- peer_elog(c, "bad signed_block ${n} ${id}...: ${m}", ("n", blk_num)("id", blk_id.str().substr(8,16))("m",ex.what()));
- fc_elog( logger, "accept_block threw a non-assert exception ${x} from ${p}",( "x",ex.to_string())("p",c->peer_name()));
+ peer_elog(c, "bad block exception #${n} ${id}...: ${m}", ("n", blk_num)("id", blk_id.str().substr(8,16))("m",ex.to_string()));
} catch( ...) {
- peer_elog(c, "bad signed_block ${n} ${id}...: unknown exception", ("n", blk_num)("id", blk_id.str().substr(8,16)));
- fc_elog( logger, "handle sync block caught something else from ${p}",("p",c->peer_name()));
+ peer_elog(c, "bad block #${n} ${id}...: unknown exception", ("n", blk_num)("id", blk_id.str().substr(8,16)));
}
if( reason == no_reason ) {
@@ -3088,14 +3085,28 @@ namespace eosio {
}
// called from application thread
- void net_plugin_impl::on_accepted_block(const block_state_ptr& block) {
+ void net_plugin_impl::on_accepted_block(const block_state_ptr& bs) {
update_chain_info();
- dispatcher->strand.post( [this, block]() {
- fc_dlog( logger, "signaled, blk num = ${num}, id = ${id}", ("num", block->block_num)("id", block->id) );
- dispatcher->bcast_block( block );
+ controller& cc = chain_plug->chain();
+ dispatcher->strand.post( [this, bs]() {
+ fc_dlog( logger, "signaled accepted_block, blk num = ${num}, id = ${id}", ("num", bs->block_num)("id", bs->id) );
+ dispatcher->bcast_block( bs->block, bs->id );
});
}
+ // called from application thread
+ void net_plugin_impl::on_pre_accepted_block(const signed_block_ptr& block) {
+ update_chain_info();
+ controller& cc = chain_plug->chain();
+ if( cc.is_trusted_producer(block->producer) ) {
+ dispatcher->strand.post( [this, block]() {
+ auto id = block->id();
+ fc_dlog( logger, "signaled pre_accepted_block, blk num = ${num}, id = ${id}", ("num", block->block_num())("id", id) );
+ dispatcher->bcast_block( block, id );
+ });
+ }
+ }
+
// called from application thread
void net_plugin_impl::on_irreversible_block( const block_state_ptr& block) {
fc_dlog( logger, "on_irreversible_block, blk num = ${num}, id = ${id}", ("num", block->block_num)("id", block->id) );
@@ -3258,6 +3269,7 @@ namespace eosio {
" p2p.trx.eos.io:9876:trx\n"
" p2p.blk.eos.io:9876:blk\n")
( "p2p-max-nodes-per-host", bpo::value()->default_value(def_max_nodes_per_host), "Maximum number of client nodes from any single IP address")
+ ( "p2p-accept-transactions", bpo::value()->default_value(true), "Allow transactions received over p2p network to be evaluated and relayed if valid.")
( "agent-name", bpo::value()->default_value("\"EOS Test Agent\""), "The name supplied to identify this node amongst the peers.")
( "allowed-connection", bpo::value>()->multitoken()->default_value({"any"}, "any"), "Can be 'any' or 'producers' or 'specified' or 'none'. If 'specified', peer-key must be specified at least once. If only 'producers', peer-key is not required. 'producers' and 'specified' may be combined.")
( "peer-key", bpo::value>()->composing()->multitoken(), "Optional public key of peer allowed to connect. May be used multiple times.")
@@ -3269,7 +3281,7 @@ namespace eosio {
( "net-threads", bpo::value()->default_value(my->thread_pool_size),
"Number of worker threads in net_plugin thread pool" )
( "sync-fetch-span", bpo::value()->default_value(def_sync_fetch_span), "number of blocks to retrieve in a chunk from any individual peer during synchronization")
- ( "use-socket-read-watermark", bpo::value()->default_value(false), "Enable expirimental socket read watermark optimization")
+ ( "use-socket-read-watermark", bpo::value()->default_value(false), "Enable experimental socket read watermark optimization")
( "peer-log-format", bpo::value()->default_value( "[\"${_name}\" ${_ip}:${_port}]" ),
"The string used to format peers when logging messages about them. Variables are escaped with ${}.\n"
"Available Variables:\n"
@@ -3301,6 +3313,7 @@ namespace eosio {
my->resp_expected_period = def_resp_expected_wait;
my->max_client_count = options.at( "max-clients" ).as();
my->max_nodes_per_host = options.at( "p2p-max-nodes-per-host" ).as();
+ my->p2p_accept_transactions = options.at( "p2p-accept-transactions" ).as();
my->use_socket_read_watermark = options.at( "use-socket-read-watermark" ).as();
@@ -3367,6 +3380,18 @@ namespace eosio {
EOS_ASSERT( my->chain_plug, chain::missing_chain_plugin_exception, "" );
my->chain_id = my->chain_plug->get_chain_id();
fc::rand_pseudo_bytes( my->node_id.data(), my->node_id.data_size());
+ const controller& cc = my->chain_plug->chain();
+
+ if( cc.get_read_mode() == db_read_mode::IRREVERSIBLE || cc.get_read_mode() == db_read_mode::READ_ONLY ) {
+ if( my->p2p_accept_transactions ) {
+ my->p2p_accept_transactions = false;
+ string m = cc.get_read_mode() == db_read_mode::IRREVERSIBLE ? "irreversible" : "read-only";
+ wlog( "p2p-accept-transactions set to false due to read-mode: ${m}", ("m", m) );
+ }
+ }
+ if( my->p2p_accept_transactions ) {
+ my->chain_plug->enable_accept_transactions();
+ }
} FC_LOG_AND_RETHROW()
}
@@ -3383,14 +3408,12 @@ namespace eosio {
my->dispatcher.reset( new dispatch_manager( my_impl->thread_pool->get_executor() ) );
- chain::controller&cc = my->chain_plug->chain();
- my->db_read_mode = cc.get_read_mode();
- if( cc.in_immutable_mode() && my->p2p_address.size() ) {
- fc_wlog( logger, "\n"
- "**********************************\n"
- "* Read Only Mode *\n"
- "* - Transactions not forwarded - *\n"
- "**********************************\n" );
+ if( !my->p2p_accept_transactions && my->p2p_address.size() ) {
+ fc_ilog( logger, "\n"
+ "***********************************\n"
+ "* p2p-accept-transactions = false *\n"
+ "* Transactions not forwarded *\n"
+ "***********************************\n" );
}
tcp::endpoint listen_endpoint;
@@ -3437,9 +3460,13 @@ namespace eosio {
my->start_listen_loop();
}
{
+ chain::controller& cc = my->chain_plug->chain();
cc.accepted_block.connect( [my = my]( const block_state_ptr& s ) {
my->on_accepted_block( s );
} );
+ cc.pre_accepted_block.connect( [my = my]( const signed_block_ptr& s ) {
+ my->on_pre_accepted_block( s );
+ } );
cc.irreversible_block.connect( [my = my]( const block_state_ptr& s ) {
my->on_irreversible_block( s );
} );
diff --git a/plugins/producer_api_plugin/producer.swagger.yaml b/plugins/producer_api_plugin/producer.swagger.yaml
new file mode 100644
index 00000000000..34a87875d25
--- /dev/null
+++ b/plugins/producer_api_plugin/producer.swagger.yaml
@@ -0,0 +1,497 @@
+openapi: 3.0.0
+info:
+ title: EOSIO API
+ version: 1.0.0
+ license:
+ name: MIT
+ url: https://opensource.org/licenses/MIT
+ contact:
+ url: https://eos.io
+tags:
+ - name: eosio
+servers:
+ - url: "{protocol}://{host}:{port}/v1/"
+ variables:
+ protocol:
+ enum:
+ - http
+ - https
+ default: http
+ host:
+ default: localhost
+ port:
+ default: "8080"
+components:
+ securitySchemes: {}
+ schemas: {}
+security:
+ - {}
+paths:
+ /producer/pause:
+ post:
+ summary: pause
+ description: Pause producer node
+ operationId: pause
+ parameters: []
+ requestBody:
+ content:
+ application/json:
+ schema:
+ type: object
+ properties: {}
+ responses:
+ "200":
+ description: OK
+ content:
+ application/json:
+ schema:
+ type: boolean
+ description: "returns status"
+ /producer/resume:
+ post:
+ summary: resume
+ description: Resume producer node
+ operationId: resume
+ parameters: []
+ requestBody:
+ content:
+ application/json:
+ schema:
+ type: object
+ description: Resumes activity for producer
+ responses:
+ "200":
+ description: OK
+ content:
+ application/json:
+ schema:
+ description: Returns Nothing
+ /producer/paused:
+ post:
+ summary: paused
+ description: Retrieves paused status for producer node
+ operationId: paused
+ parameters: []
+ requestBody:
+ content:
+ application/json:
+ schema:
+ type: object
+ properties: {}
+ responses:
+ "200":
+ description: OK
+ content:
+ application/json:
+ schema:
+ type: boolean
+ description: True if producer is paused, false otherwise
+ /producer/get_runtime_options:
+ post:
+ summary: get_runtime_options
+ description: Retrieves run time options for producer node
+ operationId: get_runtime_options
+ parameters: []
+ requestBody:
+ content:
+ application/json:
+ schema:
+ type: object
+ properties: {}
+ responses:
+ "200":
+ description: OK
+ content:
+ application/json:
+ schema:
+ type: object
+ description: Returns run time options set for the producer
+ properties:
+ max_transaction_time:
+ type: integer
+ description: Max transaction time
+ max_irreversible_block_age:
+ type: integer
+ description: Max irreversible block age
+ produce_time_offset_us:
+ type: integer
+ description: Time offset
+ last_block_time_offset_us:
+ type: integer
+ description: Last block time offset
+ max_scheduled_transaction_time_per_block_ms:
+ type: integer
+ description: Max scheduled transaction time per block in ms
+ subjective_cpu_leeway_us:
+ type: integer
+ description: Subjective CPU leeway
+ incoming_defer_ratio:
+ type: integer
+ description: Incoming defer ratio
+ /producer/update_runtime_options:
+ post:
+ summary: update_runtime_options
+ description: Update run time options for producer node
+ operationId: update_runtime_options
+ parameters: []
+ requestBody:
+ content:
+ application/json:
+ schema:
+ type: object
+ required:
+ - options
+ properties:
+ options:
+ type: object
+ description: Defines the run time options to set for the producer
+ properties:
+ max_transaction_time:
+ type: integer
+ description: Max transaction time
+ max_irreversible_block_age:
+ type: integer
+ description: Max irreversible block age
+ produce_time_offset_us:
+ type: integer
+ description: Time offset
+ last_block_time_offset_us:
+ type: integer
+ description: Last block time offset
+ max_scheduled_transaction_time_per_block_ms:
+ type: integer
+ description: Max scheduled transaction time per block in ms
+ subjective_cpu_leeway_us:
+ type: integer
+ description: Subjective CPU leeway
+ incoming_defer_ratio:
+ type: integer
+ description: Incoming defer ratio
+
+ responses:
+ "200":
+ description: OK
+ content:
+ application/json:
+ schema:
+ description: Returns Nothing
+ /producer/get_greylist:
+ post:
+ summary: get_greylist
+ description: Retrieves the greylist for producer node
+ operationId: get_greylist
+ parameters: []
+ requestBody:
+ content:
+ application/json:
+ schema:
+ type: object
+ properties: {}
+ responses:
+ "200":
+ description: OK
+ content:
+ application/json:
+ schema:
+ type: array
+ description: List of account names stored in the greylist
+ items:
+ $ref: "https://eosio.github.io/schemata/v2.0/oas/Name.yaml"
+
+ /producer/add_greylist_accounts:
+ post:
+ summary: add_greylist_accounts
+ description: Adds accounts to grey list for producer node
+ operationId: add_greylist_accounts
+ parameters: []
+ requestBody:
+ content:
+ application/json:
+ schema:
+ type: object
+ required:
+ - params
+ properties:
+ params:
+ type: object
+ properties:
+ accounts:
+ type: array
+ description: List of account names to add
+ items:
+ $ref: "https://eosio.github.io/schemata/v2.0/oas/Name.yaml"
+
+ responses:
+ "200":
+ description: OK
+ content:
+ application/json:
+ schema:
+ description: Returns Nothing
+ /producer/remove_greylist_accounts:
+ post:
+ summary: remove_greylist_accounts
+ description: Removes accounts from greylist for producer node
+ operationId: remove_greylist_accounts
+ parameters: []
+ requestBody:
+ content:
+ application/json:
+ schema:
+ type: object
+ required:
+ - params
+ properties:
+ params:
+ type: object
+ properties:
+ accounts:
+ type: array
+ description: List of account names to remove
+ items:
+ $ref: "https://eosio.github.io/schemata/v2.0/oas/Name.yaml"
+
+ responses:
+ "200":
+ description: OK
+ content:
+ application/json:
+ schema:
+ type: array
+ description: List of account names stored in the greylist
+ items:
+ $ref: "https://eosio.github.io/schemata/v2.0/oas/Name.yaml"
+
+ /producer/get_whitelist_blacklist:
+ post:
+ summary: get_whitelist_blacklist
+ description: Retrieves the white list and black list for producer node
+ operationId: get_whitelist_blacklist
+ parameters: []
+ requestBody:
+ content:
+ application/json:
+ schema:
+ type: object
+ properties: {}
+
+ responses:
+ "200":
+ description: OK
+ content:
+ application/json:
+ schema:
+ type: object
+ description: Defines the actor whitelist and blacklist, the contract whitelist and blacklist, the action blacklist and key blacklist
+ properties:
+ actor_whitelist:
+ type: array
+ items:
+ $ref: "https://eosio.github.io/schemata/v2.0/oas/Name.yaml"
+ actor_blacklist:
+ type: array
+ items:
+ $ref: "https://eosio.github.io/schemata/v2.0/oas/Name.yaml"
+ contract_whitelist:
+ type: array
+ items:
+ $ref: "https://eosio.github.io/schemata/v2.0/oas/Name.yaml"
+ contract_blacklist:
+ type: array
+ items:
+ $ref: "https://eosio.github.io/schemata/v2.0/oas/Name.yaml"
+ action_blacklist:
+ type: array
+ items:
+ type: array
+ description: Array of two string values, the account name as the first and action name as the second
+ items:
+ allOf:
+ - $ref: "https://eosio.github.io/schemata/v2.0/oas/Name.yaml"
+ - $ref: "https://eosio.github.io/schemata/v2.0/oas/CppSignature.yaml"
+ key_blacklist:
+ type: array
+ items:
+ $ref: "https://eosio.github.io/schemata/v2.0/oas/KeyType.yaml"
+
+ /producer/set_whitelist_blacklist:
+ post:
+ summary: set_whitelist_blacklist
+ description: Sets the white list and black list for producer node
+ operationId: set_whitelist_blacklist
+ parameters: []
+ requestBody:
+ content:
+ application/json:
+ schema:
+ type: object
+ required:
+ - params
+ properties:
+ params:
+ type: object
+ description: Defines the actor whitelist and blacklist, the contract whitelist and blacklist, the action blacklist and key blacklist
+ properties:
+ actor_whitelist:
+ type: array
+ items:
+ $ref: "https://eosio.github.io/schemata/v2.0/oas/Name.yaml"
+ actor_blacklist:
+ type: array
+ items:
+ $ref: "https://eosio.github.io/schemata/v2.0/oas/Name.yaml"
+ contract_whitelist:
+ type: array
+ items:
+ $ref: "https://eosio.github.io/schemata/v2.0/oas/Name.yaml"
+ contract_blacklist:
+ type: array
+ items:
+ $ref: "https://eosio.github.io/schemata/v2.0/oas/Name.yaml"
+ action_blacklist:
+ type: array
+ items:
+ type: array
+ description: Array of two string values, the account name as the first and action name as the second
+ items:
+ allOf:
+ - $ref: "https://eosio.github.io/schemata/v2.0/oas/Name.yaml"
+ - $ref: "https://eosio.github.io/schemata/v2.0/oas/CppSignature.yaml"
+ key_blacklist:
+ type: array
+ items:
+ $ref: "https://eosio.github.io/schemata/v2.0/oas/KeyType.yaml"
+
+ responses:
+ "200":
+ description: OK
+ content:
+ application/json:
+ schema:
+ description: Returns Nothing
+ /producer/create_snapshot:
+ post:
+ summary: create_snapshot
+ description: Creates a snapshot for producer node
+ operationId: create_snapshot
+ parameters: []
+ requestBody:
+ content:
+ application/json:
+ schema:
+ type: object
+ required:
+ - next
+ properties:
+ next:
+ type: object
+ description: Defines the snapshot to be created
+ properties:
+ snapshot_name:
+ type: string
+ description: The name of the snapshot
+ head_block_id:
+ $ref: "https://eosio.github.io/schemata/v2.0/oas/Sha256.yaml"
+
+
+ responses:
+ "200":
+ description: OK
+ content:
+ application/json:
+ schema:
+ description: Returns Nothing
+
+ /producer/get_integrity_hash:
+ post:
+ summary: get_integrity_hash
+ description: Retrieves the integrity hash for producer node
+ operationId: get_integrity_hash
+ parameters: []
+ requestBody:
+ content:
+ application/json:
+ schema:
+ type: object
+ properties: {}
+
+ responses:
+ "200":
+ description: OK
+ content:
+ application/json:
+ schema:
+ type: object
+ description: Defines the integrity hash information details
+ properties:
+ integrity_hash:
+ $ref: "https://eosio.github.io/schemata/v2.0/oas/Sha256.yaml"
+ head_block_id:
+ $ref: "https://eosio.github.io/schemata/v2.0/oas/Sha256.yaml"
+
+ /producer/schedule_protocol_feature_activations:
+ post:
+ summary: schedule_protocol_feature_activations
+ description: Schedule protocol feature activation for producer node
+ operationId: schedule_protocol_feature_activations
+ parameters: []
+ requestBody:
+ content:
+ application/json:
+ schema:
+ type: object
+ required:
+ - schedule
+ properties:
+ schedule:
+ type: object
+ properties:
+ protocol_features_to_activate:
+ type: array
+ description: List of protocol features to activate
+ items:
+ $ref: "https://eosio.github.io/schemata/v2.0/oas/Sha256.yaml"
+
+ responses:
+ "200":
+ description: OK
+ content:
+ application/json:
+ schema:
+ description: Returns Nothing
+
+ /producer/get_supported_protocol_features:
+ post:
+ summary: get_supported_protocol_features
+ description: Retrieves supported protocol features for producer node
+ operationId: get_supported_protocol_features
+ parameters: []
+ requestBody:
+ content:
+ application/json:
+ schema:
+ type: object
+ required:
+ - params
+ properties:
+ params:
+ type: object
+ description: Defines filters based on which to return the supported protocol features
+ properties:
+ exclude_disabled:
+ type: boolean
+ description: Exclude disabled protocol features
+ exclude_unactivatable:
+ type: boolean
+ description: Exclude unactivatable protocol features
+
+ responses:
+ "200":
+ description: OK
+ content:
+ application/json:
+ schema:
+ type: array
+ description: Variant type, an array of strings with the supported protocol features
+ items:
+ type: string
diff --git a/plugins/producer_api_plugin/producer_api_plugin.cpp b/plugins/producer_api_plugin/producer_api_plugin.cpp
index bd3feb1e9bb..6c30ee04cfb 100644
--- a/plugins/producer_api_plugin/producer_api_plugin.cpp
+++ b/plugins/producer_api_plugin/producer_api_plugin.cpp
@@ -124,7 +124,7 @@ void producer_api_plugin::plugin_startup() {
producer_plugin::get_supported_protocol_features_params), 201),
CALL(producer, producer, get_account_ram_corrections,
INVOKE_R_R(producer, get_account_ram_corrections, producer_plugin::get_account_ram_corrections_params), 201),
- });
+ }, appbase::priority::medium);
}
void producer_api_plugin::plugin_initialize(const variables_map& options) {
diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp
index c67f53a1e11..63998650cec 100644
--- a/plugins/producer_plugin/producer_plugin.cpp
+++ b/plugins/producer_plugin/producer_plugin.cpp
@@ -78,7 +78,7 @@ using namespace eosio::chain;
using namespace eosio::chain::plugin_interface;
namespace {
- bool failure_is_subjective(const fc::exception& e, bool deadline_is_subjective) {
+ bool exception_is_exhausted(const fc::exception& e, bool deadline_is_subjective) {
auto code = e.code();
return (code == block_cpu_usage_exceeded::code_value) ||
(code == block_net_usage_exceeded::code_value) ||
@@ -181,12 +181,14 @@ class producer_plugin_impl : public std::enable_shared_from_this calculate_next_block_time(const account_name& producer_name, const block_timestamp_type& current_block_time) const;
void schedule_production_loop();
+ void schedule_maybe_produce_block( bool exhausted );
void produce_block();
bool maybe_produce_block();
+ bool block_is_exhausted() const;
bool remove_expired_persisted_trxs( const fc::time_point& deadline );
bool remove_expired_blacklisted_trxs( const fc::time_point& deadline );
bool process_unapplied_trxs( const fc::time_point& deadline );
- bool process_scheduled_and_incoming_trxs( const fc::time_point& deadline, size_t& pending_incoming_process_limit );
+ void process_scheduled_and_incoming_trxs( const fc::time_point& deadline, size_t& pending_incoming_process_limit );
bool process_incoming_trxs( const fc::time_point& deadline, size_t& pending_incoming_process_limit );
boost::program_options::variables_map _options;
@@ -207,6 +209,8 @@ class producer_plugin_impl : public std::enable_shared_from_this& block_id) {
auto& chain = chain_plug->chain();
if ( _pending_block_mode == pending_block_mode::producing ) {
- fc_wlog( _log, "dropped incoming block #${num} while producing #${pbn} for ${bt}, id: ${id}",
- ("num", block->block_num())("pbn", chain.head_block_num() + 1)
- ("bt", chain.pending_block_time())("id", block_id ? (*block_id).str() : "UNKNOWN") );
+ fc_wlog( _log, "dropped incoming block #${num} id: ${id}",
+ ("num", block->block_num())("id", block_id ? (*block_id).str() : "UNKNOWN") );
return false;
}
@@ -463,14 +466,19 @@ class producer_plugin_impl : public std::enable_shared_from_thisprocess_incoming_transaction_async( future.get(), persist_until_expired, std::move( next ) );
+ if( !self->process_incoming_transaction_async( future.get(), persist_until_expired, std::move( next ) ) ) {
+ if( self->_pending_block_mode == pending_block_mode::producing ) {
+ self->schedule_maybe_produce_block( true );
+ }
+ }
} CATCH_AND_CALL(next);
} );
}
});
}
- void process_incoming_transaction_async(const transaction_metadata_ptr& trx, bool persist_until_expired, next_function next) {
+ bool process_incoming_transaction_async(const transaction_metadata_ptr& trx, bool persist_until_expired, next_function next) {
+ bool exhausted = false;
chain::controller& chain = chain_plug->chain();
auto send_response = [this, &trx, &chain, &next](const fc::static_variant& response) {
@@ -511,18 +519,18 @@ class producer_plugin_impl : public std::enable_shared_from_this(
FC_LOG_MESSAGE( error, "expired transaction ${id}, expiration ${e}, block time ${bt}",
("id", id)("e", trx->packed_trx()->expiration())( "bt", bt )))));
- return;
+ return true;
}
if( chain.is_known_unexpired_transaction( id )) {
send_response( std::static_pointer_cast( std::make_shared(
FC_LOG_MESSAGE( error, "duplicate transaction ${id}", ("id", id)))) );
- return;
+ return true;
}
if( !chain.is_building_block()) {
_pending_incoming_transactions.add( trx, persist_until_expired, next );
- return;
+ return true;
}
auto deadline = fc::time_point::now() + fc::milliseconds( _max_transaction_time_ms );
@@ -534,9 +542,9 @@ class producer_plugin_impl : public std::enable_shared_from_thisbilled_cpu_time_us, false );
if( trace->except ) {
- if( failure_is_subjective( *trace->except, deadline_is_subjective )) {
+ if( exception_is_exhausted( *trace->except, deadline_is_subjective )) {
_pending_incoming_transactions.add( trx, persist_until_expired, next );
if( _pending_block_mode == pending_block_mode::producing ) {
fc_dlog( _trx_trace_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} COULD NOT FIT, tx: ${txid} RETRYING ",
@@ -547,6 +555,8 @@ class producer_plugin_impl : public std::enable_shared_from_thisid()));
}
+ if( !exhausted )
+ exhausted = block_is_exhausted();
} else {
auto e_ptr = trace->except->dynamic_copy_exception();
send_response( e_ptr );
@@ -567,6 +577,8 @@ class producer_plugin_impl : public std::enable_shared_from_this()->default_value(config::default_block_cpu_effort_pct / config::percent_1),
"Percentage of cpu block production time used to produce last block. Whole number percentages, e.g. 80 for 80%")
+ ("max-block-cpu-usage-threshold-us", bpo::value()->default_value( 5000 ),
+ "Threshold of CPU block production to consider block full; when within threshold of max-block-cpu-usage block can be produced immediately")
+ ("max-block-net-usage-threshold-bytes", bpo::value()->default_value( 1024 ),
+ "Threshold of NET block production to consider block full; when within threshold of max-block-net-usage block can be produced immediately")
("max-scheduled-transaction-time-per-block-ms", boost::program_options::value()->default_value(100),
"Maximum wall-clock time, in milliseconds, spent retiring scheduled transactions in any block before returning to normal transaction processing.")
("subjective-cpu-leeway-us", boost::program_options::value()->default_value( config::default_subjective_cpu_leeway_us ),
"Time in microseconds allowed for a transaction that starts with insufficient CPU quota to complete and cover its CPU usage.")
("incoming-defer-ratio", bpo::value()->default_value(1.0),
- "ratio between incoming transations and deferred transactions when both are exhausted")
+ "ratio between incoming transactions and deferred transactions when both are queued for execution")
("incoming-transaction-queue-size-mb", bpo::value()->default_value( 1024 ),
"Maximum size (in MiB) of the incoming transaction queue. Exceeding this value will subjectively drop transaction with resource exhaustion.")
("producer-threads", bpo::value()->default_value(config::default_controller_thread_pool_size),
@@ -838,6 +854,12 @@ void producer_plugin::plugin_initialize(const boost::program_options::variables_
my->_produce_time_offset_us = std::min( my->_produce_time_offset_us, cpu_effort_offset_us );
my->_last_block_time_offset_us = std::min( my->_last_block_time_offset_us, last_block_cpu_effort_offset_us );
+ my->_max_block_cpu_usage_threshold_us = options.at( "max-block-cpu-usage-threshold-us" ).as();
+ EOS_ASSERT( my->_max_block_cpu_usage_threshold_us < config::block_interval_us, plugin_config_exception,
+ "max-block-cpu-usage-threshold-us ${t} must be 0 .. ${bi}", ("bi", config::block_interval_us)("t", my->_max_block_cpu_usage_threshold_us) );
+
+ my->_max_block_net_usage_threshold_bytes = options.at( "max-block-net-usage-threshold-bytes" ).as();
+
my->_max_scheduled_transaction_time_per_block_ms = options.at("max-scheduled-transaction-time-per-block-ms").as();
if( options.at( "subjective-cpu-leeway-us" ).as() != config::default_subjective_cpu_leeway_us ) {
@@ -931,6 +953,9 @@ void producer_plugin::plugin_startup()
EOS_ASSERT( my->_producers.empty() || chain.get_validation_mode() == chain::validation_mode::FULL, plugin_config_exception,
"node cannot have any producer-name configured because block production is not safe when validation_mode is not \"full\"" );
+ EOS_ASSERT( my->_producers.empty() || my->chain_plug->accept_transactions(), plugin_config_exception,
+ "node cannot have any producer-name configured because no block production is possible with no [api|p2p]-accepted-transactions" );
+
my->_accepted_block_connection.emplace(chain.accepted_block.connect( [this]( const auto& bsp ){ my->on_block( bsp ); } ));
my->_accepted_block_header_connection.emplace(chain.accepted_block_header.connect( [this]( const auto& bsp ){ my->on_block_header( bsp ); } ));
my->_irreversible_block_connection.emplace(chain.irreversible_block.connect( [this]( const auto& bsp ){ my->on_irreversible_block( bsp->block ); } ));
@@ -983,6 +1008,7 @@ void producer_plugin::handle_sighup() {
}
void producer_plugin::pause() {
+ fc_ilog(_log, "Producer paused.");
my->_pause_production = true;
}
@@ -994,7 +1020,10 @@ void producer_plugin::resume() {
if (my->_pending_block_mode == pending_block_mode::speculating) {
chain::controller& chain = my->chain_plug->chain();
my->_unapplied_transactions.add_aborted( chain.abort_block() );
+ fc_ilog(_log, "Producer resumed. Scheduling production.");
my->schedule_production_loop();
+ } else {
+ fc_ilog(_log, "Producer resumed.");
}
}
@@ -1382,11 +1411,6 @@ fc::time_point producer_plugin_impl::calculate_pending_block_time() const {
const fc::time_point base = std::max(now, chain.head_block_time());
const int64_t min_time_to_next_block = (config::block_interval_us) - (base.time_since_epoch().count() % (config::block_interval_us) );
fc::time_point block_time = base + fc::microseconds(min_time_to_next_block);
-
-
- if((block_time - now) < fc::microseconds(config::block_interval_us/10) ) { // we must sleep for at least 50ms
- block_time += fc::microseconds(config::block_interval_us);
- }
return block_time;
}
@@ -1398,13 +1422,11 @@ fc::time_point producer_plugin_impl::calculate_block_deadline( const fc::time_po
producer_plugin_impl::start_block_result producer_plugin_impl::start_block() {
chain::controller& chain = chain_plug->chain();
- if( chain.in_immutable_mode() )
+ if( !chain_plug->accept_transactions() )
return start_block_result::waiting_for_block;
const auto& hbs = chain.head_block_state();
- //Schedule for the next second's tick regardless of chain state
- // If we would wait less than 50ms (1/10 of block_interval), wait for the whole block interval.
const fc::time_point now = fc::time_point::now();
const fc::time_point block_time = calculate_pending_block_time();
@@ -1583,7 +1605,7 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() {
if( app().is_quiting() ) // db guard exception above in LOG_AND_DROP could have called app().quit()
return start_block_result::failed;
- if (preprocess_deadline <= fc::time_point::now()) {
+ if (preprocess_deadline <= fc::time_point::now() || block_is_exhausted()) {
return start_block_result::exhausted;
} else {
if( !process_incoming_trxs( preprocess_deadline, pending_incoming_process_limit ) )
@@ -1654,12 +1676,14 @@ bool producer_plugin_impl::remove_expired_blacklisted_trxs( const fc::time_point
{
bool exhausted = false;
auto& blacklist_by_expiry = _blacklisted_transactions.get();
- auto now = fc::time_point::now();
if(!blacklist_by_expiry.empty()) {
+ const chain::controller& chain = chain_plug->chain();
+ const auto lib_time = chain.last_irreversible_block_time();
+
int num_expired = 0;
int orig_count = _blacklisted_transactions.size();
- while (!blacklist_by_expiry.empty() && blacklist_by_expiry.begin()->expiry <= now) {
+ while (!blacklist_by_expiry.empty() && blacklist_by_expiry.begin()->expiry <= lib_time) {
if (deadline <= fc::time_point::now()) {
exhausted = true;
break;
@@ -1702,12 +1726,14 @@ bool producer_plugin_impl::process_unapplied_trxs( const fc::time_point& deadlin
trx_deadline = deadline;
}
- auto trace = chain.push_transaction( trx, trx_deadline );
+ auto trace = chain.push_transaction( trx, trx_deadline, trx->billed_cpu_time_us, false );
if( trace->except ) {
- if( failure_is_subjective( *trace->except, deadline_is_subjective ) ) {
- exhausted = true;
- // don't erase, subjective failure so try again next time
- break;
+ if( exception_is_exhausted( *trace->except, deadline_is_subjective ) ) {
+ if( block_is_exhausted() ) {
+ exhausted = true;
+ // don't erase, subjective failure so try again next time
+ break;
+ }
} else {
// this failed our configured maximum transaction time, we don't want to replay it
++num_failed;
@@ -1729,7 +1755,7 @@ bool producer_plugin_impl::process_unapplied_trxs( const fc::time_point& deadlin
return !exhausted;
}
-bool producer_plugin_impl::process_scheduled_and_incoming_trxs( const fc::time_point& deadline, size_t& pending_incoming_process_limit )
+void producer_plugin_impl::process_scheduled_and_incoming_trxs( const fc::time_point& deadline, size_t& pending_incoming_process_limit )
{
// scheduled transactions
int num_applied = 0;
@@ -1746,21 +1772,22 @@ bool producer_plugin_impl::process_scheduled_and_incoming_trxs( const fc::time_p
auto sch_itr = sch_idx.begin();
while( sch_itr != sch_idx.end() ) {
if( sch_itr->delay_until > pending_block_time) break; // not scheduled yet
+ if( exhausted || deadline <= fc::time_point::now() ) {
+ exhausted = true;
+ break;
+ }
if( sch_itr->published >= pending_block_time ) {
++sch_itr;
continue; // do not allow schedule and execute in same block
}
- if( deadline <= fc::time_point::now() ) {
- exhausted = true;
- break;
- }
- const transaction_id_type trx_id = sch_itr->trx_id; // make copy since reference could be invalidated
- if (blacklist_by_id.find(trx_id) != blacklist_by_id.end()) {
+ if (blacklist_by_id.find(sch_itr->trx_id) != blacklist_by_id.end()) {
++sch_itr;
continue;
}
+ const transaction_id_type trx_id = sch_itr->trx_id; // make copy since reference could be invalidated
+ const auto sch_expiration = sch_itr->expiration;
auto sch_itr_next = sch_itr; // save off next since sch_itr may be invalidated by loop
++sch_itr_next;
const auto next_delay_until = sch_itr_next != sch_idx.end() ? sch_itr_next->delay_until : sch_itr->delay_until;
@@ -1778,10 +1805,13 @@ bool producer_plugin_impl::process_scheduled_and_incoming_trxs( const fc::time_p
auto e = _pending_incoming_transactions.pop_front();
--pending_incoming_process_limit;
incoming_trx_weight -= 1.0;
- process_incoming_transaction_async(std::get<0>(e), std::get<1>(e), std::get<2>(e));
+ if( !process_incoming_transaction_async(std::get<0>(e), std::get<1>(e), std::get<2>(e)) ) {
+ exhausted = true;
+ break;
+ }
}
- if (deadline <= fc::time_point::now()) {
+ if (exhausted || deadline <= fc::time_point::now()) {
exhausted = true;
break;
}
@@ -1794,15 +1824,16 @@ bool producer_plugin_impl::process_scheduled_and_incoming_trxs( const fc::time_p
trx_deadline = deadline;
}
- auto trace = chain.push_scheduled_transaction(trx_id, trx_deadline);
+ auto trace = chain.push_scheduled_transaction(trx_id, trx_deadline, 0, false);
if (trace->except) {
- if (failure_is_subjective(*trace->except, deadline_is_subjective)) {
- exhausted = true;
- break;
+ if (exception_is_exhausted(*trace->except, deadline_is_subjective)) {
+ if( block_is_exhausted() ) {
+ exhausted = true;
+ break;
+ }
} else {
- auto expiration = fc::time_point::now() + fc::seconds(chain.get_global_properties().configuration.deferred_trx_expiration_window);
// this failed our configured maximum transaction time, we don't want to replay it add it to a blacklist
- _blacklisted_transactions.insert(transaction_id_with_expiry{trx_id, expiration});
+ _blacklisted_transactions.insert(transaction_id_with_expiry{trx_id, sch_expiration});
num_failed++;
}
} else {
@@ -1822,8 +1853,6 @@ bool producer_plugin_impl::process_scheduled_and_incoming_trxs( const fc::time_p
"Processed ${m} of ${n} scheduled transactions, Applied ${applied}, Failed/Dropped ${failed}",
( "m", num_processed )( "n", scheduled_trxs_size )( "applied", num_applied )( "failed", num_failed ) );
}
-
- return !exhausted;
}
bool producer_plugin_impl::process_incoming_trxs( const fc::time_point& deadline, size_t& pending_incoming_process_limit )
@@ -1839,14 +1868,28 @@ bool producer_plugin_impl::process_incoming_trxs( const fc::time_point& deadline
}
auto e = _pending_incoming_transactions.pop_front();
--pending_incoming_process_limit;
- process_incoming_transaction_async(std::get<0>(e), std::get<1>(e), std::get<2>(e));
++processed;
+ if( !process_incoming_transaction_async(std::get<0>(e), std::get<1>(e), std::get<2>(e)) ) {
+ exhausted = true;
+ break;
+ }
}
fc_dlog(_log, "Processed ${n} pending transactions, ${p} left", ("n", processed)("p", _pending_incoming_transactions.size()));
}
return !exhausted;
}
+// Returns true when the in-progress block should be considered full:
+// the remaining per-block CPU or NET allowance reported by the resource
+// limits manager has dropped below the operator-configured thresholds
+// (_max_block_cpu_usage_threshold_us / _max_block_net_usage_threshold_bytes).
+bool producer_plugin_impl::block_is_exhausted() const {
+ const chain::controller& chain = chain_plug->chain();
+ const auto& rl = chain.get_resource_limits_manager();
+
+ // Remaining CPU budget (microseconds) for the current block.
+ const uint64_t cpu_limit = rl.get_block_cpu_limit();
+ if( cpu_limit < _max_block_cpu_usage_threshold_us ) return true;
+ // Remaining NET budget (bytes) for the current block.
+ const uint64_t net_limit = rl.get_block_net_limit();
+ if( net_limit < _max_block_net_usage_threshold_bytes ) return true;
+ return false;
+}
+
// Example:
// --> Start block A (block time x.500) at time x.000
// -> start_block()
@@ -1854,9 +1897,7 @@ bool producer_plugin_impl::process_incoming_trxs( const fc::time_point& deadline
// -> Idle
// --> Start block B (block time y.000) at time x.500
void producer_plugin_impl::schedule_production_loop() {
- chain::controller& chain = chain_plug->chain();
_timer.cancel();
- std::weak_ptr weak_this = shared_from_this();
auto result = start_block();
@@ -1866,7 +1907,7 @@ void producer_plugin_impl::schedule_production_loop() {
// we failed to start a block, so try again later?
_timer.async_wait( app().get_priority_queue().wrap( priority::high,
- [weak_this, cid = ++_timer_corelation_id]( const boost::system::error_code& ec ) {
+ [weak_this = weak_from_this(), cid = ++_timer_corelation_id]( const boost::system::error_code& ec ) {
auto self = weak_this.lock();
if( self && ec != boost::asio::error::operation_aborted && cid == self->_timer_corelation_id ) {
self->schedule_production_loop();
@@ -1875,7 +1916,7 @@ void producer_plugin_impl::schedule_production_loop() {
} else if (result == start_block_result::waiting_for_block){
if (!_producers.empty() && !production_disabled_by_policy()) {
fc_dlog(_log, "Waiting till another block is received and scheduling Speculative/Production Change");
- schedule_delayed_production_loop(weak_this, calculate_producer_wake_up_time(calculate_pending_block_time()));
+ schedule_delayed_production_loop(weak_from_this(), calculate_producer_wake_up_time(calculate_pending_block_time()));
} else {
fc_dlog(_log, "Waiting till another block is received");
// nothing to do until more blocks arrive
@@ -1885,52 +1926,52 @@ void producer_plugin_impl::schedule_production_loop() {
// scheduled in start_block()
} else if (_pending_block_mode == pending_block_mode::producing) {
+ schedule_maybe_produce_block( result == start_block_result::exhausted );
- // we succeeded but block may be exhausted
- static const boost::posix_time::ptime epoch(boost::gregorian::date(1970, 1, 1));
- auto deadline = calculate_block_deadline(chain.pending_block_time());
-
- if (deadline > fc::time_point::now()) {
- // ship this block off no later than its deadline
- EOS_ASSERT( chain.is_building_block(), missing_pending_block_state, "producing without pending_block_state, start_block succeeded" );
- _timer.expires_at( epoch + boost::posix_time::microseconds( deadline.time_since_epoch().count() ));
- fc_dlog(_log, "Scheduling Block Production on Normal Block #${num} for ${time}",
- ("num", chain.head_block_num()+1)("time",deadline));
- } else {
- EOS_ASSERT( chain.is_building_block(), missing_pending_block_state, "producing without pending_block_state" );
- auto expect_time = chain.pending_block_time() - fc::microseconds(config::block_interval_us);
- // ship this block off up to 1 block time earlier or immediately
- if (fc::time_point::now() >= expect_time) {
- _timer.expires_from_now( boost::posix_time::microseconds( 0 ));
- fc_dlog(_log, "Scheduling Block Production on Exhausted Block #${num} immediately",
- ("num", chain.head_block_num()+1));
- } else {
- _timer.expires_at(epoch + boost::posix_time::microseconds(expect_time.time_since_epoch().count()));
- fc_dlog(_log, "Scheduling Block Production on Exhausted Block #${num} at ${time}",
- ("num", chain.head_block_num()+1)("time",expect_time));
- }
- }
-
- _timer.async_wait( app().get_priority_queue().wrap( priority::high,
- [&chain,weak_this,cid=++_timer_corelation_id](const boost::system::error_code& ec) {
- auto self = weak_this.lock();
- if( self && ec != boost::asio::error::operation_aborted && cid == self->_timer_corelation_id ) {
- // pending_block_state expected, but can't assert inside async_wait
- auto block_num = chain.is_building_block() ? chain.head_block_num() + 1 : 0;
- fc_dlog( _log, "Produce block timer for ${num} running at ${time}", ("num", block_num)("time", fc::time_point::now()) );
- auto res = self->maybe_produce_block();
- fc_dlog( _log, "Producing Block #${num} returned: ${res}", ("num", block_num)( "res", res ) );
- }
- } ) );
} else if (_pending_block_mode == pending_block_mode::speculating && !_producers.empty() && !production_disabled_by_policy()){
+ chain::controller& chain = chain_plug->chain();
fc_dlog(_log, "Speculative Block Created; Scheduling Speculative/Production Change");
EOS_ASSERT( chain.is_building_block(), missing_pending_block_state, "speculating without pending_block_state" );
- schedule_delayed_production_loop(weak_this, calculate_producer_wake_up_time(chain.pending_block_time()));
+ schedule_delayed_production_loop(weak_from_this(), calculate_producer_wake_up_time(chain.pending_block_time()));
} else {
fc_dlog(_log, "Speculative Block Created");
}
}
+// Arms _timer so that maybe_produce_block() fires for the block currently
+// being built: at the block's deadline in the normal case, or immediately
+// when the caller reports exhaustion or the deadline has already passed.
+// `exhausted` - true when start_block() returned start_block_result::exhausted.
+void producer_plugin_impl::schedule_maybe_produce_block( bool exhausted ) {
+ chain::controller& chain = chain_plug->chain();
+
+ // we succeeded but block may be exhausted
+ static const boost::posix_time::ptime epoch( boost::gregorian::date( 1970, 1, 1 ) );
+ auto deadline = calculate_block_deadline( chain.pending_block_time() );
+
+ if( !exhausted && deadline > fc::time_point::now() ) {
+ // ship this block off no later than its deadline
+ EOS_ASSERT( chain.is_building_block(), missing_pending_block_state,
+ "producing without pending_block_state, start_block succeeded" );
+ _timer.expires_at( epoch + boost::posix_time::microseconds( deadline.time_since_epoch().count() ) );
+ fc_dlog( _log, "Scheduling Block Production on Normal Block #${num} for ${time}",
+ ("num", chain.head_block_num() + 1)( "time", deadline ) );
+ } else {
+ // Block is exhausted or past deadline: produce it right away (zero-delay timer).
+ EOS_ASSERT( chain.is_building_block(), missing_pending_block_state, "producing without pending_block_state" );
+ _timer.expires_from_now( boost::posix_time::microseconds( 0 ) );
+ fc_dlog( _log, "Scheduling Block Production on ${desc} Block #${num} immediately",
+ ("num", chain.head_block_num() + 1)("desc", block_is_exhausted() ? "Exhausted" : "Deadline exceeded") );
+ }
+
+ // weak_from_this() + correlation id guard against firing after this object
+ // is destroyed or after a newer production loop has been scheduled.
+ _timer.async_wait( app().get_priority_queue().wrap( priority::high,
+ [&chain, weak_this = weak_from_this(), cid=++_timer_corelation_id](const boost::system::error_code& ec) {
+ auto self = weak_this.lock();
+ if( self && ec != boost::asio::error::operation_aborted && cid == self->_timer_corelation_id ) {
+ // pending_block_state expected, but can't assert inside async_wait
+ auto block_num = chain.is_building_block() ? chain.head_block_num() + 1 : 0;
+ fc_dlog( _log, "Produce block timer for ${num} running at ${time}", ("num", block_num)("time", fc::time_point::now()) );
+ auto res = self->maybe_produce_block();
+ fc_dlog( _log, "Producing Block #${num} returned: ${res}", ("num", block_num)( "res", res ) );
+ }
+ } ) );
+}
+
optional producer_plugin_impl::calculate_producer_wake_up_time( const block_timestamp_type& ref_block_time ) const {
// if we have any producers then we should at least set a timer for our next available slot
optional wake_up_time;
diff --git a/plugins/test_control_api_plugin/test_control.swagger.yaml b/plugins/test_control_api_plugin/test_control.swagger.yaml
new file mode 100644
index 00000000000..906fe76ea8c
--- /dev/null
+++ b/plugins/test_control_api_plugin/test_control.swagger.yaml
@@ -0,0 +1,58 @@
+# OpenAPI 3.0 description of the test_control_api_plugin HTTP interface.
+# Exposes a single endpoint used by integration tests to shut a node down
+# at a chosen point in the production sequence.
+openapi: 3.0.0
+info:
+ title: Test Control API
+ version: 1.0.0
+ license:
+ name: MIT
+ url: https://opensource.org/licenses/MIT
+ contact:
+ url: https://eos.io
+tags:
+ - name: eosio
+servers:
+ - url: '{protocol}://{host}:{port}/v1/'
+ variables:
+ protocol:
+ enum:
+ - http
+ - https
+ default: http
+ host:
+ default: localhost
+ port:
+ default: "8080"
+components:
+ schemas: {}
+paths:
+ /test_control/kill_node_or_producer:
+ post:
+ tags:
+ - TestControl
+ summary: kill_node_or_producer
+ description: Kills node or producer
+ operationId: kill_node_or_producer
+ parameters: []
+ requestBody:
+ content:
+ application/json:
+ schema:
+ type: object
+ required:
+ - params
+ properties:
+ params:
+ type: object
+ properties:
+ producer:
+ $ref: 'https://eosio.github.io/schemata/v2.0/oas/Name.yaml'
+ where_in_sequence:
+ type: integer
+ based_on_lib:
+ type: integer
+ responses:
+ '200':
+ description: OK
+ content:
+ application/json:
+ schema:
+ description: Returns Nothing
diff --git a/plugins/trace_api_plugin/.clang-format b/plugins/trace_api_plugin/.clang-format
new file mode 100644
index 00000000000..42dd5b7832c
--- /dev/null
+++ b/plugins/trace_api_plugin/.clang-format
@@ -0,0 +1,8 @@
+# clang-format configuration for trace_api_plugin sources:
+# LLVM base style with 3-space indent and a 120-column limit.
+BasedOnStyle: LLVM
+IndentWidth: 3
+ColumnLimit: 120
+PointerAlignment: Left
+AlwaysBreakTemplateDeclarations: true
+AlignConsecutiveAssignments: true
+AlignConsecutiveDeclarations: true
+BreakConstructorInitializers: BeforeComma
diff --git a/plugins/trace_api_plugin/CMakeLists.txt b/plugins/trace_api_plugin/CMakeLists.txt
new file mode 100644
index 00000000000..c07c7fe7924
--- /dev/null
+++ b/plugins/trace_api_plugin/CMakeLists.txt
@@ -0,0 +1,12 @@
+# Build trace_api_plugin as a static library from its sources plus the
+# public headers (globbed so IDE project generators list them).
+file(GLOB HEADERS "include/eosio/trace_api_plugin/*.hpp")
+add_library( trace_api_plugin
+ request_handler.cpp
+ store_provider.cpp
+ abi_data_handler.cpp
+ trace_api_plugin.cpp
+ ${HEADERS} )
+
+target_link_libraries( trace_api_plugin chain_plugin http_plugin eosio_chain appbase )
+target_include_directories( trace_api_plugin PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" )
+
+# Unit tests live alongside the plugin.
+add_subdirectory( test )
diff --git a/plugins/trace_api_plugin/abi_data_handler.cpp b/plugins/trace_api_plugin/abi_data_handler.cpp
new file mode 100644
index 00000000000..cb8113658b3
--- /dev/null
+++ b/plugins/trace_api_plugin/abi_data_handler.cpp
@@ -0,0 +1,26 @@
+#include
+#include
+
+namespace eosio::trace_api {
+
+ // Registers the ABI to use when decoding action data for `name`'s account.
+ // The serializer is constructed with fc::microseconds::maximum(), i.e. no
+ // practical deserialization time limit.
+ void abi_data_handler::add_abi( const chain::name& name, const chain::abi_def& abi ) {
+ abi_serializer_by_account.emplace(name, std::make_shared(abi, fc::microseconds::maximum()));
+ }
+
+ // Decodes `action.data` into a variant using the ABI registered for the
+ // action's account. Returns an empty variant when no ABI is registered,
+ // the action type is not found in the ABI, or decoding throws — in the
+ // last case the exception is routed to except_handler rather than
+ // propagated to the caller.
+ // NOTE(review): the `yield` parameter is unused in this body — confirm
+ // whether it should be threaded into binary_to_variant.
+ fc::variant abi_data_handler::process_data(const action_trace_v0& action, const yield_function& yield ) {
+ if (abi_serializer_by_account.count(action.account) > 0) {
+ const auto& serializer_p = abi_serializer_by_account.at(action.account);
+ auto type_name = serializer_p->get_action_type(action.action);
+
+ if (!type_name.empty()) {
+ try {
+ return serializer_p->binary_to_variant(type_name, action.data, fc::microseconds::maximum());
+ } catch (...) {
+ // Swallow and report: a bad ABI must not abort trace extraction.
+ except_handler(MAKE_EXCEPTION_WITH_CONTEXT(std::current_exception()));
+ }
+ }
+ }
+
+ return {};
+ }
+}
\ No newline at end of file
diff --git a/plugins/trace_api_plugin/examples/abis/eosio.abi b/plugins/trace_api_plugin/examples/abis/eosio.abi
new file mode 100644
index 00000000000..a2881a67dca
--- /dev/null
+++ b/plugins/trace_api_plugin/examples/abis/eosio.abi
@@ -0,0 +1,2089 @@
+{
+ "version": "eosio::abi/1.1",
+ "types": [],
+ "structs": [
+ {
+ "name": "abi_hash",
+ "base": "",
+ "fields": [
+ {
+ "name": "owner",
+ "type": "name"
+ },
+ {
+ "name": "hash",
+ "type": "checksum256"
+ }
+ ]
+ },
+ {
+ "name": "activate",
+ "base": "",
+ "fields": [
+ {
+ "name": "feature_digest",
+ "type": "checksum256"
+ }
+ ]
+ },
+ {
+ "name": "authority",
+ "base": "",
+ "fields": [
+ {
+ "name": "threshold",
+ "type": "uint32"
+ },
+ {
+ "name": "keys",
+ "type": "key_weight[]"
+ },
+ {
+ "name": "accounts",
+ "type": "permission_level_weight[]"
+ },
+ {
+ "name": "waits",
+ "type": "wait_weight[]"
+ }
+ ]
+ },
+ {
+ "name": "bid_refund",
+ "base": "",
+ "fields": [
+ {
+ "name": "bidder",
+ "type": "name"
+ },
+ {
+ "name": "amount",
+ "type": "asset"
+ }
+ ]
+ },
+ {
+ "name": "bidname",
+ "base": "",
+ "fields": [
+ {
+ "name": "bidder",
+ "type": "name"
+ },
+ {
+ "name": "newname",
+ "type": "name"
+ },
+ {
+ "name": "bid",
+ "type": "asset"
+ }
+ ]
+ },
+ {
+ "name": "bidrefund",
+ "base": "",
+ "fields": [
+ {
+ "name": "bidder",
+ "type": "name"
+ },
+ {
+ "name": "newname",
+ "type": "name"
+ }
+ ]
+ },
+ {
+ "name": "block_header",
+ "base": "",
+ "fields": [
+ {
+ "name": "timestamp",
+ "type": "uint32"
+ },
+ {
+ "name": "producer",
+ "type": "name"
+ },
+ {
+ "name": "confirmed",
+ "type": "uint16"
+ },
+ {
+ "name": "previous",
+ "type": "checksum256"
+ },
+ {
+ "name": "transaction_mroot",
+ "type": "checksum256"
+ },
+ {
+ "name": "action_mroot",
+ "type": "checksum256"
+ },
+ {
+ "name": "schedule_version",
+ "type": "uint32"
+ },
+ {
+ "name": "new_producers",
+ "type": "producer_schedule?"
+ }
+ ]
+ },
+ {
+ "name": "blockchain_parameters",
+ "base": "",
+ "fields": [
+ {
+ "name": "max_block_net_usage",
+ "type": "uint64"
+ },
+ {
+ "name": "target_block_net_usage_pct",
+ "type": "uint32"
+ },
+ {
+ "name": "max_transaction_net_usage",
+ "type": "uint32"
+ },
+ {
+ "name": "base_per_transaction_net_usage",
+ "type": "uint32"
+ },
+ {
+ "name": "net_usage_leeway",
+ "type": "uint32"
+ },
+ {
+ "name": "context_free_discount_net_usage_num",
+ "type": "uint32"
+ },
+ {
+ "name": "context_free_discount_net_usage_den",
+ "type": "uint32"
+ },
+ {
+ "name": "max_block_cpu_usage",
+ "type": "uint32"
+ },
+ {
+ "name": "target_block_cpu_usage_pct",
+ "type": "uint32"
+ },
+ {
+ "name": "max_transaction_cpu_usage",
+ "type": "uint32"
+ },
+ {
+ "name": "min_transaction_cpu_usage",
+ "type": "uint32"
+ },
+ {
+ "name": "max_transaction_lifetime",
+ "type": "uint32"
+ },
+ {
+ "name": "deferred_trx_expiration_window",
+ "type": "uint32"
+ },
+ {
+ "name": "max_transaction_delay",
+ "type": "uint32"
+ },
+ {
+ "name": "max_inline_action_size",
+ "type": "uint32"
+ },
+ {
+ "name": "max_inline_action_depth",
+ "type": "uint16"
+ },
+ {
+ "name": "max_authority_depth",
+ "type": "uint16"
+ }
+ ]
+ },
+ {
+ "name": "buyram",
+ "base": "",
+ "fields": [
+ {
+ "name": "payer",
+ "type": "name"
+ },
+ {
+ "name": "receiver",
+ "type": "name"
+ },
+ {
+ "name": "quant",
+ "type": "asset"
+ }
+ ]
+ },
+ {
+ "name": "buyrambytes",
+ "base": "",
+ "fields": [
+ {
+ "name": "payer",
+ "type": "name"
+ },
+ {
+ "name": "receiver",
+ "type": "name"
+ },
+ {
+ "name": "bytes",
+ "type": "uint32"
+ }
+ ]
+ },
+ {
+ "name": "buyrex",
+ "base": "",
+ "fields": [
+ {
+ "name": "from",
+ "type": "name"
+ },
+ {
+ "name": "amount",
+ "type": "asset"
+ }
+ ]
+ },
+ {
+ "name": "canceldelay",
+ "base": "",
+ "fields": [
+ {
+ "name": "canceling_auth",
+ "type": "permission_level"
+ },
+ {
+ "name": "trx_id",
+ "type": "checksum256"
+ }
+ ]
+ },
+ {
+ "name": "claimrewards",
+ "base": "",
+ "fields": [
+ {
+ "name": "owner",
+ "type": "name"
+ }
+ ]
+ },
+ {
+ "name": "closerex",
+ "base": "",
+ "fields": [
+ {
+ "name": "owner",
+ "type": "name"
+ }
+ ]
+ },
+ {
+ "name": "cnclrexorder",
+ "base": "",
+ "fields": [
+ {
+ "name": "owner",
+ "type": "name"
+ }
+ ]
+ },
+ {
+ "name": "connector",
+ "base": "",
+ "fields": [
+ {
+ "name": "balance",
+ "type": "asset"
+ },
+ {
+ "name": "weight",
+ "type": "float64"
+ }
+ ]
+ },
+ {
+ "name": "consolidate",
+ "base": "",
+ "fields": [
+ {
+ "name": "owner",
+ "type": "name"
+ }
+ ]
+ },
+ {
+ "name": "defcpuloan",
+ "base": "",
+ "fields": [
+ {
+ "name": "from",
+ "type": "name"
+ },
+ {
+ "name": "loan_num",
+ "type": "uint64"
+ },
+ {
+ "name": "amount",
+ "type": "asset"
+ }
+ ]
+ },
+ {
+ "name": "defnetloan",
+ "base": "",
+ "fields": [
+ {
+ "name": "from",
+ "type": "name"
+ },
+ {
+ "name": "loan_num",
+ "type": "uint64"
+ },
+ {
+ "name": "amount",
+ "type": "asset"
+ }
+ ]
+ },
+ {
+ "name": "delegatebw",
+ "base": "",
+ "fields": [
+ {
+ "name": "from",
+ "type": "name"
+ },
+ {
+ "name": "receiver",
+ "type": "name"
+ },
+ {
+ "name": "stake_net_quantity",
+ "type": "asset"
+ },
+ {
+ "name": "stake_cpu_quantity",
+ "type": "asset"
+ },
+ {
+ "name": "transfer",
+ "type": "bool"
+ }
+ ]
+ },
+ {
+ "name": "delegated_bandwidth",
+ "base": "",
+ "fields": [
+ {
+ "name": "from",
+ "type": "name"
+ },
+ {
+ "name": "to",
+ "type": "name"
+ },
+ {
+ "name": "net_weight",
+ "type": "asset"
+ },
+ {
+ "name": "cpu_weight",
+ "type": "asset"
+ }
+ ]
+ },
+ {
+ "name": "deleteauth",
+ "base": "",
+ "fields": [
+ {
+ "name": "account",
+ "type": "name"
+ },
+ {
+ "name": "permission",
+ "type": "name"
+ }
+ ]
+ },
+ {
+ "name": "deposit",
+ "base": "",
+ "fields": [
+ {
+ "name": "owner",
+ "type": "name"
+ },
+ {
+ "name": "amount",
+ "type": "asset"
+ }
+ ]
+ },
+ {
+ "name": "eosio_global_state",
+ "base": "blockchain_parameters",
+ "fields": [
+ {
+ "name": "max_ram_size",
+ "type": "uint64"
+ },
+ {
+ "name": "total_ram_bytes_reserved",
+ "type": "uint64"
+ },
+ {
+ "name": "total_ram_stake",
+ "type": "int64"
+ },
+ {
+ "name": "last_producer_schedule_update",
+ "type": "block_timestamp_type"
+ },
+ {
+ "name": "last_pervote_bucket_fill",
+ "type": "time_point"
+ },
+ {
+ "name": "pervote_bucket",
+ "type": "int64"
+ },
+ {
+ "name": "perblock_bucket",
+ "type": "int64"
+ },
+ {
+ "name": "total_unpaid_blocks",
+ "type": "uint32"
+ },
+ {
+ "name": "total_activated_stake",
+ "type": "int64"
+ },
+ {
+ "name": "thresh_activated_stake_time",
+ "type": "time_point"
+ },
+ {
+ "name": "last_producer_schedule_size",
+ "type": "uint16"
+ },
+ {
+ "name": "total_producer_vote_weight",
+ "type": "float64"
+ },
+ {
+ "name": "last_name_close",
+ "type": "block_timestamp_type"
+ }
+ ]
+ },
+ {
+ "name": "eosio_global_state2",
+ "base": "",
+ "fields": [
+ {
+ "name": "new_ram_per_block",
+ "type": "uint16"
+ },
+ {
+ "name": "last_ram_increase",
+ "type": "block_timestamp_type"
+ },
+ {
+ "name": "last_block_num",
+ "type": "block_timestamp_type"
+ },
+ {
+ "name": "total_producer_votepay_share",
+ "type": "float64"
+ },
+ {
+ "name": "revision",
+ "type": "uint8"
+ }
+ ]
+ },
+ {
+ "name": "eosio_global_state3",
+ "base": "",
+ "fields": [
+ {
+ "name": "last_vpay_state_update",
+ "type": "time_point"
+ },
+ {
+ "name": "total_vpay_share_change_rate",
+ "type": "float64"
+ }
+ ]
+ },
+ {
+ "name": "eosio_global_state4",
+ "base": "",
+ "fields": [
+ {
+ "name": "continuous_rate",
+ "type": "float64"
+ },
+ {
+ "name": "inflation_pay_factor",
+ "type": "int64"
+ },
+ {
+ "name": "votepay_factor",
+ "type": "int64"
+ }
+ ]
+ },
+ {
+ "name": "exchange_state",
+ "base": "",
+ "fields": [
+ {
+ "name": "supply",
+ "type": "asset"
+ },
+ {
+ "name": "base",
+ "type": "connector"
+ },
+ {
+ "name": "quote",
+ "type": "connector"
+ }
+ ]
+ },
+ {
+ "name": "fundcpuloan",
+ "base": "",
+ "fields": [
+ {
+ "name": "from",
+ "type": "name"
+ },
+ {
+ "name": "loan_num",
+ "type": "uint64"
+ },
+ {
+ "name": "payment",
+ "type": "asset"
+ }
+ ]
+ },
+ {
+ "name": "fundnetloan",
+ "base": "",
+ "fields": [
+ {
+ "name": "from",
+ "type": "name"
+ },
+ {
+ "name": "loan_num",
+ "type": "uint64"
+ },
+ {
+ "name": "payment",
+ "type": "asset"
+ }
+ ]
+ },
+ {
+ "name": "init",
+ "base": "",
+ "fields": [
+ {
+ "name": "version",
+ "type": "varuint32"
+ },
+ {
+ "name": "core",
+ "type": "symbol"
+ }
+ ]
+ },
+ {
+ "name": "key_weight",
+ "base": "",
+ "fields": [
+ {
+ "name": "key",
+ "type": "public_key"
+ },
+ {
+ "name": "weight",
+ "type": "uint16"
+ }
+ ]
+ },
+ {
+ "name": "linkauth",
+ "base": "",
+ "fields": [
+ {
+ "name": "account",
+ "type": "name"
+ },
+ {
+ "name": "code",
+ "type": "name"
+ },
+ {
+ "name": "type",
+ "type": "name"
+ },
+ {
+ "name": "requirement",
+ "type": "name"
+ }
+ ]
+ },
+ {
+ "name": "mvfrsavings",
+ "base": "",
+ "fields": [
+ {
+ "name": "owner",
+ "type": "name"
+ },
+ {
+ "name": "rex",
+ "type": "asset"
+ }
+ ]
+ },
+ {
+ "name": "mvtosavings",
+ "base": "",
+ "fields": [
+ {
+ "name": "owner",
+ "type": "name"
+ },
+ {
+ "name": "rex",
+ "type": "asset"
+ }
+ ]
+ },
+ {
+ "name": "name_bid",
+ "base": "",
+ "fields": [
+ {
+ "name": "newname",
+ "type": "name"
+ },
+ {
+ "name": "high_bidder",
+ "type": "name"
+ },
+ {
+ "name": "high_bid",
+ "type": "int64"
+ },
+ {
+ "name": "last_bid_time",
+ "type": "time_point"
+ }
+ ]
+ },
+ {
+ "name": "newaccount",
+ "base": "",
+ "fields": [
+ {
+ "name": "creator",
+ "type": "name"
+ },
+ {
+ "name": "name",
+ "type": "name"
+ },
+ {
+ "name": "owner",
+ "type": "authority"
+ },
+ {
+ "name": "active",
+ "type": "authority"
+ }
+ ]
+ },
+ {
+ "name": "onblock",
+ "base": "",
+ "fields": [
+ {
+ "name": "header",
+ "type": "block_header"
+ }
+ ]
+ },
+ {
+ "name": "onerror",
+ "base": "",
+ "fields": [
+ {
+ "name": "sender_id",
+ "type": "uint128"
+ },
+ {
+ "name": "sent_trx",
+ "type": "bytes"
+ }
+ ]
+ },
+ {
+ "name": "pair_time_point_sec_int64",
+ "base": "",
+ "fields": [
+ {
+ "name": "key",
+ "type": "time_point_sec"
+ },
+ {
+ "name": "value",
+ "type": "int64"
+ }
+ ]
+ },
+ {
+ "name": "permission_level",
+ "base": "",
+ "fields": [
+ {
+ "name": "actor",
+ "type": "name"
+ },
+ {
+ "name": "permission",
+ "type": "name"
+ }
+ ]
+ },
+ {
+ "name": "permission_level_weight",
+ "base": "",
+ "fields": [
+ {
+ "name": "permission",
+ "type": "permission_level"
+ },
+ {
+ "name": "weight",
+ "type": "uint16"
+ }
+ ]
+ },
+ {
+ "name": "producer_info",
+ "base": "",
+ "fields": [
+ {
+ "name": "owner",
+ "type": "name"
+ },
+ {
+ "name": "total_votes",
+ "type": "float64"
+ },
+ {
+ "name": "producer_key",
+ "type": "public_key"
+ },
+ {
+ "name": "is_active",
+ "type": "bool"
+ },
+ {
+ "name": "url",
+ "type": "string"
+ },
+ {
+ "name": "unpaid_blocks",
+ "type": "uint32"
+ },
+ {
+ "name": "last_claim_time",
+ "type": "time_point"
+ },
+ {
+ "name": "location",
+ "type": "uint16"
+ }
+ ]
+ },
+ {
+ "name": "producer_info2",
+ "base": "",
+ "fields": [
+ {
+ "name": "owner",
+ "type": "name"
+ },
+ {
+ "name": "votepay_share",
+ "type": "float64"
+ },
+ {
+ "name": "last_votepay_share_update",
+ "type": "time_point"
+ }
+ ]
+ },
+ {
+ "name": "producer_key",
+ "base": "",
+ "fields": [
+ {
+ "name": "producer_name",
+ "type": "name"
+ },
+ {
+ "name": "block_signing_key",
+ "type": "public_key"
+ }
+ ]
+ },
+ {
+ "name": "producer_schedule",
+ "base": "",
+ "fields": [
+ {
+ "name": "version",
+ "type": "uint32"
+ },
+ {
+ "name": "producers",
+ "type": "producer_key[]"
+ }
+ ]
+ },
+ {
+ "name": "refund",
+ "base": "",
+ "fields": [
+ {
+ "name": "owner",
+ "type": "name"
+ }
+ ]
+ },
+ {
+ "name": "refund_request",
+ "base": "",
+ "fields": [
+ {
+ "name": "owner",
+ "type": "name"
+ },
+ {
+ "name": "request_time",
+ "type": "time_point_sec"
+ },
+ {
+ "name": "net_amount",
+ "type": "asset"
+ },
+ {
+ "name": "cpu_amount",
+ "type": "asset"
+ }
+ ]
+ },
+ {
+ "name": "regproducer",
+ "base": "",
+ "fields": [
+ {
+ "name": "producer",
+ "type": "name"
+ },
+ {
+ "name": "producer_key",
+ "type": "public_key"
+ },
+ {
+ "name": "url",
+ "type": "string"
+ },
+ {
+ "name": "location",
+ "type": "uint16"
+ }
+ ]
+ },
+ {
+ "name": "regproxy",
+ "base": "",
+ "fields": [
+ {
+ "name": "proxy",
+ "type": "name"
+ },
+ {
+ "name": "isproxy",
+ "type": "bool"
+ }
+ ]
+ },
+ {
+ "name": "rentcpu",
+ "base": "",
+ "fields": [
+ {
+ "name": "from",
+ "type": "name"
+ },
+ {
+ "name": "receiver",
+ "type": "name"
+ },
+ {
+ "name": "loan_payment",
+ "type": "asset"
+ },
+ {
+ "name": "loan_fund",
+ "type": "asset"
+ }
+ ]
+ },
+ {
+ "name": "rentnet",
+ "base": "",
+ "fields": [
+ {
+ "name": "from",
+ "type": "name"
+ },
+ {
+ "name": "receiver",
+ "type": "name"
+ },
+ {
+ "name": "loan_payment",
+ "type": "asset"
+ },
+ {
+ "name": "loan_fund",
+ "type": "asset"
+ }
+ ]
+ },
+ {
+ "name": "rex_balance",
+ "base": "",
+ "fields": [
+ {
+ "name": "version",
+ "type": "uint8"
+ },
+ {
+ "name": "owner",
+ "type": "name"
+ },
+ {
+ "name": "vote_stake",
+ "type": "asset"
+ },
+ {
+ "name": "rex_balance",
+ "type": "asset"
+ },
+ {
+ "name": "matured_rex",
+ "type": "int64"
+ },
+ {
+ "name": "rex_maturities",
+ "type": "pair_time_point_sec_int64[]"
+ }
+ ]
+ },
+ {
+ "name": "rex_fund",
+ "base": "",
+ "fields": [
+ {
+ "name": "version",
+ "type": "uint8"
+ },
+ {
+ "name": "owner",
+ "type": "name"
+ },
+ {
+ "name": "balance",
+ "type": "asset"
+ }
+ ]
+ },
+ {
+ "name": "rex_loan",
+ "base": "",
+ "fields": [
+ {
+ "name": "version",
+ "type": "uint8"
+ },
+ {
+ "name": "from",
+ "type": "name"
+ },
+ {
+ "name": "receiver",
+ "type": "name"
+ },
+ {
+ "name": "payment",
+ "type": "asset"
+ },
+ {
+ "name": "balance",
+ "type": "asset"
+ },
+ {
+ "name": "total_staked",
+ "type": "asset"
+ },
+ {
+ "name": "loan_num",
+ "type": "uint64"
+ },
+ {
+ "name": "expiration",
+ "type": "time_point"
+ }
+ ]
+ },
+ {
+ "name": "rex_order",
+ "base": "",
+ "fields": [
+ {
+ "name": "version",
+ "type": "uint8"
+ },
+ {
+ "name": "owner",
+ "type": "name"
+ },
+ {
+ "name": "rex_requested",
+ "type": "asset"
+ },
+ {
+ "name": "proceeds",
+ "type": "asset"
+ },
+ {
+ "name": "stake_change",
+ "type": "asset"
+ },
+ {
+ "name": "order_time",
+ "type": "time_point"
+ },
+ {
+ "name": "is_open",
+ "type": "bool"
+ }
+ ]
+ },
+ {
+ "name": "rex_pool",
+ "base": "",
+ "fields": [
+ {
+ "name": "version",
+ "type": "uint8"
+ },
+ {
+ "name": "total_lent",
+ "type": "asset"
+ },
+ {
+ "name": "total_unlent",
+ "type": "asset"
+ },
+ {
+ "name": "total_rent",
+ "type": "asset"
+ },
+ {
+ "name": "total_lendable",
+ "type": "asset"
+ },
+ {
+ "name": "total_rex",
+ "type": "asset"
+ },
+ {
+ "name": "namebid_proceeds",
+ "type": "asset"
+ },
+ {
+ "name": "loan_num",
+ "type": "uint64"
+ }
+ ]
+ },
+ {
+ "name": "rex_return_buckets",
+ "base": "",
+ "fields": [
+ {
+ "name": "version",
+ "type": "uint8"
+ },
+ {
+ "name": "return_buckets",
+ "type": "pair_time_point_sec_int64[]"
+ }
+ ]
+ },
+ {
+ "name": "rex_return_pool",
+ "base": "",
+ "fields": [
+ {
+ "name": "version",
+ "type": "uint8"
+ },
+ {
+ "name": "last_dist_time",
+ "type": "time_point_sec"
+ },
+ {
+ "name": "pending_bucket_time",
+ "type": "time_point_sec"
+ },
+ {
+ "name": "oldest_bucket_time",
+ "type": "time_point_sec"
+ },
+ {
+ "name": "pending_bucket_proceeds",
+ "type": "int64"
+ },
+ {
+ "name": "current_rate_of_increase",
+ "type": "int64"
+ },
+ {
+ "name": "proceeds",
+ "type": "int64"
+ }
+ ]
+ },
+ {
+ "name": "rexexec",
+ "base": "",
+ "fields": [
+ {
+ "name": "user",
+ "type": "name"
+ },
+ {
+ "name": "max",
+ "type": "uint16"
+ }
+ ]
+ },
+ {
+ "name": "rmvproducer",
+ "base": "",
+ "fields": [
+ {
+ "name": "producer",
+ "type": "name"
+ }
+ ]
+ },
+ {
+ "name": "sellram",
+ "base": "",
+ "fields": [
+ {
+ "name": "account",
+ "type": "name"
+ },
+ {
+ "name": "bytes",
+ "type": "int64"
+ }
+ ]
+ },
+ {
+ "name": "sellrex",
+ "base": "",
+ "fields": [
+ {
+ "name": "from",
+ "type": "name"
+ },
+ {
+ "name": "rex",
+ "type": "asset"
+ }
+ ]
+ },
+ {
+ "name": "setabi",
+ "base": "",
+ "fields": [
+ {
+ "name": "account",
+ "type": "name"
+ },
+ {
+ "name": "abi",
+ "type": "bytes"
+ }
+ ]
+ },
+ {
+ "name": "setacctcpu",
+ "base": "",
+ "fields": [
+ {
+ "name": "account",
+ "type": "name"
+ },
+ {
+ "name": "cpu_weight",
+ "type": "int64?"
+ }
+ ]
+ },
+ {
+ "name": "setacctnet",
+ "base": "",
+ "fields": [
+ {
+ "name": "account",
+ "type": "name"
+ },
+ {
+ "name": "net_weight",
+ "type": "int64?"
+ }
+ ]
+ },
+ {
+ "name": "setacctram",
+ "base": "",
+ "fields": [
+ {
+ "name": "account",
+ "type": "name"
+ },
+ {
+ "name": "ram_bytes",
+ "type": "int64?"
+ }
+ ]
+ },
+ {
+ "name": "setalimits",
+ "base": "",
+ "fields": [
+ {
+ "name": "account",
+ "type": "name"
+ },
+ {
+ "name": "ram_bytes",
+ "type": "int64"
+ },
+ {
+ "name": "net_weight",
+ "type": "int64"
+ },
+ {
+ "name": "cpu_weight",
+ "type": "int64"
+ }
+ ]
+ },
+ {
+ "name": "setcode",
+ "base": "",
+ "fields": [
+ {
+ "name": "account",
+ "type": "name"
+ },
+ {
+ "name": "vmtype",
+ "type": "uint8"
+ },
+ {
+ "name": "vmversion",
+ "type": "uint8"
+ },
+ {
+ "name": "code",
+ "type": "bytes"
+ }
+ ]
+ },
+ {
+ "name": "setinflation",
+ "base": "",
+ "fields": [
+ {
+ "name": "annual_rate",
+ "type": "int64"
+ },
+ {
+ "name": "inflation_pay_factor",
+ "type": "int64"
+ },
+ {
+ "name": "votepay_factor",
+ "type": "int64"
+ }
+ ]
+ },
+ {
+ "name": "setparams",
+ "base": "",
+ "fields": [
+ {
+ "name": "params",
+ "type": "blockchain_parameters"
+ }
+ ]
+ },
+ {
+ "name": "setpriv",
+ "base": "",
+ "fields": [
+ {
+ "name": "account",
+ "type": "name"
+ },
+ {
+ "name": "is_priv",
+ "type": "uint8"
+ }
+ ]
+ },
+ {
+ "name": "setram",
+ "base": "",
+ "fields": [
+ {
+ "name": "max_ram_size",
+ "type": "uint64"
+ }
+ ]
+ },
+ {
+ "name": "setramrate",
+ "base": "",
+ "fields": [
+ {
+ "name": "bytes_per_block",
+ "type": "uint16"
+ }
+ ]
+ },
+ {
+ "name": "setrex",
+ "base": "",
+ "fields": [
+ {
+ "name": "balance",
+ "type": "asset"
+ }
+ ]
+ },
+ {
+ "name": "undelegatebw",
+ "base": "",
+ "fields": [
+ {
+ "name": "from",
+ "type": "name"
+ },
+ {
+ "name": "receiver",
+ "type": "name"
+ },
+ {
+ "name": "unstake_net_quantity",
+ "type": "asset"
+ },
+ {
+ "name": "unstake_cpu_quantity",
+ "type": "asset"
+ }
+ ]
+ },
+ {
+ "name": "unlinkauth",
+ "base": "",
+ "fields": [
+ {
+ "name": "account",
+ "type": "name"
+ },
+ {
+ "name": "code",
+ "type": "name"
+ },
+ {
+ "name": "type",
+ "type": "name"
+ }
+ ]
+ },
+ {
+ "name": "unregprod",
+ "base": "",
+ "fields": [
+ {
+ "name": "producer",
+ "type": "name"
+ }
+ ]
+ },
+ {
+ "name": "unstaketorex",
+ "base": "",
+ "fields": [
+ {
+ "name": "owner",
+ "type": "name"
+ },
+ {
+ "name": "receiver",
+ "type": "name"
+ },
+ {
+ "name": "from_net",
+ "type": "asset"
+ },
+ {
+ "name": "from_cpu",
+ "type": "asset"
+ }
+ ]
+ },
+ {
+ "name": "updateauth",
+ "base": "",
+ "fields": [
+ {
+ "name": "account",
+ "type": "name"
+ },
+ {
+ "name": "permission",
+ "type": "name"
+ },
+ {
+ "name": "parent",
+ "type": "name"
+ },
+ {
+ "name": "auth",
+ "type": "authority"
+ }
+ ]
+ },
+ {
+ "name": "updaterex",
+ "base": "",
+ "fields": [
+ {
+ "name": "owner",
+ "type": "name"
+ }
+ ]
+ },
+ {
+ "name": "updtrevision",
+ "base": "",
+ "fields": [
+ {
+ "name": "revision",
+ "type": "uint8"
+ }
+ ]
+ },
+ {
+ "name": "user_resources",
+ "base": "",
+ "fields": [
+ {
+ "name": "owner",
+ "type": "name"
+ },
+ {
+ "name": "net_weight",
+ "type": "asset"
+ },
+ {
+ "name": "cpu_weight",
+ "type": "asset"
+ },
+ {
+ "name": "ram_bytes",
+ "type": "int64"
+ }
+ ]
+ },
+ {
+ "name": "voteproducer",
+ "base": "",
+ "fields": [
+ {
+ "name": "voter",
+ "type": "name"
+ },
+ {
+ "name": "proxy",
+ "type": "name"
+ },
+ {
+ "name": "producers",
+ "type": "name[]"
+ }
+ ]
+ },
+ {
+ "name": "voter_info",
+ "base": "",
+ "fields": [
+ {
+ "name": "owner",
+ "type": "name"
+ },
+ {
+ "name": "proxy",
+ "type": "name"
+ },
+ {
+ "name": "producers",
+ "type": "name[]"
+ },
+ {
+ "name": "staked",
+ "type": "int64"
+ },
+ {
+ "name": "last_vote_weight",
+ "type": "float64"
+ },
+ {
+ "name": "proxied_vote_weight",
+ "type": "float64"
+ },
+ {
+ "name": "is_proxy",
+ "type": "bool"
+ },
+ {
+ "name": "flags1",
+ "type": "uint32"
+ },
+ {
+ "name": "reserved2",
+ "type": "uint32"
+ },
+ {
+ "name": "reserved3",
+ "type": "asset"
+ }
+ ]
+ },
+ {
+ "name": "wait_weight",
+ "base": "",
+ "fields": [
+ {
+ "name": "wait_sec",
+ "type": "uint32"
+ },
+ {
+ "name": "weight",
+ "type": "uint16"
+ }
+ ]
+ },
+ {
+ "name": "withdraw",
+ "base": "",
+ "fields": [
+ {
+ "name": "owner",
+ "type": "name"
+ },
+ {
+ "name": "amount",
+ "type": "asset"
+ }
+ ]
+ }
+ ],
+ "actions": [
+ {
+ "name": "activate",
+ "type": "activate",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "bidname",
+ "type": "bidname",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "bidrefund",
+ "type": "bidrefund",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "buyram",
+ "type": "buyram",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "buyrambytes",
+ "type": "buyrambytes",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "buyrex",
+ "type": "buyrex",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "canceldelay",
+ "type": "canceldelay",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "claimrewards",
+ "type": "claimrewards",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "closerex",
+ "type": "closerex",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "cnclrexorder",
+ "type": "cnclrexorder",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "consolidate",
+ "type": "consolidate",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "defcpuloan",
+ "type": "defcpuloan",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "defnetloan",
+ "type": "defnetloan",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "delegatebw",
+ "type": "delegatebw",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "deleteauth",
+ "type": "deleteauth",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "deposit",
+ "type": "deposit",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "fundcpuloan",
+ "type": "fundcpuloan",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "fundnetloan",
+ "type": "fundnetloan",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "init",
+ "type": "init",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "linkauth",
+ "type": "linkauth",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "mvfrsavings",
+ "type": "mvfrsavings",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "mvtosavings",
+ "type": "mvtosavings",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "newaccount",
+ "type": "newaccount",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "onblock",
+ "type": "onblock",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "onerror",
+ "type": "onerror",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "refund",
+ "type": "refund",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "regproducer",
+ "type": "regproducer",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "regproxy",
+ "type": "regproxy",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "rentcpu",
+ "type": "rentcpu",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "rentnet",
+ "type": "rentnet",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "rexexec",
+ "type": "rexexec",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "rmvproducer",
+ "type": "rmvproducer",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "sellram",
+ "type": "sellram",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "sellrex",
+ "type": "sellrex",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "setabi",
+ "type": "setabi",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "setacctcpu",
+ "type": "setacctcpu",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "setacctnet",
+ "type": "setacctnet",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "setacctram",
+ "type": "setacctram",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "setalimits",
+ "type": "setalimits",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "setcode",
+ "type": "setcode",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "setinflation",
+ "type": "setinflation",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "setparams",
+ "type": "setparams",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "setpriv",
+ "type": "setpriv",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "setram",
+ "type": "setram",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "setramrate",
+ "type": "setramrate",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "setrex",
+ "type": "setrex",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "undelegatebw",
+ "type": "undelegatebw",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "unlinkauth",
+ "type": "unlinkauth",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "unregprod",
+ "type": "unregprod",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "unstaketorex",
+ "type": "unstaketorex",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "updateauth",
+ "type": "updateauth",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "updaterex",
+ "type": "updaterex",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "updtrevision",
+ "type": "updtrevision",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "voteproducer",
+ "type": "voteproducer",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "withdraw",
+ "type": "withdraw",
+ "ricardian_contract": ""
+ }
+ ],
+ "tables": [
+ {
+ "name": "abihash",
+ "index_type": "i64",
+ "key_names": [],
+ "key_types": [],
+ "type": "abi_hash"
+ },
+ {
+ "name": "bidrefunds",
+ "index_type": "i64",
+ "key_names": [],
+ "key_types": [],
+ "type": "bid_refund"
+ },
+ {
+ "name": "cpuloan",
+ "index_type": "i64",
+ "key_names": [],
+ "key_types": [],
+ "type": "rex_loan"
+ },
+ {
+ "name": "delband",
+ "index_type": "i64",
+ "key_names": [],
+ "key_types": [],
+ "type": "delegated_bandwidth"
+ },
+ {
+ "name": "global",
+ "index_type": "i64",
+ "key_names": [],
+ "key_types": [],
+ "type": "eosio_global_state"
+ },
+ {
+ "name": "global2",
+ "index_type": "i64",
+ "key_names": [],
+ "key_types": [],
+ "type": "eosio_global_state2"
+ },
+ {
+ "name": "global3",
+ "index_type": "i64",
+ "key_names": [],
+ "key_types": [],
+ "type": "eosio_global_state3"
+ },
+ {
+ "name": "global4",
+ "index_type": "i64",
+ "key_names": [],
+ "key_types": [],
+ "type": "eosio_global_state4"
+ },
+ {
+ "name": "namebids",
+ "index_type": "i64",
+ "key_names": [],
+ "key_types": [],
+ "type": "name_bid"
+ },
+ {
+ "name": "netloan",
+ "index_type": "i64",
+ "key_names": [],
+ "key_types": [],
+ "type": "rex_loan"
+ },
+ {
+ "name": "producers",
+ "index_type": "i64",
+ "key_names": [],
+ "key_types": [],
+ "type": "producer_info"
+ },
+ {
+ "name": "producers2",
+ "index_type": "i64",
+ "key_names": [],
+ "key_types": [],
+ "type": "producer_info2"
+ },
+ {
+ "name": "rammarket",
+ "index_type": "i64",
+ "key_names": [],
+ "key_types": [],
+ "type": "exchange_state"
+ },
+ {
+ "name": "refunds",
+ "index_type": "i64",
+ "key_names": [],
+ "key_types": [],
+ "type": "refund_request"
+ },
+ {
+ "name": "retbuckets",
+ "index_type": "i64",
+ "key_names": [],
+ "key_types": [],
+ "type": "rex_return_buckets"
+ },
+ {
+ "name": "rexbal",
+ "index_type": "i64",
+ "key_names": [],
+ "key_types": [],
+ "type": "rex_balance"
+ },
+ {
+ "name": "rexfund",
+ "index_type": "i64",
+ "key_names": [],
+ "key_types": [],
+ "type": "rex_fund"
+ },
+ {
+ "name": "rexpool",
+ "index_type": "i64",
+ "key_names": [],
+ "key_types": [],
+ "type": "rex_pool"
+ },
+ {
+ "name": "rexqueue",
+ "index_type": "i64",
+ "key_names": [],
+ "key_types": [],
+ "type": "rex_order"
+ },
+ {
+ "name": "rexretpool",
+ "index_type": "i64",
+ "key_names": [],
+ "key_types": [],
+ "type": "rex_return_pool"
+ },
+ {
+ "name": "userres",
+ "index_type": "i64",
+ "key_names": [],
+ "key_types": [],
+ "type": "user_resources"
+ },
+ {
+ "name": "voters",
+ "index_type": "i64",
+ "key_names": [],
+ "key_types": [],
+ "type": "voter_info"
+ }
+ ],
+ "ricardian_clauses": [],
+ "error_messages": [],
+ "abi_extensions": [],
+ "variants": []
+}
diff --git a/plugins/trace_api_plugin/examples/abis/eosio.msig.abi b/plugins/trace_api_plugin/examples/abis/eosio.msig.abi
new file mode 100644
index 00000000000..f2c32898f1b
--- /dev/null
+++ b/plugins/trace_api_plugin/examples/abis/eosio.msig.abi
@@ -0,0 +1,360 @@
+{
+ "version": "eosio::abi/1.1",
+ "types": [],
+ "structs": [
+ {
+ "name": "action",
+ "base": "",
+ "fields": [
+ {
+ "name": "account",
+ "type": "name"
+ },
+ {
+ "name": "name",
+ "type": "name"
+ },
+ {
+ "name": "authorization",
+ "type": "permission_level[]"
+ },
+ {
+ "name": "data",
+ "type": "bytes"
+ }
+ ]
+ },
+ {
+ "name": "approval",
+ "base": "",
+ "fields": [
+ {
+ "name": "level",
+ "type": "permission_level"
+ },
+ {
+ "name": "time",
+ "type": "time_point"
+ }
+ ]
+ },
+ {
+ "name": "approvals_info",
+ "base": "",
+ "fields": [
+ {
+ "name": "version",
+ "type": "uint8"
+ },
+ {
+ "name": "proposal_name",
+ "type": "name"
+ },
+ {
+ "name": "requested_approvals",
+ "type": "approval[]"
+ },
+ {
+ "name": "provided_approvals",
+ "type": "approval[]"
+ }
+ ]
+ },
+ {
+ "name": "approve",
+ "base": "",
+ "fields": [
+ {
+ "name": "proposer",
+ "type": "name"
+ },
+ {
+ "name": "proposal_name",
+ "type": "name"
+ },
+ {
+ "name": "level",
+ "type": "permission_level"
+ },
+ {
+ "name": "proposal_hash",
+ "type": "checksum256$"
+ }
+ ]
+ },
+ {
+ "name": "cancel",
+ "base": "",
+ "fields": [
+ {
+ "name": "proposer",
+ "type": "name"
+ },
+ {
+ "name": "proposal_name",
+ "type": "name"
+ },
+ {
+ "name": "canceler",
+ "type": "name"
+ }
+ ]
+ },
+ {
+ "name": "exec",
+ "base": "",
+ "fields": [
+ {
+ "name": "proposer",
+ "type": "name"
+ },
+ {
+ "name": "proposal_name",
+ "type": "name"
+ },
+ {
+ "name": "executer",
+ "type": "name"
+ }
+ ]
+ },
+ {
+ "name": "extension",
+ "base": "",
+ "fields": [
+ {
+ "name": "type",
+ "type": "uint16"
+ },
+ {
+ "name": "data",
+ "type": "bytes"
+ }
+ ]
+ },
+ {
+ "name": "invalidate",
+ "base": "",
+ "fields": [
+ {
+ "name": "account",
+ "type": "name"
+ }
+ ]
+ },
+ {
+ "name": "invalidation",
+ "base": "",
+ "fields": [
+ {
+ "name": "account",
+ "type": "name"
+ },
+ {
+ "name": "last_invalidation_time",
+ "type": "time_point"
+ }
+ ]
+ },
+ {
+ "name": "old_approvals_info",
+ "base": "",
+ "fields": [
+ {
+ "name": "proposal_name",
+ "type": "name"
+ },
+ {
+ "name": "requested_approvals",
+ "type": "permission_level[]"
+ },
+ {
+ "name": "provided_approvals",
+ "type": "permission_level[]"
+ }
+ ]
+ },
+ {
+ "name": "permission_level",
+ "base": "",
+ "fields": [
+ {
+ "name": "actor",
+ "type": "name"
+ },
+ {
+ "name": "permission",
+ "type": "name"
+ }
+ ]
+ },
+ {
+ "name": "proposal",
+ "base": "",
+ "fields": [
+ {
+ "name": "proposal_name",
+ "type": "name"
+ },
+ {
+ "name": "packed_transaction",
+ "type": "bytes"
+ }
+ ]
+ },
+ {
+ "name": "propose",
+ "base": "",
+ "fields": [
+ {
+ "name": "proposer",
+ "type": "name"
+ },
+ {
+ "name": "proposal_name",
+ "type": "name"
+ },
+ {
+ "name": "requested",
+ "type": "permission_level[]"
+ },
+ {
+ "name": "trx",
+ "type": "transaction"
+ }
+ ]
+ },
+ {
+ "name": "transaction",
+ "base": "transaction_header",
+ "fields": [
+ {
+ "name": "context_free_actions",
+ "type": "action[]"
+ },
+ {
+ "name": "actions",
+ "type": "action[]"
+ },
+ {
+ "name": "transaction_extensions",
+ "type": "extension[]"
+ }
+ ]
+ },
+ {
+ "name": "transaction_header",
+ "base": "",
+ "fields": [
+ {
+ "name": "expiration",
+ "type": "time_point_sec"
+ },
+ {
+ "name": "ref_block_num",
+ "type": "uint16"
+ },
+ {
+ "name": "ref_block_prefix",
+ "type": "uint32"
+ },
+ {
+ "name": "max_net_usage_words",
+ "type": "varuint32"
+ },
+ {
+ "name": "max_cpu_usage_ms",
+ "type": "uint8"
+ },
+ {
+ "name": "delay_sec",
+ "type": "varuint32"
+ }
+ ]
+ },
+ {
+ "name": "unapprove",
+ "base": "",
+ "fields": [
+ {
+ "name": "proposer",
+ "type": "name"
+ },
+ {
+ "name": "proposal_name",
+ "type": "name"
+ },
+ {
+ "name": "level",
+ "type": "permission_level"
+ }
+ ]
+ }
+ ],
+ "actions": [
+ {
+ "name": "approve",
+ "type": "approve",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "cancel",
+ "type": "cancel",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "exec",
+ "type": "exec",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "invalidate",
+ "type": "invalidate",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "propose",
+ "type": "propose",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "unapprove",
+ "type": "unapprove",
+ "ricardian_contract": ""
+ }
+ ],
+ "tables": [
+ {
+ "name": "approvals",
+ "index_type": "i64",
+ "key_names": [],
+ "key_types": [],
+ "type": "old_approvals_info"
+ },
+ {
+ "name": "approvals2",
+ "index_type": "i64",
+ "key_names": [],
+ "key_types": [],
+ "type": "approvals_info"
+ },
+ {
+ "name": "invals",
+ "index_type": "i64",
+ "key_names": [],
+ "key_types": [],
+ "type": "invalidation"
+ },
+ {
+ "name": "proposal",
+ "index_type": "i64",
+ "key_names": [],
+ "key_types": [],
+ "type": "proposal"
+ }
+ ],
+ "ricardian_clauses": [],
+ "error_messages": [],
+ "abi_extensions": [],
+ "variants": []
+}
diff --git a/plugins/trace_api_plugin/examples/abis/eosio.token.abi b/plugins/trace_api_plugin/examples/abis/eosio.token.abi
new file mode 100644
index 00000000000..6d3421c17c9
--- /dev/null
+++ b/plugins/trace_api_plugin/examples/abis/eosio.token.abi
@@ -0,0 +1,186 @@
+{
+ "version": "eosio::abi/1.1",
+ "types": [],
+ "structs": [
+ {
+ "name": "account",
+ "base": "",
+ "fields": [
+ {
+ "name": "balance",
+ "type": "asset"
+ }
+ ]
+ },
+ {
+ "name": "close",
+ "base": "",
+ "fields": [
+ {
+ "name": "owner",
+ "type": "name"
+ },
+ {
+ "name": "symbol",
+ "type": "symbol"
+ }
+ ]
+ },
+ {
+ "name": "create",
+ "base": "",
+ "fields": [
+ {
+ "name": "issuer",
+ "type": "name"
+ },
+ {
+ "name": "maximum_supply",
+ "type": "asset"
+ }
+ ]
+ },
+ {
+ "name": "currency_stats",
+ "base": "",
+ "fields": [
+ {
+ "name": "supply",
+ "type": "asset"
+ },
+ {
+ "name": "max_supply",
+ "type": "asset"
+ },
+ {
+ "name": "issuer",
+ "type": "name"
+ }
+ ]
+ },
+ {
+ "name": "issue",
+ "base": "",
+ "fields": [
+ {
+ "name": "to",
+ "type": "name"
+ },
+ {
+ "name": "quantity",
+ "type": "asset"
+ },
+ {
+ "name": "memo",
+ "type": "string"
+ }
+ ]
+ },
+ {
+ "name": "open",
+ "base": "",
+ "fields": [
+ {
+ "name": "owner",
+ "type": "name"
+ },
+ {
+ "name": "symbol",
+ "type": "symbol"
+ },
+ {
+ "name": "ram_payer",
+ "type": "name"
+ }
+ ]
+ },
+ {
+ "name": "retire",
+ "base": "",
+ "fields": [
+ {
+ "name": "quantity",
+ "type": "asset"
+ },
+ {
+ "name": "memo",
+ "type": "string"
+ }
+ ]
+ },
+ {
+ "name": "transfer",
+ "base": "",
+ "fields": [
+ {
+ "name": "from",
+ "type": "name"
+ },
+ {
+ "name": "to",
+ "type": "name"
+ },
+ {
+ "name": "quantity",
+ "type": "asset"
+ },
+ {
+ "name": "memo",
+ "type": "string"
+ }
+ ]
+ }
+ ],
+ "actions": [
+ {
+ "name": "close",
+ "type": "close",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "create",
+ "type": "create",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "issue",
+ "type": "issue",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "open",
+ "type": "open",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "retire",
+ "type": "retire",
+ "ricardian_contract": ""
+ },
+ {
+ "name": "transfer",
+ "type": "transfer",
+ "ricardian_contract": ""
+ }
+ ],
+ "tables": [
+ {
+ "name": "accounts",
+ "index_type": "i64",
+ "key_names": [],
+ "key_types": [],
+ "type": "account"
+ },
+ {
+ "name": "stat",
+ "index_type": "i64",
+ "key_names": [],
+ "key_types": [],
+ "type": "currency_stats"
+ }
+ ],
+ "ricardian_clauses": [],
+ "error_messages": [],
+ "abi_extensions": [],
+ "variants": []
+}
diff --git a/plugins/trace_api_plugin/examples/abis/eosio.wrap.abi b/plugins/trace_api_plugin/examples/abis/eosio.wrap.abi
new file mode 100644
index 00000000000..aaa54848432
--- /dev/null
+++ b/plugins/trace_api_plugin/examples/abis/eosio.wrap.abi
@@ -0,0 +1,143 @@
+{
+ "version": "eosio::abi/1.0",
+ "types": [
+ {
+ "new_type_name": "account_name",
+ "type": "name"
+ },
+ {
+ "new_type_name": "permission_name",
+ "type": "name"
+ },
+ {
+ "new_type_name": "action_name",
+ "type": "name"
+ }
+ ],
+ "structs": [
+ {
+ "name": "permission_level",
+ "base": "",
+ "fields": [
+ {
+ "name": "actor",
+ "type": "account_name"
+ },
+ {
+ "name": "permission",
+ "type": "permission_name"
+ }
+ ]
+ },
+ {
+ "name": "action",
+ "base": "",
+ "fields": [
+ {
+ "name": "account",
+ "type": "account_name"
+ },
+ {
+ "name": "name",
+ "type": "action_name"
+ },
+ {
+ "name": "authorization",
+ "type": "permission_level[]"
+ },
+ {
+ "name": "data",
+ "type": "bytes"
+ }
+ ]
+ },
+ {
+ "name": "transaction_header",
+ "base": "",
+ "fields": [
+ {
+ "name": "expiration",
+ "type": "time_point_sec"
+ },
+ {
+ "name": "ref_block_num",
+ "type": "uint16"
+ },
+ {
+ "name": "ref_block_prefix",
+ "type": "uint32"
+ },
+ {
+ "name": "max_net_usage_words",
+ "type": "varuint32"
+ },
+ {
+ "name": "max_cpu_usage_ms",
+ "type": "uint8"
+ },
+ {
+ "name": "delay_sec",
+ "type": "varuint32"
+ }
+ ]
+ },
+ {
+ "name": "extension",
+ "base": "",
+ "fields": [
+ {
+ "name": "type",
+ "type": "uint16"
+ },
+ {
+ "name": "data",
+ "type": "bytes"
+ }
+ ]
+ },
+ {
+ "name": "transaction",
+ "base": "transaction_header",
+ "fields": [
+ {
+ "name": "context_free_actions",
+ "type": "action[]"
+ },
+ {
+ "name": "actions",
+ "type": "action[]"
+ },
+ {
+ "name": "transaction_extensions",
+ "type": "extension[]"
+ }
+ ]
+ },
+ {
+ "name": "exec",
+ "base": "",
+ "fields": [
+ {
+ "name": "executer",
+ "type": "account_name"
+ },
+ {
+ "name": "trx",
+ "type": "transaction"
+ }
+ ]
+ }
+ ],
+ "actions": [
+ {
+ "name": "exec",
+ "type": "exec",
+ "ricardian_contract": ""
+ }
+ ],
+ "tables": [],
+ "ricardian_clauses": [],
+ "error_messages": [],
+ "abi_extensions": [],
+ "variants": []
+}
diff --git a/plugins/trace_api_plugin/include/eosio/trace_api/abi_data_handler.hpp b/plugins/trace_api_plugin/include/eosio/trace_api/abi_data_handler.hpp
new file mode 100644
index 00000000000..d6c2355f59a
--- /dev/null
+++ b/plugins/trace_api_plugin/include/eosio/trace_api/abi_data_handler.hpp
@@ -0,0 +1,62 @@
+#pragma once
+
+#include <eosio/trace_api/trace.hpp>
+#include <eosio/trace_api/common.hpp>
+#include <eosio/chain/abi_def.hpp>
+
+namespace eosio {
+ namespace chain {
+ struct abi_serializer;
+ }
+
+ namespace trace_api {
+
+ /**
+ * Data Handler that uses eosio::chain::abi_serializer to decode data with a known set of ABI's
+ * Can be used directly as a Data_handler_provider OR shared between request_handlers using the
+ * ::shared_provider abstraction.
+ */
+ class abi_data_handler {
+ public:
+ explicit abi_data_handler( exception_handler except_handler = {} )
+ :except_handler( std::move( except_handler ) )
+ {
+ }
+
+ /**
+ * Add an ABI definition to this data handler
+ * @param name - the name of the account/contract that this ABI belongs to
+ * @param abi - the ABI definition of that ABI
+ */
+ void add_abi( const chain::name& name, const chain::abi_def& abi );
+
+ /**
+ * Given an action trace, produce a variant that represents the `data` field in the trace
+ *
+ * @param action - trace of the action including metadata necessary for finding the ABI
+ * @param yield - a yield function to allow cooperation during long running tasks
+ * @return variant representing the `data` field of the action interpreted by known ABIs OR an empty variant
+ */
+ fc::variant process_data( const action_trace_v0& action, const yield_function& yield = {});
+
+ /**
+ * Utility class that allows mulitple request_handlers to share the same abi_data_handler
+ */
+ class shared_provider {
+ public:
+ explicit shared_provider(const std::shared_ptr<abi_data_handler>& handler)
+ :handler(handler)
+ {}
+
+ fc::variant process_data( const action_trace_v0& action, const yield_function& yield = {}) {
+ return handler->process_data(action, yield);
+ }
+
+ std::shared_ptr<abi_data_handler> handler;
+ };
+
+ private:
+ std::map<chain::name, std::shared_ptr<chain::abi_serializer>> abi_serializer_by_account;
+ exception_handler except_handler;
+ };
+} }
diff --git a/plugins/trace_api_plugin/include/eosio/trace_api/chain_extraction.hpp b/plugins/trace_api_plugin/include/eosio/trace_api/chain_extraction.hpp
new file mode 100644
index 00000000000..bf5e2506775
--- /dev/null
+++ b/plugins/trace_api_plugin/include/eosio/trace_api/chain_extraction.hpp
@@ -0,0 +1,126 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+#include
+#include