diff --git a/.cicd/build.sh b/.cicd/build.sh
index 2688b65413a..c549a449753 100755
--- a/.cicd/build.sh
+++ b/.cicd/build.sh
@@ -2,47 +2,57 @@
 set -eo pipefail
 . ./.cicd/helpers/general.sh
 mkdir -p $BUILD_DIR
-CMAKE_EXTRAS="-DBUILD_MONGO_DB_PLUGIN=true -DCMAKE_BUILD_TYPE='Release'"
-if [[ $(uname) == 'Darwin' ]]; then
+CMAKE_EXTRAS="-DCMAKE_BUILD_TYPE='Release'"
+if [[ "$(uname)" == 'Darwin' ]]; then
     # You can't use chained commands in execute
-    [[ $TRAVIS == true ]] && export PINNED=false && ccache -s && CMAKE_EXTRAS="-DCMAKE_CXX_COMPILER_LAUNCHER=ccache" && ./$CICD_DIR/platforms/macos-10.14.sh
-    ( [[ ! $PINNED == false || $UNPINNED == true ]] ) && CMAKE_EXTRAS="$CMAKE_EXTRAS -DCMAKE_TOOLCHAIN_FILE=$HELPERS_DIR/clang.make"
+    if [[ "$TRAVIS" == 'true' ]]; then
+        export PINNED=false
+        ccache -s
+        CMAKE_EXTRAS="$CMAKE_EXTRAS -DCMAKE_CXX_COMPILER_LAUNCHER=ccache"
+        ./$CICD_DIR/platforms/macos-10.14.sh
+    else
+        CMAKE_EXTRAS="$CMAKE_EXTRAS -DBUILD_MONGO_DB_PLUGIN=true"
+    fi
+    [[ ! "$PINNED" == 'false' || "$UNPINNED" == 'true' ]] && CMAKE_EXTRAS="$CMAKE_EXTRAS -DCMAKE_TOOLCHAIN_FILE=$HELPERS_DIR/clang.make"
     cd $BUILD_DIR
+    echo "cmake $CMAKE_EXTRAS .."
     cmake $CMAKE_EXTRAS ..
+    echo "make -j$JOBS"
     make -j$JOBS
 else # Linux
+    CMAKE_EXTRAS="$CMAKE_EXTRAS -DBUILD_MONGO_DB_PLUGIN=true"
     ARGS=${ARGS:-"--rm --init -v $(pwd):$MOUNTED_DIR"}
     . $HELPERS_DIR/file-hash.sh $CICD_DIR/platforms/$IMAGE_TAG.dockerfile
     PRE_COMMANDS="cd $MOUNTED_DIR/build"
     # PRE_COMMANDS: Executed pre-cmake
     # CMAKE_EXTRAS: Executed within and right before the cmake path (cmake CMAKE_EXTRAS ..)
-    [[ ! $IMAGE_TAG =~ 'unpinned' ]] && CMAKE_EXTRAS="$CMAKE_EXTRAS -DCMAKE_TOOLCHAIN_FILE=$MOUNTED_DIR/.cicd/helpers/clang.make -DCMAKE_CXX_COMPILER_LAUNCHER=ccache"
-    if [[ $IMAGE_TAG == 'amazon_linux-2' ]]; then
+    [[ ! "$IMAGE_TAG" =~ 'unpinned' ]] && CMAKE_EXTRAS="$CMAKE_EXTRAS -DCMAKE_TOOLCHAIN_FILE=$MOUNTED_DIR/.cicd/helpers/clang.make -DCMAKE_CXX_COMPILER_LAUNCHER=ccache"
+    if [[ "$IMAGE_TAG" == 'amazon_linux-2' ]]; then
         PRE_COMMANDS="$PRE_COMMANDS && export PATH=/usr/lib64/ccache:\\\$PATH"
-    elif [[ $IMAGE_TAG == 'centos-7.6' ]]; then
+    elif [[ "$IMAGE_TAG" == 'centos-7.6' ]]; then
         PRE_COMMANDS="$PRE_COMMANDS && export PATH=/usr/lib64/ccache:\\\$PATH"
-    elif [[ $IMAGE_TAG == 'ubuntu-16.04' ]]; then
+    elif [[ "$IMAGE_TAG" == 'ubuntu-16.04' ]]; then
         PRE_COMMANDS="$PRE_COMMANDS && export PATH=/usr/lib/ccache:\\\$PATH"
-    elif [[ $IMAGE_TAG == 'ubuntu-18.04' ]]; then
+    elif [[ "$IMAGE_TAG" == 'ubuntu-18.04' ]]; then
         PRE_COMMANDS="$PRE_COMMANDS && export PATH=/usr/lib/ccache:\\\$PATH"
-    elif [[ $IMAGE_TAG == 'amazon_linux-2-unpinned' ]]; then
+    elif [[ "$IMAGE_TAG" == 'amazon_linux-2-unpinned' ]]; then
         PRE_COMMANDS="$PRE_COMMANDS && export PATH=/usr/lib64/ccache:\\\$PATH"
         CMAKE_EXTRAS="$CMAKE_EXTRAS -DCMAKE_CXX_COMPILER='clang++' -DCMAKE_C_COMPILER='clang'"
-    elif [[ $IMAGE_TAG == 'centos-7.6-unpinned' ]]; then
+    elif [[ "$IMAGE_TAG" == 'centos-7.6-unpinned' ]]; then
         PRE_COMMANDS="$PRE_COMMANDS && source /opt/rh/devtoolset-8/enable && source /opt/rh/rh-python36/enable && export PATH=/usr/lib64/ccache:\\\$PATH"
-    elif [[ $IMAGE_TAG == 'ubuntu-18.04-unpinned' ]]; then
+    elif [[ "$IMAGE_TAG" == 'ubuntu-18.04-unpinned' ]]; then
         PRE_COMMANDS="$PRE_COMMANDS && export PATH=/usr/lib/ccache:\\\$PATH"
         CMAKE_EXTRAS="$CMAKE_EXTRAS -DCMAKE_CXX_COMPILER='clang++' -DCMAKE_C_COMPILER='clang'"
     fi
     BUILD_COMMANDS="cmake $CMAKE_EXTRAS .. && make -j$JOBS"
     # Docker Commands
-    if [[ $BUILDKITE == true ]]; then
+    if [[ "$BUILDKITE" == 'true' ]]; then
         # Generate Base Images
         $CICD_DIR/generate-base-images.sh
-        [[ $ENABLE_INSTALL == true ]] && COMMANDS="cp -r $MOUNTED_DIR /root/eosio && cd /root/eosio/build &&"
+        [[ "$ENABLE_INSTALL" == 'true' ]] && COMMANDS="cp -r $MOUNTED_DIR /root/eosio && cd /root/eosio/build &&"
         COMMANDS="$COMMANDS $BUILD_COMMANDS"
-        [[ $ENABLE_INSTALL == true ]] && COMMANDS="$COMMANDS && make install"
-    elif [[ $TRAVIS == true ]]; then
+        [[ "$ENABLE_INSTALL" == 'true' ]] && COMMANDS="$COMMANDS && make install"
+    elif [[ "$TRAVIS" == 'true' ]]; then
         ARGS="$ARGS -v /usr/lib/ccache -v $HOME/.ccache:/opt/.ccache -e JOBS -e TRAVIS -e CCACHE_DIR=/opt/.ccache"
         COMMANDS="ccache -s && $BUILD_COMMANDS"
     fi
diff --git a/.cicd/generate-pipeline.sh b/.cicd/generate-pipeline.sh
index bd7bc301a8f..54eb5dfbdb8 100755
--- a/.cicd/generate-pipeline.sh
+++ b/.cicd/generate-pipeline.sh
@@ -1,9 +1,9 @@
 #!/bin/bash
 set -eo pipefail
+# environment
 . ./.cicd/helpers/general.sh
 export MOJAVE_ANKA_TAG_BASE=${MOJAVE_ANKA_TAG_BASE:-'clean::cicd::git-ssh::nas::brew::buildkite-agent'}
 export MOJAVE_ANKA_TEMPLATE_NAME=${MOJAVE_ANKA_TEMPLATE_NAME:-'10.14.4_6C_14G_40G'}
-# Use files in platforms dir as source of truth for what platforms we need to generate steps for
 export PLATFORMS_JSON_ARRAY='[]'
 # Determine if it's a forked PR and make sure to add git fetch so we don't have to git clone the forked repo's url
@@ -12,11 +12,13 @@
 if [[ $BUILDKITE_BRANCH =~ ^pull/[0-9]+/head: ]]; then
     export GIT_FETCH="git fetch -v --prune origin refs/pull/$PR_ID/head &&"
 fi
+[[ -z "$ROUNDS" ]] && export ROUNDS='1'
+# read .cicd/platforms
 for FILE in $(ls $CICD_DIR/platforms); do
-    # Ability to skip mac or linux by not even creating the json block
+    # skip mac or linux by not even creating the json block
     ( [[ $SKIP_MAC == true ]] && [[ $FILE =~ 'macos' ]] ) && continue
-    ( [[ $SKIP_LINUX == true ]] && [[ ! $FILE =~ 'macos' ]] ) && continue
-    # Prevent using both platform files (only use unpinned or pinned)
+    ( [[ $SKIP_LINUX == true ]] && [[ ! $FILE =~ 'macos' ]] ) && continue
+    # use pinned or unpinned, not both sets of platform files
     if [[ $PINNED == false || $UNPINNED == true ]] && [[ ! $FILE =~ 'macos' ]]; then
         export SKIP_CONTRACT_BUILDER=${SKIP_CONTRACT_BUILDER:-true}
         export SKIP_PACKAGE_BUILDER=${SKIP_PACKAGE_BUILDER:-true}
@@ -24,14 +26,14 @@
     else
         [[ $FILE =~ 'unpinned' ]] && continue
     fi
-    export FILE_NAME=$(echo $FILE | awk '{split($0,a,/\.(d|s)/); print a[1] }')
-    export PLATFORM_NAME=$(echo $FILE_NAME | cut -d- -f1 | sed 's/os/OS/g')
-    export PLATFORM_NAME_UPCASE=$(echo $PLATFORM_NAME | tr a-z A-Z)
-    export VERSION_MAJOR=$(echo $FILE_NAME | cut -d- -f2 | cut -d. -f1)
-    [[ $(echo $FILE_NAME | cut -d- -f2) =~ '.' ]] && export VERSION_MINOR="_$(echo $FILE_NAME | cut -d- -f2 | cut -d. -f2)" || export VERSION_MINOR=''
-    export VERSION_FULL=$(echo $FILE_NAME | cut -d- -f2)
+    export FILE_NAME="$(echo $FILE | awk '{split($0,a,/\.(d|s)/); print a[1] }')"
+    export PLATFORM_NAME="$(echo $FILE_NAME | cut -d- -f1 | sed 's/os/OS/g')"
+    export PLATFORM_NAME_UPCASE="$(echo $PLATFORM_NAME | tr a-z A-Z)"
+    export VERSION_MAJOR="$(echo $FILE_NAME | cut -d- -f2 | cut -d. -f1)"
+    [[ "$(echo $FILE_NAME | cut -d- -f2)" =~ '.' ]] && export VERSION_MINOR="_$(echo $FILE_NAME | cut -d- -f2 | cut -d. -f2)" || export VERSION_MINOR=''
+    export VERSION_FULL="$(echo $FILE_NAME | cut -d- -f2)"
     OLDIFS=$IFS
-    IFS="_"
+    IFS='_'
     set $PLATFORM_NAME
     IFS=$OLDIFS
     export PLATFORM_NAME_FULL="$(capitalize $1)$( [[ ! -z $2 ]] && echo "_$(capitalize $2)" || true ) $VERSION_FULL"
@@ -53,20 +55,31 @@
         "ICON": env.ICON
     }]')
 done
+# set build_source whether triggered or not
+if [[ ! -z ${BUILDKITE_TRIGGERED_FROM_BUILD_ID} ]]; then
+    export BUILD_SOURCE="--build \$BUILDKITE_TRIGGERED_FROM_BUILD_ID"
+fi
+export BUILD_SOURCE=${BUILD_SOURCE:---build \$BUILDKITE_BUILD_ID}
+# set trigger_job if master/release/develop branch and webhook
+if [[ $BUILDKITE_BRANCH =~ ^release/[0-9]+\.[0-9]+\.x$ || $BUILDKITE_BRANCH =~ ^master$ || $BUILDKITE_BRANCH =~ ^develop$ ]]; then
+    [[ $BUILDKITE_SOURCE == 'webhook' ]] && export TRIGGER_JOB=true
+fi
 oIFS="$IFS"
-IFS=$''
-nIFS=$IFS # Needed to fix array splitting (\n won't work)
-###################
-# Anka Ensure Tag #
-echo $PLATFORMS_JSON_ARRAY | jq -cr ".[]" | while read -r PLATFORM_JSON; do
-    if [[ $(echo "$PLATFORM_JSON" | jq -r .FILE_NAME) =~ 'macos' ]]; then
-        cat <
[... remainder of this hunk and the header of the .cicd/submodule-regression-check.sh diff were lost in extraction ...]
 git submodule update --init &> /dev/null
 while read -r a b; do
     BASE_MAP[$a]=$b
 done < <(git submodule --quiet foreach --recursive 'echo $path `git log -1 --format=%ct`')
+
+# We need to switch back to the PR ref/head so we can git log properly
+if [[ $TRAVIS == true && ! -z $TRAVIS_PULL_REQUEST_SLUG ]]; then
+    echo "git fetch origin +refs/pull/$TRAVIS_PULL_REQUEST/merge:"
+    git fetch origin +refs/pull/$TRAVIS_PULL_REQUEST/merge: &> /dev/null
+    echo "switching back to $TRAVIS_COMMIT"
+    echo 'git checkout -qf FETCH_HEAD'
+    git checkout -qf FETCH_HEAD &> /dev/null
+elif [[ $BUILDKITE == true ]]; then
+    echo "switching back to $CURRENT_BRANCH"
+    git checkout -f $CURRENT_BRANCH &> /dev/null
+fi
+
 for k in "${!BASE_MAP[@]}"; do
     base_ts=${BASE_MAP[$k]}
     pr_ts=${PR_MAP[$k]}
@@ -27,8 +45,8 @@ for k in "${!BASE_MAP[@]}"; do
     echo " timestamp on $CURRENT_BRANCH: $pr_ts"
     echo " timestamp on $BASE_BRANCH: $base_ts"
     if (( $pr_ts < $base_ts)); then
-        echo "$k is older on $CURRENT_BRANCH than $BASE_BRANCH; investigating..."
-        if for c in `git log $CURRENT_BRANCH ^$BASE_BRANCH --pretty=format:"%H"`; do git show --pretty="" --name-only $c; done | grep -q "^$k$"; then
+        echo "$k is older on $CURRENT_BRANCH than $BASE_BRANCH; investigating the difference between $CURRENT_BRANCH and $BASE_BRANCH to look for $k changing..."
+        if [[ ! -z $(for c in $(git --no-pager log $CURRENT_BRANCH ^$BASE_BRANCH --pretty=format:"%H"); do git show --pretty="" --name-only $c; done | grep "^$k$") ]]; then
             echo "ERROR: $k has regressed"
             exit 1
         else
diff --git a/.travis.yml b/.travis.yml
index 110f2756374..4ce27f6b473 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -34,21 +34,24 @@ matrix:
       homebrew:
         update: true
        packages:
-        - graphviz
+        - ccache
+        - jq
+        - boost
+        - python@2
+        - python
         - libtool
+        - libusb
+        - graphviz
+        - automake
+        - wget
         - gmp
         - llvm@4
         - pkgconfig
-        - python
-        - python@2
         - doxygen
-        - libusb
         - openssl
-        - boost@1.70
-        - ccache
       env:
         - PATH="/usr/local/opt/ccache/libexec:$PATH"
-script: "ccache --max-size=1G && ./.cicd/build.sh && ./.cicd/test.sh scripts/parallel-test.sh && ./.cicd/test.sh scripts/serial-test.sh"
+script: "ccache --max-size=1G && ./.cicd/build.sh && ./.cicd/test.sh scripts/parallel-test.sh && ./.cicd/test.sh scripts/serial-test.sh && if [[ $(uname) != 'Darwin' ]]; then ./.cicd/submodule-regression-check.sh; fi"
 notifications:
   webhooks:
     secure: gmqODqoFAil2cR7v++ibqRNECBOSD/VJX+2qPa7XptkVWmVMzbII5CNgBQAscjFsp9arHPMXCCzkBi847PCSiHdsnYFQ4T273FLRWr3cDbLjfmR+BJ7dGKvQnlpSi2Ze2TtAPJyRl+iv+cxDj7cWE5zw2c4xbgh1a/cNO+/ayUfFkyMEIfVWRsHkdkra4gOLywou0XRLHr4CX1V60uU7uuqATnIMMi7gQYwiKKtZqjkbf8wcBvZirDhjQ6lDPN5tnZo6L4QHmqjtzNJg/UrD4h+zES53dLVI4uxlXRAwwpw+mJOFA3QE/3FT+bMQjLCffUz4gZaWcdgebPYzrwSWUbJoFdWAOwcTqivQY0FIQzcz/r6uGWcwWTavzkPEbg68BVM2BZId/0110J6feeTkpJ3MPV+UsIoGTvbg50vi/I06icftuZ/cLqDj3+Emifm7Jlr1sRTSdqtYAJj/2ImUfsb46cwgjAVhFOTvc+KuPgJQgvOXV7bZkxEr5qDWo8Al2sV8BWb83j1rMlZ4LfERokImDVqxu2kkcunchzvhtYFTesSpmwegVpwceCtOtO0rEUgATnfTEHzk2rm8nuz4UtidsQnluUKqmKD0QCqHXFfn+3ZRJsDqr+iCYdxv1BAeAVc9q1L7bgrKDMGiJgkxuhZ2v3J2SflWLvjZjFDduuc=
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 8a66bdac45d..392d7b4548d 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -28,7 +28,7 @@ set( CXX_STANDARD_REQUIRED ON)
 
 set(VERSION_MAJOR 1)
 set(VERSION_MINOR 8)
-set(VERSION_PATCH 3)
+set(VERSION_PATCH 4)
 #set(VERSION_SUFFIX develop)
 
 if(VERSION_SUFFIX)
diff --git a/README.md b/README.md
index 9264c12855c..b1e8dae21c4 100644
--- a/README.md
+++ b/README.md
@@ -44,13 +44,13 @@ $ brew remove eosio
 ```
 #### Ubuntu 18.04 Package Install
 ```sh
-$ wget https://github.com/eosio/eos/releases/download/v1.8.3/eosio_1.8.3-1-ubuntu-18.04_amd64.deb
-$ sudo apt install ./eosio_1.8.3-1-ubuntu-18.04_amd64.deb
+$ wget https://github.com/eosio/eos/releases/download/v1.8.4/eosio_1.8.4-1-ubuntu-18.04_amd64.deb
+$ sudo apt install ./eosio_1.8.4-1-ubuntu-18.04_amd64.deb
 ```
 #### Ubuntu 16.04 Package Install
 ```sh
-$ wget https://github.com/eosio/eos/releases/download/v1.8.3/eosio_1.8.3-1-ubuntu-16.04_amd64.deb
-$ sudo apt install ./eosio_1.8.3-1-ubuntu-16.04_amd64.deb
+$ wget https://github.com/eosio/eos/releases/download/v1.8.4/eosio_1.8.4-1-ubuntu-16.04_amd64.deb
+$ sudo apt install ./eosio_1.8.4-1-ubuntu-16.04_amd64.deb
 ```
 #### Ubuntu Package Uninstall
 ```sh
@@ -58,8 +58,8 @@ $ sudo apt remove eosio
 ```
 #### Centos RPM Package Install
 ```sh
-$ wget https://github.com/eosio/eos/releases/download/v1.8.3/eosio-1.8.3-1.el7.x86_64.rpm
-$ sudo yum install ./eosio-1.8.3-1.el7.x86_64.rpm
+$ wget https://github.com/eosio/eos/releases/download/v1.8.4/eosio-1.8.4-1.el7.x86_64.rpm
+$ sudo yum install ./eosio-1.8.4-1.el7.x86_64.rpm
 ```
 #### Centos RPM Package Uninstall
 ```sh
diff --git a/libraries/chain/include/eosio/chain/wasm_eosio_injection.hpp b/libraries/chain/include/eosio/chain/wasm_eosio_injection.hpp
index 2d93bad31d8..65cfa7b552c 100644
--- a/libraries/chain/include/eosio/chain/wasm_eosio_injection.hpp
+++ b/libraries/chain/include/eosio/chain/wasm_eosio_injection.hpp
@@ -754,8 +754,9 @@ namespace eosio { namespace chain { namespace wasm_injections {
 
    struct post_op_injectors : wasm_ops::op_types {
-      using loop_t          = wasm_ops::loop ;
-      using call_t          = wasm_ops::call ;
+      using loop_t          = wasm_ops::loop ;
+      using call_t          = wasm_ops::call ;
+      using grow_memory_t   = wasm_ops::grow_memory ;
    };
 
    template
diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp
index b35b38a0da8..25346501ca0 100644
--- a/plugins/net_plugin/net_plugin.cpp
+++ b/plugins/net_plugin/net_plugin.cpp
@@ -810,6 +810,7 @@ namespace eosio {
       last_handshake_recv = handshake_message();
       last_handshake_sent = handshake_message();
       my_impl->sync_master->reset_lib_num(shared_from_this());
+      fc_ilog(logger, "closing ${a}, ${p}", ("a",peer_addr)("p",peer_name()));
       fc_dlog(logger, "canceling wait on ${p}", ("p",peer_name()));
       cancel_wait();
       if( read_delay_timer ) read_delay_timer->cancel();
@@ -1395,8 +1396,8 @@ namespace eosio {
          sync_known_lib_num = target;
       }
 
-      if (!sync_required()) {
-         uint32_t bnum = chain_plug->chain().last_irreversible_block_num();
+      uint32_t bnum = chain_plug->chain().last_irreversible_block_num();
+      if (!sync_required() || target <= bnum) {
          uint32_t hnum = chain_plug->chain().fork_db_pending_head_block_num();
         fc_dlog( logger, "We are already caught up, my irr = ${b}, head = ${h}, target = ${t}",
                  ("b",bnum)("h",hnum)("t",target));
@@ -1482,12 +1483,7 @@
 
         if (head < msg.head_num ) {
           fc_dlog(logger, "sync check state 3");
-          if (!verify_catchup(c, msg.head_num, msg.head_id)) {
-             request_message req;
-             req.req_blocks.mode = catch_up;
-             req.req_trx.mode = none;
-             c->enqueue( req );
-          }
+          verify_catchup(c, msg.head_num, msg.head_id);
           return;
        }
        else {
@@ -1501,7 +1497,11 @@
             c->enqueue( note );
          }
          c->syncing = true;
-         if( cc.get_block_id_for_num( msg.head_num ) != msg.head_id ) {
+         bool on_fork = true;
+         try {
+            on_fork = cc.get_block_id_for_num( msg.head_num ) != msg.head_id;
+         } catch( ... ) {}
+         if( on_fork ) {
            request_message req;
            req.req_blocks.mode = catch_up;
            req.req_trx.mode = none;
@@ -1552,7 +1552,14 @@
       if (msg.known_blocks.ids.size() == 0) {
          fc_elog( logger,"got a catch up with ids size = 0" );
       } else {
-         verify_catchup(c, msg.known_blocks.pending, msg.known_blocks.ids.back());
+         const block_id_type& id = msg.known_blocks.ids.back();
+         controller& cc = chain_plug->chain();
+         if( !cc.fetch_block_by_id( id ) ) {
+            verify_catchup( c, msg.known_blocks.pending, id );
+         } else {
+            // we already have the block, so update peer with our view of the world
+            c->send_handshake();
+         }
       }
    } else {
@@ -1576,9 +1583,8 @@
      fc_dlog(logger, "got block ${bn} from ${p}",("bn",blk_num)("p",c->peer_name()));
      if (state == lib_catchup) {
         if (blk_num != sync_next_expected_num) {
-           fc_wlog( logger, "expected block ${ne} but got ${bn}, closing connection: ${p}",
+           fc_wlog( logger, "expected block ${ne} but got ${bn}, from connection: ${p}",
                     ("ne",sync_next_expected_num)("bn",blk_num)("p",c->peer_name()) );
-           my_impl->close(c);
            return;
        }
        sync_next_expected_num = blk_num + 1;
@@ -3022,6 +3028,8 @@ namespace eosio {
    }
 
    void net_plugin::plugin_startup() {
+      handle_sighup();
+      try {
      my->producer_plug = app().find_plugin<producer_plugin>();
      // currently thread_pool only used for server_ioc
@@ -3056,17 +3064,13 @@
         }
      }
 
-     my->keepalive_timer.reset( new boost::asio::steady_timer( my->thread_pool->get_executor() ) );
-     my->ticker();
-
      if( my->acceptor ) {
         my->acceptor->open(my->listen_endpoint.protocol());
        my->acceptor->set_option(tcp::acceptor::reuse_address(true));
        try {
          my->acceptor->bind(my->listen_endpoint);
        } catch (const std::exception& e) {
-        fc_elog( logger, "net_plugin::plugin_startup failed to bind to port ${port}",
-                 ("port", my->listen_endpoint.port()));
+        elog( "net_plugin::plugin_startup failed to bind to port ${port}", ("port", my->listen_endpoint.port()));
         throw e;
        }
        my->acceptor->listen();
@@ -3078,6 +3082,9 @@
        cc.accepted_block.connect( boost::bind(&net_plugin_impl::accepted_block, my.get(), _1));
      }
 
+     my->keepalive_timer.reset( new boost::asio::steady_timer( my->thread_pool->get_executor() ) );
+     my->ticker();
+
      my->incoming_transaction_ack_subscription = app().get_channel<compat::channels::transaction_ack>().subscribe(boost::bind(&net_plugin_impl::transaction_ack, my.get(), _1));
 
      if( cc.get_read_mode() == chain::db_read_mode::READ_ONLY ) {
@@ -3090,7 +3097,12 @@
      for( auto seed_node : my->supplied_peers ) {
         connect( seed_node );
      }
-     handle_sighup();
+
+      } catch (...) {
+         // always want plugin_shutdown even on exception
+         plugin_shutdown();
+         throw;
+      }
   }
 
   void net_plugin::handle_sighup() {
diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp
index 4291dffaeb6..cf4bd077939 100644
--- a/plugins/producer_plugin/producer_plugin.cpp
+++ b/plugins/producer_plugin/producer_plugin.cpp
@@ -202,6 +202,11 @@ class producer_plugin_impl : public std::enable_shared_from_this
[... surrounding context lines lost in extraction ...]
+      bool remove_expired_persisted_trxs( const fc::time_point& deadline );
+      bool remove_expired_blacklisted_trxs( const fc::time_point& deadline );
+      bool process_unapplied_trxs( const fc::time_point& deadline );
+      bool process_scheduled_and_incoming_trxs( const fc::time_point& deadline, size_t& pending_incoming_process_limit );
+      bool process_incoming_trxs( const fc::time_point& deadline, size_t& pending_incoming_process_limit );
@@ -840,6 +845,11 @@ void producer_plugin::plugin_startup()
    my->schedule_production_loop();
 
    ilog("producer plugin: plugin_startup() end");
+   } catch( ... ) {
+      // always call plugin_shutdown, even on exception
+      plugin_shutdown();
+      throw;
+   }
 } FC_CAPTURE_AND_RETHROW() }
 
 void producer_plugin::plugin_shutdown() {
@@ -1387,7 +1397,6 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() {
    } LOG_AND_DROP();
 
    if( chain.is_building_block() ) {
-      auto pending_block_time = chain.pending_block_time();
       auto pending_block_signing_key = chain.pending_block_signing_key();
       const fc::time_point preprocess_deadline = calculate_block_deadline(block_time);
 
@@ -1396,283 +1405,325 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() {
          _pending_block_mode = pending_block_mode::speculating;
       }
 
-      // attempt to play persisted transactions first
-      bool exhausted = false;
+      try {
+         if( !remove_expired_persisted_trxs( preprocess_deadline ) )
+            return start_block_result::exhausted;
+         if( !remove_expired_blacklisted_trxs( preprocess_deadline ) )
+            return start_block_result::exhausted;
 
-      // remove all persisted transactions that have now expired
-      auto& persisted_by_id = _persistent_transactions.get<by_id>();
-      auto& persisted_by_expiry = _persistent_transactions.get<by_expiry>();
-      if (!persisted_by_expiry.empty()) {
-         int num_expired_persistent = 0;
-         int orig_count = _persistent_transactions.size();
+         // limit execution of pending incoming to once per block
+         size_t pending_incoming_process_limit = _pending_incoming_transactions.size();
 
-         while(!persisted_by_expiry.empty() && persisted_by_expiry.begin()->expiry <= pending_block_time) {
-            if (preprocess_deadline <= fc::time_point::now()) {
-               exhausted = true;
-               break;
-            }
-            auto const& txid = persisted_by_expiry.begin()->trx_id;
-            if (_pending_block_mode == pending_block_mode::producing) {
-               fc_dlog(_trx_trace_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is EXPIRING PERSISTED tx: ${txid}",
-                       ("block_num", chain.head_block_num() + 1)
-                       ("prod", chain.pending_block_producer())
-                       ("txid", txid));
-            } else {
-               fc_dlog(_trx_trace_log, "[TRX_TRACE] Speculative execution is EXPIRING PERSISTED tx: ${txid}",
-                       ("txid", txid));
-            }
+         if( !process_unapplied_trxs( preprocess_deadline ) )
+            return start_block_result::exhausted;
 
-            persisted_by_expiry.erase(persisted_by_expiry.begin());
-            num_expired_persistent++;
+         if (_pending_block_mode == pending_block_mode::producing) {
+            auto scheduled_trx_deadline = preprocess_deadline;
+            if (_max_scheduled_transaction_time_per_block_ms >= 0) {
+               scheduled_trx_deadline = std::min(
+                     scheduled_trx_deadline,
+                     fc::time_point::now() + fc::milliseconds(_max_scheduled_transaction_time_per_block_ms)
+               );
+            }
+            // may exhaust scheduled_trx_deadline but not preprocess_deadline, exhausted preprocess_deadline checked below
+            process_scheduled_and_incoming_trxs( scheduled_trx_deadline, pending_incoming_process_limit );
         }
 
-         if( exhausted ) {
-            fc_wlog( _log, "Unable to process all ${n} persisted transactions before deadline, Expired ${expired}",
-                     ( "n", orig_count )
-                     ( "expired", num_expired_persistent ) );
+         if( app().is_quiting() ) // db guard exception above in LOG_AND_DROP could have called app().quit()
+            return start_block_result::failed;
+         if( preprocess_deadline <= fc::time_point::now() ) {
+            return start_block_result::exhausted;
         } else {
-            fc_dlog( _log, "Processed ${n} persisted transactions, Expired ${expired}",
-                     ( "n", orig_count )
-                     ( "expired", num_expired_persistent ) );
+            if( !process_incoming_trxs( preprocess_deadline, pending_incoming_process_limit ) )
+               return start_block_result::exhausted;
+            return start_block_result::succeeded;
         }
-      }
-
-      try {
-         size_t orig_pending_txn_size = _pending_incoming_transactions.size();
-
-         // Processing unapplied transactions...
-         //
-         if (_producers.empty() && persisted_by_id.empty()) {
-            // if this node can never produce and has no persisted transactions,
-            // there is no need for unapplied transactions they can be dropped
-            chain.get_unapplied_transactions().clear();
-         } else {
-            // derive appliable transactions from unapplied_transactions and drop droppable transactions
-            unapplied_transactions_type& unapplied_trxs = chain.get_unapplied_transactions();
-            if( !unapplied_trxs.empty() ) {
-               auto unapplied_trxs_size = unapplied_trxs.size();
-               int num_applied = 0;
-               int num_failed = 0;
-               int num_processed = 0;
-               auto calculate_transaction_category = [&](const transaction_metadata_ptr& trx) {
-                  if (trx->packed_trx->expiration() < pending_block_time) {
-                     return tx_category::EXPIRED;
-                  } else if (persisted_by_id.find(trx->id) != persisted_by_id.end()) {
-                     return tx_category::PERSISTED;
-                  } else {
-                     return tx_category::UNEXPIRED_UNPERSISTED;
-                  }
-               };
-
-               auto itr = unapplied_trxs.begin();
-               while( itr != unapplied_trxs.end() ) {
-                  auto itr_next = itr; // save off next since itr may be invalidated by loop
-                  ++itr_next;
-
-                  if( preprocess_deadline <= fc::time_point::now() ) exhausted = true;
-                  if( exhausted ) break;
-                  const transaction_metadata_ptr trx = itr->second;
-                  auto category = calculate_transaction_category(trx);
-                  if (category == tx_category::EXPIRED ||
-                      (category == tx_category::UNEXPIRED_UNPERSISTED && _producers.empty()))
-                  {
-                     if (!_producers.empty()) {
-                        fc_dlog(_trx_trace_log, "[TRX_TRACE] Node with producers configured is dropping an EXPIRED transaction that was PREVIOUSLY ACCEPTED : ${txid}",
-                                ("txid", trx->id));
-                     }
-                     itr = unapplied_trxs.erase( itr ); // unapplied_trxs map has not been modified, so simply erase and continue
-                     continue;
-                  } else if (category == tx_category::PERSISTED ||
-                             (category == tx_category::UNEXPIRED_UNPERSISTED && _pending_block_mode == pending_block_mode::producing))
-                  {
-                     ++num_processed;
-
-                     try {
-                        auto deadline = fc::time_point::now() + fc::milliseconds(_max_transaction_time_ms);
-                        bool deadline_is_subjective = false;
-                        if (_max_transaction_time_ms < 0 || (_pending_block_mode == pending_block_mode::producing && preprocess_deadline < deadline)) {
-                           deadline_is_subjective = true;
-                           deadline = preprocess_deadline;
-                        }
-
-                        auto trace = chain.push_transaction(trx, deadline);
-                        if (trace->except) {
-                           if (failure_is_subjective(*trace->except, deadline_is_subjective)) {
-                              exhausted = true;
-                              break;
-                           } else {
-                              // this failed our configured maximum transaction time, we don't want to replay it
-                              // chain.plus_transactions can modify unapplied_trxs, so erase by id
-                              unapplied_trxs.erase( trx->signed_id );
-                              ++num_failed;
-                           }
-                        } else {
-                           ++num_applied;
-                        }
-                     } LOG_AND_DROP();
-                  }
+      } catch ( const guard_exception& e ) {
+         chain_plugin::handle_guard_exception(e);
+         return start_block_result::failed;
+      } catch ( std::bad_alloc& ) {
+         chain_plugin::handle_bad_alloc();
+      } catch ( boost::interprocess::bad_alloc& ) {
+         chain_plugin::handle_db_exhaustion();
+      }
+   }
 
-                  itr = itr_next;
-               }
+   return start_block_result::failed;
+}
 
-               fc_dlog(_log, "Processed ${m} of ${n} previously applied transactions, Applied ${applied}, Failed/Dropped ${failed}",
-                       ("m", num_processed)
-                       ("n", unapplied_trxs_size)
-                       ("applied", num_applied)
-                       ("failed", num_failed));
-            }
+bool producer_plugin_impl::remove_expired_persisted_trxs( const fc::time_point& deadline )
+{
+   bool exhausted = false;
+   auto& persisted_by_expiry = _persistent_transactions.get<by_expiry>();
+   if (!persisted_by_expiry.empty()) {
+      chain::controller& chain = chain_plug->chain();
+      int num_expired_persistent = 0;
+      int orig_count = _persistent_transactions.size();
+
+      const time_point pending_block_time = chain.pending_block_time();
+      while(!persisted_by_expiry.empty() && persisted_by_expiry.begin()->expiry <= pending_block_time) {
+         if (deadline <= fc::time_point::now()) {
+            exhausted = true;
+            break;
         }
-
+         auto const& txid = persisted_by_expiry.begin()->trx_id;
         if (_pending_block_mode == pending_block_mode::producing) {
-            auto& blacklist_by_id = _blacklisted_transactions.get<by_id>();
-            auto& blacklist_by_expiry = _blacklisted_transactions.get<by_expiry>();
-            auto now = fc::time_point::now();
-            if(!blacklist_by_expiry.empty()) {
-               int num_expired = 0;
-               int orig_count = _blacklisted_transactions.size();
-
-               while (!blacklist_by_expiry.empty() && blacklist_by_expiry.begin()->expiry <= now) {
-                  if (preprocess_deadline <= fc::time_point::now()) break;
-                  blacklist_by_expiry.erase(blacklist_by_expiry.begin());
-                  num_expired++;
-               }
-
-               fc_dlog(_log, "Processed ${n} blacklisted transactions, Expired ${expired}",
-                       ("n", orig_count)
-                       ("expired", num_expired));
-            }
+            fc_dlog(_trx_trace_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is EXPIRING PERSISTED tx: ${txid}",
+                    ("block_num", chain.head_block_num() + 1)
+                    ("prod", chain.pending_block_producer())
+                    ("txid", txid));
+         } else {
+            fc_dlog(_trx_trace_log, "[TRX_TRACE] Speculative execution is EXPIRING PERSISTED tx: ${txid}",
+                    ("txid", txid));
+         }
 
-            // scheduled transactions
-            int num_applied = 0;
-            int num_failed = 0;
-            int num_processed = 0;
+         persisted_by_expiry.erase(persisted_by_expiry.begin());
+         num_expired_persistent++;
+      }
 
-            auto scheduled_trx_deadline = preprocess_deadline;
-            if (_max_scheduled_transaction_time_per_block_ms >= 0) {
-               scheduled_trx_deadline = std::min(
-                   scheduled_trx_deadline,
-                   fc::time_point::now() + fc::milliseconds(_max_scheduled_transaction_time_per_block_ms)
-               );
-            }
-            time_point pending_block_time = chain.pending_block_time();
-            const auto& sch_idx = chain.db().get_index();
-            const auto scheduled_trxs_size = sch_idx.size();
-            auto sch_itr = sch_idx.begin();
-            while( sch_itr != sch_idx.end() ) {
-               if( sch_itr->delay_until > pending_block_time) break; // not scheduled yet
-               if( sch_itr->published >= pending_block_time ) {
-                  ++sch_itr;
-                  continue; // do not allow schedule and execute in same block
-               }
-               if( scheduled_trx_deadline <= fc::time_point::now() ) {
-                  exhausted = true;
-                  break;
-               }
+      if( exhausted ) {
+         fc_wlog( _log, "Unable to process all ${n} persisted transactions before deadline, Expired ${expired}",
+                  ( "n", orig_count )
+                  ( "expired", num_expired_persistent ) );
+      } else {
+         fc_dlog( _log, "Processed ${n} persisted transactions, Expired ${expired}",
+                  ( "n", orig_count )
+                  ( "expired", num_expired_persistent ) );
+      }
+   }
+   return !exhausted;
+}
 
-               const transaction_id_type trx_id = sch_itr->trx_id; // make copy since reference could be invalidated
-               if (blacklist_by_id.find(trx_id) != blacklist_by_id.end()) {
-                  ++sch_itr;
-                  continue;
-               }
+bool producer_plugin_impl::remove_expired_blacklisted_trxs( const fc::time_point& deadline )
+{
+   bool exhausted = false;
+   auto& blacklist_by_expiry = _blacklisted_transactions.get<by_expiry>();
+   auto now = fc::time_point::now();
+   if(!blacklist_by_expiry.empty()) {
+      int num_expired = 0;
+      int orig_count = _blacklisted_transactions.size();
+
+      while (!blacklist_by_expiry.empty() && blacklist_by_expiry.begin()->expiry <= now) {
+         if (deadline <= fc::time_point::now()) {
+            exhausted = true;
+            break;
+         }
+         blacklist_by_expiry.erase(blacklist_by_expiry.begin());
+         num_expired++;
+      }
 
-               auto sch_itr_next = sch_itr; // save off next since sch_itr may be invalidated by loop
-               ++sch_itr_next;
-               const auto next_delay_until = sch_itr_next != sch_idx.end() ? sch_itr_next->delay_until : sch_itr->delay_until;
-               const auto next_id = sch_itr_next != sch_idx.end() ? sch_itr_next->id : sch_itr->id;
+      fc_dlog( _log, "Processed ${n} blacklisted transactions, Expired ${expired}",
+               ("n", orig_count)("expired", num_expired) );
+   }
+   return !exhausted;
+}
 
-               num_processed++;
+bool producer_plugin_impl::process_unapplied_trxs( const fc::time_point& deadline )
+{
+   chain::controller& chain = chain_plug->chain();
+   auto& persisted_by_id = _persistent_transactions.get<by_id>();
 
-               // configurable ratio of incoming txns vs deferred txns
-               while (_incoming_trx_weight >= 1.0 && orig_pending_txn_size && _pending_incoming_transactions.size()) {
-                  if (scheduled_trx_deadline <= fc::time_point::now()) break;
+   bool exhausted = false;
+   // Processing unapplied transactions...
+   //
+   if (_producers.empty() && persisted_by_id.empty()) {
+      // if this node can never produce and has no persisted transactions,
+      // there is no need for unapplied transactions they can be dropped
+      chain.get_unapplied_transactions().clear();
+   } else {
+      // derive appliable transactions from unapplied_transactions and drop droppable transactions
+      unapplied_transactions_type& unapplied_trxs = chain.get_unapplied_transactions();
+      if( !unapplied_trxs.empty() ) {
+         const time_point pending_block_time = chain.pending_block_time();
+         auto unapplied_trxs_size = unapplied_trxs.size();
+         int num_applied = 0;
+         int num_failed = 0;
+         int num_processed = 0;
+         auto calculate_transaction_category = [&](const transaction_metadata_ptr& trx) {
+            if (trx->packed_trx->expiration() < pending_block_time) {
+               return tx_category::EXPIRED;
+            } else if (persisted_by_id.find(trx->id) != persisted_by_id.end()) {
+               return tx_category::PERSISTED;
+            } else {
+               return tx_category::UNEXPIRED_UNPERSISTED;
+            }
+         };
 
-                  auto e = _pending_incoming_transactions.front();
-                  _pending_incoming_transactions.pop_front();
-                  --orig_pending_txn_size;
-                  _incoming_trx_weight -= 1.0;
-                  process_incoming_transaction_async(std::get<0>(e), std::get<1>(e), std::get<2>(e));
-               }
+         auto itr = unapplied_trxs.begin();
+         while( itr != unapplied_trxs.end() ) {
+            auto itr_next = itr; // save off next since itr may be invalidated by loop
+            ++itr_next;
 
-               if (scheduled_trx_deadline <= fc::time_point::now()) {
-                  exhausted = true;
-                  break;
+            if( deadline <= fc::time_point::now() ) {
+               exhausted = true;
+               break;
+            }
+            const transaction_metadata_ptr trx = itr->second;
+            auto category = calculate_transaction_category(trx);
+            if (category == tx_category::EXPIRED ||
+                (category == tx_category::UNEXPIRED_UNPERSISTED && _producers.empty()))
+            {
+               if (!_producers.empty()) {
+                  fc_dlog(_trx_trace_log, "[TRX_TRACE] Node with producers configured is dropping an EXPIRED transaction that was PREVIOUSLY ACCEPTED : ${txid}",
+                          ("txid", trx->id));
                }
+               itr = unapplied_trxs.erase( itr ); // unapplied_trxs map has not been modified, so simply erase and continue
+               continue;
+            } else if (category == tx_category::PERSISTED ||
+                       (category == tx_category::UNEXPIRED_UNPERSISTED && _pending_block_mode == pending_block_mode::producing))
+            {
+               ++num_processed;
 
                try {
-                  auto deadline = fc::time_point::now() + fc::milliseconds(_max_transaction_time_ms);
+                  auto trx_deadline = fc::time_point::now() + fc::milliseconds( _max_transaction_time_ms );
                   bool deadline_is_subjective = false;
-                  if (_max_transaction_time_ms < 0 || (_pending_block_mode == pending_block_mode::producing && scheduled_trx_deadline < deadline)) {
+                  if (_max_transaction_time_ms < 0 || (_pending_block_mode == pending_block_mode::producing && deadline < trx_deadline)) {
                      deadline_is_subjective = true;
-                     deadline = scheduled_trx_deadline;
+                     trx_deadline = deadline;
                   }
 
-                  auto trace = chain.push_scheduled_transaction(trx_id, deadline);
+                  auto trace = chain.push_transaction(trx, trx_deadline);
                   if (trace->except) {
                      if (failure_is_subjective(*trace->except, deadline_is_subjective)) {
                         exhausted = true;
                         break;
                      } else {
-                        auto expiration = fc::time_point::now() + fc::seconds(chain.get_global_properties().configuration.deferred_trx_expiration_window);
-                        // this failed our configured maximum transaction time, we don't want to replay it add it to a blacklist
-                        _blacklisted_transactions.insert(transaction_id_with_expiry{trx_id, expiration});
-                        num_failed++;
+                        // this failed our configured maximum transaction time, we don't want to replay it
+                        // chain.plus_transactions can modify unapplied_trxs, so erase by id
+                        unapplied_trxs.erase( trx->signed_id );
+                        ++num_failed;
                      }
                   } else {
-                     num_applied++;
+                     ++num_applied;
                   }
                } LOG_AND_DROP();
+            }
 
-               _incoming_trx_weight += _incoming_defer_ratio;
-               if (!orig_pending_txn_size) _incoming_trx_weight = 0.0;
+            itr = itr_next;
+         }
 
-               if( sch_itr_next == sch_idx.end() ) break;
-               sch_itr = sch_idx.lower_bound( boost::make_tuple( next_delay_until, next_id ) );
-            }
+         fc_dlog( _log, "Processed ${m} of ${n} previously applied transactions, Applied ${applied}, Failed/Dropped ${failed}",
+                  ("m", num_processed)("n", unapplied_trxs_size)("applied", num_applied)("failed", num_failed) );
+      }
+   }
+   return !exhausted;
+}
 
-            if( scheduled_trxs_size > 0 ) {
-               fc_dlog( _log,
-                        "Processed ${m} of ${n} scheduled transactions, Applied ${applied}, Failed/Dropped ${failed}",
-                        ( "m", num_processed )
-                        ( "n", scheduled_trxs_size )
-                        ( "applied", num_applied )
-                        ( "failed", num_failed ) );
-            }
+bool producer_plugin_impl::process_scheduled_and_incoming_trxs( const fc::time_point& deadline, size_t& pending_incoming_process_limit )
+{
+   chain::controller& chain = chain_plug->chain();
+   const time_point pending_block_time = chain.pending_block_time();
+   auto& blacklist_by_id = _blacklisted_transactions.get<by_id>();
+
+   // scheduled transactions
+   int num_applied = 0;
+   int num_failed = 0;
+   int num_processed = 0;
+   bool exhausted = false;
+   double incoming_trx_weight = 0.0;
+
+   const auto& sch_idx = chain.db().get_index();
+   const auto scheduled_trxs_size = sch_idx.size();
+   auto sch_itr = sch_idx.begin();
+   while( sch_itr != sch_idx.end() ) {
+      if( sch_itr->delay_until > pending_block_time) break;    // not scheduled yet
+      if( sch_itr->published >= pending_block_time ) {
+         ++sch_itr;
+         continue; // do not allow schedule and execute in same block
+      }
+      if( deadline <= fc::time_point::now() ) {
+         exhausted = true;
+         break;
+      }
+
+      const transaction_id_type trx_id = sch_itr->trx_id; // make copy since reference could be invalidated
+      if (blacklist_by_id.find(trx_id) != blacklist_by_id.end()) {
+         ++sch_itr;
+         continue;
+      }
+
+      auto sch_itr_next = sch_itr; // save off next since sch_itr may be invalidated by loop
+      ++sch_itr_next;
+      const auto next_delay_until = sch_itr_next != sch_idx.end() ? sch_itr_next->delay_until : sch_itr->delay_until;
+      const auto next_id = sch_itr_next != sch_idx.end() ? sch_itr_next->id : sch_itr->id;
+      num_processed++;
+
+      // configurable ratio of incoming txns vs deferred txns
+      while (incoming_trx_weight >= 1.0 && pending_incoming_process_limit && _pending_incoming_transactions.size()) {
+         if (deadline <= fc::time_point::now()) {
+            exhausted = true;
+            break;
         }
 
-         if( app().is_quiting() ) // db guard exception above in LOG_AND_DROP could have called app().quit()
-            return start_block_result::failed;
-         if (exhausted || preprocess_deadline <= fc::time_point::now()) {
-            return start_block_result::exhausted;
-         } else {
-            // attempt to apply any pending incoming transactions
-            _incoming_trx_weight = 0.0;
-
-            if (!_pending_incoming_transactions.empty()) {
-               fc_dlog(_log, "Processing ${n} pending transactions", ("n", _pending_incoming_transactions.size()));
-               while (orig_pending_txn_size && _pending_incoming_transactions.size()) {
-                  if (preprocess_deadline <= fc::time_point::now()) return start_block_result::exhausted;
-                  auto e = _pending_incoming_transactions.front();
-                  _pending_incoming_transactions.pop_front();
-                  --orig_pending_txn_size;
-                  process_incoming_transaction_async(std::get<0>(e), std::get<1>(e), std::get<2>(e));
-               }
+         auto e = _pending_incoming_transactions.front();
+         _pending_incoming_transactions.pop_front();
+         --pending_incoming_process_limit;
+         incoming_trx_weight -= 1.0;
+         process_incoming_transaction_async(std::get<0>(e), std::get<1>(e), std::get<2>(e));
+      }
+
+      if (deadline <= fc::time_point::now()) {
+         exhausted = true;
+         break;
+      }
+
+      try {
+         auto trx_deadline = fc::time_point::now() + fc::milliseconds(_max_transaction_time_ms);
+         bool deadline_is_subjective = false;
+         if (_max_transaction_time_ms < 0 || (_pending_block_mode == pending_block_mode::producing && deadline < trx_deadline)) {
+            deadline_is_subjective = true;
+            trx_deadline = deadline;
+         }
+
+         auto trace = chain.push_scheduled_transaction(trx_id, trx_deadline);
+         if (trace->except) {
+            if (failure_is_subjective(*trace->except, deadline_is_subjective)) {
+               exhausted = true;
+               break;
+            } else {
+               auto expiration = fc::time_point::now() + fc::seconds(chain.get_global_properties().configuration.deferred_trx_expiration_window);
+               // this failed our configured maximum transaction time, we don't want to replay it add it to a blacklist
+               _blacklisted_transactions.insert(transaction_id_with_expiry{trx_id, expiration});
+               num_failed++;
             }
-            return start_block_result::succeeded;
+         } else {
+            num_applied++;
          }
+      } LOG_AND_DROP();
 
-      } catch ( const guard_exception& e ) {
-         chain_plugin::handle_guard_exception(e);
-         return start_block_result::failed;
-      } catch ( std::bad_alloc& ) {
-         chain_plugin::handle_bad_alloc();
-      } catch ( boost::interprocess::bad_alloc& ) {
-         chain_plugin::handle_db_exhaustion();
-      }
+      incoming_trx_weight += _incoming_defer_ratio;
+      if (!pending_incoming_process_limit) incoming_trx_weight = 0.0;
 
+      if( sch_itr_next == sch_idx.end() ) break;
+      sch_itr = sch_idx.lower_bound( boost::make_tuple( next_delay_until, next_id ) );
    }
 
-   return start_block_result::failed;
+   if( scheduled_trxs_size > 0 ) {
+      fc_dlog( _log, "Processed ${m} of ${n} scheduled transactions, Applied ${applied}, Failed/Dropped ${failed}",
+               ( "m", num_processed )( "n", scheduled_trxs_size )( "applied", num_applied )( "failed", num_failed ) );
+   }
+   return !exhausted;
+}
+
+bool producer_plugin_impl::process_incoming_trxs( const fc::time_point& deadline, size_t& pending_incoming_process_limit )
+{
+   bool exhausted = false;
+   if (!_pending_incoming_transactions.empty()) {
+      fc_dlog(_log, "Processing ${n} pending transactions", ("n", _pending_incoming_transactions.size()));
+      while (pending_incoming_process_limit && _pending_incoming_transactions.size()) {
+         if( deadline <= fc::time_point::now() ) {
+            exhausted = true;
+            break;
+         }
+         auto e = _pending_incoming_transactions.front();
+         _pending_incoming_transactions.pop_front();
+         --pending_incoming_process_limit;
+         process_incoming_transaction_async(std::get<0>(e), std::get<1>(e), std::get<2>(e));
+      }
+   }
+   return !exhausted;
 }
 
 void producer_plugin_impl::schedule_production_loop() {
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index 70e398cf0cb..519852d370c 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -48,6 +48,7 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/validate-dirty-db.py ${CMAKE_CURRENT_
 configure_file(${CMAKE_CURRENT_SOURCE_DIR}/launcher_test.py ${CMAKE_CURRENT_BINARY_DIR}/launcher_test.py COPYONLY)
 configure_file(${CMAKE_CURRENT_SOURCE_DIR}/db_modes_test.sh ${CMAKE_CURRENT_BINARY_DIR}/db_modes_test.sh COPYONLY)
 configure_file(${CMAKE_CURRENT_SOURCE_DIR}/prod_preactivation_test.py ${CMAKE_CURRENT_BINARY_DIR}/prod_preactivation_test.py COPYONLY)
+configure_file(${CMAKE_CURRENT_SOURCE_DIR}/release-build.sh ${CMAKE_CURRENT_BINARY_DIR}/release-build.sh COPYONLY)
 configure_file(${CMAKE_CURRENT_SOURCE_DIR}/version-label.sh ${CMAKE_CURRENT_BINARY_DIR}/version-label.sh COPYONLY)
 configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_producer_watermark_test.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_producer_watermark_test.py COPYONLY)
@@ -86,6 +87,8 @@ add_test(NAME launcher_test COMMAND tests/launcher_test.py -v --clean-run --dump
 set_property(TEST launcher_test PROPERTY LABELS nonparallelizable_tests)
 add_test(NAME db_modes_test COMMAND tests/db_modes_test.sh WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
 set_tests_properties(db_modes_test PROPERTIES COST 6000)
+add_test(NAME release-build-test COMMAND tests/release-build.sh WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
+set_property(TEST release-build-test PROPERTY LABELS nonparallelizable_tests)
 add_test(NAME version-label-test COMMAND tests/version-label.sh WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
 
 # Long running tests
diff --git a/tests/nodeos_forked_chain_test.py b/tests/nodeos_forked_chain_test.py
index e4e3bc9469b..d8b9553b7d8 100755
--- a/tests/nodeos_forked_chain_test.py
+++ b/tests/nodeos_forked_chain_test.py
@@ -272,7 +272,7 @@ def getMinHeadAndLib(prodNodes):
             blockProducer=node.getBlockProducerByNum(blockNum)
 
             if producerToSlot[lastBlockProducer]["count"]!=inRowCountPerProducer:
-                Utils.errorExit("Producer %s, in slot %d, expected to produce %d blocks but produced %d blocks" % (blockProducer, inRowCountPerProducer, producerToSlot[lastBlockProducer]["count"]))
+                Utils.errorExit("Producer %s, in slot %d, expected to produce %d blocks but produced %d blocks" % (blockProducer, slot, inRowCountPerProducer, producerToSlot[lastBlockProducer]["count"]))
 
             if blockProducer==productionCycle[0]:
                 break
diff --git a/tests/release-build.sh b/tests/release-build.sh
new file mode 100755
index 00000000000..abcfaa0ecae
--- /dev/null
+++ b/tests/release-build.sh
@@ -0,0 +1,66 @@
+#!/bin/bash
+# test name and purpose
+echo ''
+echo '                         ##### Release Build Test #####'
+echo ''
+echo '    The purpose of this test is to ensure that nodeos was built with compiler'
+echo 'optimizations enabled. While there is no way to programmatically determine that'
+echo 'given one binary, we do set a debug flag in nodeos when it is built with'
+echo 'asserts. This test checks that debug flag. Anyone intending to build and install'
+echo 'nodeos from source should perform a "release build" which excludes asserts and'
+echo 'debugging symbols, and performs compiler optimizations.'
+echo ''
+# check for xxd
+if ! $(xxd --version 2>/dev/null); then
+    echo 'ERROR: Test requires xxd, but xxd was not found in your PATH!'
+    echo ''
+    echo 'The xxd hex dump tool can be installed as part of the vim-common package on most operating systems.'
+    exit 1
+fi
+# find nodeos
+[[ $(git --version) ]] && cd "$(git rev-parse --show-toplevel)/build" || cd "$(dirname "${BASH_SOURCE[0]}")/.."
+if [[ ! -f programs/nodeos/nodeos ]]; then
+    echo 'ERROR: nodeos binary not found!'
+    echo ''
+    echo 'I looked here...'
+    echo "$ ls -la \"$(pwd)/programs/nodeos\""
+    ls -la "$(pwd)/programs/nodeos"
+    echo '...which I derived from one of these paths:'
+    echo '$ echo "$(git rev-parse --show-toplevel)/build"'
+    echo "$(git rev-parse --show-toplevel)/build"
+    echo '$ echo "$(dirname "${BASH_SOURCE[0]}")/.."'
+    echo "$(dirname "${BASH_SOURCE[0]}")/.."
+    echo 'Release build test not run.'
+    exit 2
+fi
+# run nodeos to generate state files
+mkdir release-build-test
+programs/nodeos/nodeos --config-dir "$(pwd)/release-build-test/config" --data-dir "$(pwd)/release-build-test/data" 1>/dev/null 2>/dev/null &
+sleep 10
+kill $! # kill nodeos gracefully, by PID
+if [[ ! -f release-build-test/data/state/shared_memory.bin ]]; then
+    echo 'ERROR: nodeos state not found!'
+    echo ''
+    echo 'Looked for shared_memory.bin in the following places:'
+    echo "$ ls -la \"$(pwd)/release-build-test/data/state\""
+    ls -la "$(pwd)/release-build-test/data/state"
+    echo 'Release build test not run.'
+    rm -rf release-build-test
+    exit 3
+fi
+# test state files for debug flag
+export DEBUG_BYTE="$(xxd -seek 9 -l 1 release-build-test/data/state/shared_memory.bin | awk '{print $2}')"
+if [[ "$DEBUG_BYTE" == '00' ]]; then
+    echo 'PASS: Debug flag is not set.'
+    echo ''
+    rm -rf release-build-test
+    exit 0
+fi
+echo 'FAIL: Debug flag is set!'
+echo "Debug Byte = 0x$DEBUG_BYTE"
+echo ''
+echo 'First kilobyte of shared_memory.bin:'
+echo '$ xxd -l 1024 shared_memory.bin'
+xxd -l 1024 release-build-test/data/state/shared_memory.bin
+rm -rf release-build-test
+exit 4
\ No newline at end of file