From 66c0e6ebf8236d648c62f9134e73166a65c980b7 Mon Sep 17 00:00:00 2001
From: Liu <57480598+liu-zhipeng@users.noreply.github.com>
Date: Tue, 7 Nov 2023 02:29:30 +0800
Subject: [PATCH] Prod sync (#5131)

* fix: account for decimal precision differences (#4782)
* fix: account for decimal precision differences
* build: bump sdk core
* fix: no multiply by 1e18 (#4822)
* build: bump sdk core (#4826)
* chore: add routers
* fix: init script for xTokens (#4838)
* fix: init
* fix: mainnet fix && removed errors
* fix: include all chains (#4851)
* Chore: add alcx to test net (#4833)
* config: Next tokens
* feat: changed next assets
* chore: add alcx

Signed-off-by: hesnicewithit

---------

Signed-off-by: hesnicewithit
Co-authored-by: Layne Haber
Co-authored-by: just-a-node

* feat: add EURe to testnet allowlist (#4795)
* feat: add EURe to testnet allowlist
* Update production.ts
* Update production.ts

---------

Co-authored-by: Prathmesh Khandelwal <201952225@iiitvadodara.ac.in>

* feat: add BTRST data (#4773)
* feat: add BTRST data
* add cap
* this is only a xMonoTest test (#4798)

Co-authored-by: Prathmesh <201952225@iiitvadodara.ac.in>

* fix: fetch strategy - fallback
* chore: rebuild graph-client
* Merge pull request Whitelist NEXT xerc20s (#4855)
* feat: add testnet NEXT xerc20
* feat: add mainnet NEXT xerc20
* fix: update testnet next
* fix: update mainnet next
* fix: uncomment
* feat: update next contracts and increase cap
* feat: new xerc20 for dappradar testnet (#4858)
* Revert "Merge branch 'main' into testnet-prod"

This reverts commit 00f4318197d5a406fdec9bc5d75b3e4289dceb9c, reversing changes made to 0c08528a00132b61c3b17bc46cf9027268f7ae5f.

* fix: merge changes to sdk and init script from main
* fix: maticjs version
* fix: migrate state to new backend (#4901)
* fix: migrate state to new backend
* fix: staging state migration + fixes

---------

Co-authored-by: Rahul Sethuram

* fix: prod state migrations (#4903)
* Fix/migrate state prod core (#4904)
* fix: prod state migrations
* fix: missing config
* ci: deploy terraform state changes (#4907)
* Fix/ci fix - remove flow control (#4910)
* fix: remove ci stop check
* fix: remove continue-on-error
* fix: control flow (#4911)
* Fix/tf version bump (#4914)
* fix: bump tf version and fix warning
* fix: terraform version bump
* Fix/merge conflict testnet prod resolution (#4918)
* fix: terraform version (#4916)
* config: add another zksync provider
* config: add secondary zksync provider everywhere
* fix: try runner space clean up and fix smoke tests
* fix: gracefully handle null array value
* config: remove misbehaving chain from router
* fix: disable consensys and zkevm nets on testnet
* fix: temp router bypass for zkevm and consensys
* fix: router execute bypass zkevm and consensys
* fix: bumpTransfer task (#4950)
* fix: One Inch authorization added
* fix: added checks
* 4927-One Inch config added to chain-abstraction SDK (#4955)
* fix: One Inch authorization added
* fix: added checks
* Update typo

Co-authored-by: Rahul Sethuram

---------

Co-authored-by: Rahul Sethuram

* fix: bump version
* feat: 4708 protocol backend tasks for integration test (#4800)
* feat: add devnet config
* feat: tenderly devnet setup (#4722)
* feat: create deploy scripts for devnet
* feat: get latest synced blocknumber
* fix: parse the result
* feat: create graph-node dockers
* feat: create all-in script for devnets
* feat: deploy chains, deploy contracts, deploy subgraph
* feat: create a separate docker-compose file
* feat: add cartographer-api and database
* chore: clean up
* feat: 4718 smart contracts deploy dev net (#4757)
* feat: install forge-deploy npm
* fix: remove forge-deploy npm
* feat: forge-deploy npm
* feat: forge deploy basically works
* feat: basically works
* feat: remove forge-deploy
* feat: deploy forge
* forge install: solidity-stringutils
* feat: solidity-stringutils
* feat: messaging & connext deploy
* feat: use gnosis instead of arbitrum
* feat: multisend contract deploy
* feat: deploy relayer proxy hub contract
* chore: yarn.lock
* feat: init
* feat: init messaging layer
* feat: agent setup
* feat: initialize assets
* feat: add devnet deploy commands to sh
* feat: use gnosis instead of arbitrum
* feat: contract deploy and init works in sh
* feat: export deployments works
* feat: devnet subgraph build works
* feat: devnet subgraph deployed
* feat: use local postgres db for carto of devnet
* feat: devnet graph-client build
* chore: rebuild
* chore: graph-client build
* fix: add setup step to ingest mainnet db data
* fix: package name
* fix: resolve errors and warnings
* fix: wrong command
* feat: use a separate docker-compose
* fix: update off chain docker local setup
* fix: update db port
* fix: add dockerfile for carto image for local
* fix: docker entry point for local carto poller
* fix: use db image with cron setup
* fix: setup db schema
* fix: update lh docker build path
* chore: delete
* feat: we want a fresh devnet
* feat: remove previous deployments
* feat: setup chains with the RPC urls
* feat: use a separate one
* feat: create template files
* feat: add `devnet` to network type
* fix: a couple of syntax errors
* feat: txservice get deployments
* fix: typo
* feat: replace
* feat: put the network param
* fix: relayer config
* feat: add NETWORK variable
* fix: subgraph deploy script
* fix: rebuild graph-client
* feat: subgraph adapter updated
* feat: add subgraphPrefix
* feat: use domain
* feat: update tenderly cli before spawn
* fix: deploy script
* fix: dbmate up
* fix: devnet deploy script
* feat: hardhat-tenderly setup
* fix: contracts build
* feat: generate config in integration package
* fix: add params to AdminHubConnector
* feat: devnet messaging config
* feat: devnet hh config
* feat: hh deploy script to devnet
* fix: export separately devnet:mainnet
* fix: txservice get deployed contracts from devnet
* fix: devnet init script
* fix: devnet json name
* fix: subgraph devnet deploy
* fix: devnet init command
* fix: build
* fix: test
* feat: implement a method
* feat: update config file
* fix: lint
* fix: update a script name
* feat: update bash script
* fix: incorrect method of JSON
* fix: devnet init
* fix: setup docker after contract deployment
* fix: devnet deploy command
* fix: automatic verify
* fix: speed up deploy
* chore: spoke connector for lighthouse
* fix: spoke connector address
* fix: retry deploy command
* feat: anvil docker image
* feat: docker-compose for local network
* fix: rename tenderly to devnet
* fix: anvil disable --no-mine
* fix: rename local test sh
* feat: local network setup
* fix: add local domain
* fix: deploy script
* fix: local export
* fix: clean up devnet commands
* refactor: update deploy script
* feat: devnet deploy & init works
* feat: add scripts
* fix: update anvil docker image
* feat: progress
* chore: relevant settings
* fix: yarn build:all
* fix: subgraph deploy to local
* fix: typo
* fix: create with version label
* fix: mapping
* fix: prefix issue
* chore: replace "local"
* feat: graphclient rebuild
* chore: yarn.lock
* feat: test router, sequencer, relayer
* fix: dbmate up
* fix: subgraph adapter
* chore: add mac setup
* fix: chain setup for local spec
* fix: default variables for local test
* feat: skipping action
* fix: setup router
* fix: hardhat to anvil
* feat: redeployments
* fix: initializeRouter on routerAccount
* feat: no need to setup asset here
* feat: pre-xcallIntoLocal
* chore: add abi for spoke and hub domain
* chore: add connectors interface
* feat: updates for onchain setup for slow path
* chore: update the imports for spoke and hub connector
* chore: update the implementation
* fix: viem of pancake-sdk downgrade
* feat: clean up xcall
* fix: update e2e local bring up steps
* feat: before approve
* feat: hooray, xcallIntoLocal
* feat: create onchain methods
* fix: typo
* fix: dbmate
* feat: spokeRootSent
* feat: AdminMainnetSpokeConnector
* chore: it changes every time
* feat: send / propagate local root
* feat: local test propagate works
* fix: deploy script delay blocks
* feat: receive aggregated root on spoke
* fix: setup same local / adopted asset
* fix: must be owner
* chore: local.init.json
* fix: from address
* feat: xcall sent
* feat: message queue setup
* fix: cleanup
* feat: process amb
* fix: receiveSpokeRootOnHub
* fix: graph node url
* fix: typo
* fix: subgraph prefix
* build: fix build:all
* feat: local networks for conversionRate
* feat: pre happy path
* feat: ensure automine is off
* fix: anvil docker block time
* fix: missing first one
* fix: db url
* feat: hub domain
* fix: missing the entry
* feat: wow, both path working
* feat: anvil block time 10s
* feat: relayer config for local test
* feat: need deployments for build

---------

Co-authored-by: wanglonghong
Co-authored-by: preethamr
Co-authored-by: sanchaymittal

* fix: pull from ghcr
* chore: use the constant for db_image (#4963)
* fix: try runner space clean up and fix smoke tests
* fix: 1inch api for quote (#4964)
* chore: disable smoke test on ci/cd for main
* chore: fix smoke-test
* chore: fix config setup step
* fix: send to relayer with correct domain (#4978)
* feat: flow logs + networking improvements
* fix: roll back minor changes
* fix: expire images but keep 20 at least
* fix: fix lifecycle priority
* chore: update config.local.json
* tag images with the branch name
* fix: add branch name to images
* fix:chain-abstraction:bump (#4985)
* fix: better lifecycle policy
* add testnet-prod lcp
* fix: vpc flow logs improvements
* feat: split flow logs between private and public
* fix: private subnet for router web3signer
* chore: graphclient update
* chore: config updates
* fix: update alchemy keys for opt and arb
* testnet prod sync (#5005)
* fix: public subnets for services
* fix: add zksync provider (#5006)
* testnet-prod sync (#5008)
* fix: hotfix prover cron & web3signer networking
* fix: private subnets for lh prover
* fix: networking mainnet fixes and ecr lcps (#5015)
* fix: improve ecp to remove any tagged images
* Radar token update (#4892)
* Radar token update
* update address for xerc20
* xRADAR added

---------

Co-authored-by: Prathmesh <201952225@iiitvadodara.ac.in>

* feat: 5033 working linea connector (#5037)
* fix: rename consensys to linea
* fix: rename consensys to linea
* fix: rename and verify
* feat: enable linea goerli ops
* chore: delete linea hub connector
* chore: recreate linea hub connector
* Testnet Prod sync (#5038)
* fix: testnet prod terraform (#5039)
* testnet prod sync (#5040)
* chore: rebuild
* fix: enable router execution linea
* fix: reader linea
* feat: linea goerli subgraph deploy
* feat: create xERC20 template (#5049)
* feat: create xERC20 template
* feat: minor changes
* fix: placeholder text
* fix: add env in title
* feat: add xERC20 label
* fix: chain Ids for subgraph networks
* chore: graph client rebuild
* feat: add bware linea goerli subgraph
* chore: rebuild graph client
* fix: thegraph network id for zksync-era
* feat: 5069 testnet onboarding lh agents works (#5074)
* feat: hh task for claiming linea message
* feat: gas cost for linea propagate
* test: LH tests for linea
* testnet prod sync (#5079)
* fix: 5069 testnet onboarding lh agents works (#5080)
* feat: hh task for claiming linea message
* feat: gas cost for linea propagate
* test: LH tests for linea
* chore: yarn.lock
* chore: update yarn.lock
* chore: attempt fixing ci/cd for lighthouse
* chore: fix broken ci/cd
* fix: docker build fix (#5092)
* fix: process from root linea (#5084)
* feat: claimMessage interface
* chore: rebuild
* feat: linea process from root
* test: linea process from root unit tests
* feat: deploy testnet linea hub connector
* feat: hub subgraph for linea hub connector
* chore: rename
* testnet prod sync (#5081)
* fix: increase gas for linea propagate (#5095)
* fix: increase gas for linea propagate
* fix: linea process from root args
* fix: linea propagate gas margin
* testnet prod sync (#5106)
* fix: linea process from root args (#5108)
* testnet-prod sync (#5109)
* Testnet prod sync (#5111)
* fix: improve auto-scaling policies
* increase lh prover subs as mem limits
* fix: increase router publisher mem allocation
* Update issue templates
* fix: 5125 sendoutboundroot cron called twice (#5126)
* Update issue templates
* fix: remove `process.exit()` sendoutboundroot

---------

Co-authored-by: alexwhte <37959170+alexwhte@users.noreply.github.com>

* fix: 5125 sendoutboundroot cron called twice (#5129)
* fix: remove `process.exit()` sendoutboundroot
* fix: remove `process.exit` from LH
* Testnet prod sync (#5130)
* fix: increase lh mem (#5132)

---------

Signed-off-by: hesnicewithit
Co-authored-by: Eddie
Co-authored-by: wanglonghong
Co-authored-by: Rahul Sethuram
Co-authored-by: Prathmesh Khandelwal <201952225@iiitvadodara.ac.in>
Co-authored-by: Scrub Lord <90161049+Hesnicewithit@users.noreply.github.com>
Co-authored-by: Layne Haber
Co-authored-by: Maciek
Co-authored-by: axlvaz <76085391+axlvaz@users.noreply.github.com>
Co-authored-by: preethamr
Co-authored-by: Carlo Mazzaferro
Co-authored-by: Rahul Sethuram
Co-authored-by: sanchaymittal
Co-authored-by: Sonmezturk
Co-authored-by: alexwhte <37959170+alexwhte@users.noreply.github.com>
---
 .github/ISSUE_TEMPLATE/enhancement.md         |  4 +-
 .github/ISSUE_TEMPLATE/xERC20.md              |  5 +-
 ops/mainnet/prod/core/config.tf               | 12 ++--
 ops/mainnet/prod/core/main.tf                 | 46 +++++++-------
 ops/testnet/prod/core/config.tf               |  6 +-
 ops/testnet/prod/core/main.tf                 | 56 +++++++++--------
 ops/testnet/staging/core/config.tf            |  4 ++
 ops/testnet/staging/core/main.tf              | 62 ++++++++++---------
 .../tasks/processFromRoot/processFromRoot.ts  |  5 +-
 .../src/tasks/propagate/propagate.ts          |  4 +-
 .../lighthouse/src/tasks/prover/prover.ts     |  4 +-
 .../sendOutboundRoot/sendOutboundRoot.ts      |  4 +-
 12 files changed, 124 insertions(+), 88 deletions(-)

diff --git a/.github/ISSUE_TEMPLATE/enhancement.md b/.github/ISSUE_TEMPLATE/enhancement.md
index 5b3775e7ca..fc7f8b590f 100644
--- a/.github/ISSUE_TEMPLATE/enhancement.md
+++ b/.github/ISSUE_TEMPLATE/enhancement.md
@@ -9,10 +9,10 @@ assignees: ''
 ---
 
 ## Problem
-_What problem are we solving?_
+_What is the problem or opportunity?_
 
 ## Impact
-_Why does this matter? Who does it impact? How much does it impact them? What is the urgency?_
+_Why is it important? Who requested it? Who does it impact? What data do we have to suggest this is a problem? Are there specific timelines that increase urgency?_
 
 ## Proposed Solution
 _[OPTIONAL] Thoughts on solution design. We could do to solve this problem._

diff --git a/.github/ISSUE_TEMPLATE/xERC20.md b/.github/ISSUE_TEMPLATE/xERC20.md
index 29ee1c5fa1..1be121032a 100644
--- a/.github/ISSUE_TEMPLATE/xERC20.md
+++ b/.github/ISSUE_TEMPLATE/xERC20.md
@@ -2,8 +2,9 @@
 name: New xERC20
 about: This is for whitelisting a new xERC20.
 title: "[TOKEN] [Mainnet/Testnet] xERC20 Whitelisting"
-labels: "xERC20 🪙"
-assignees: ""
+labels: "xERC20 \U0001FA99"
+assignees: ''
+
 ---
 
 ## Token Details
title: "[TOKEN] [Mainnet/Testnet] xERC20 Whitelisting" -labels: "xERC20 🪙" -assignees: "" +labels: "xERC20 \U0001FA99" +assignees: '' + --- ## Token Details diff --git a/ops/mainnet/prod/core/config.tf b/ops/mainnet/prod/core/config.tf index 4f38f78dd7..dfb74a998f 100644 --- a/ops/mainnet/prod/core/config.tf +++ b/ops/mainnet/prod/core/config.tf @@ -21,6 +21,10 @@ locals { { name = "DD_ENV", value = "${var.environment}-${var.stage}" }, { name = "GRAPH_API_KEY", value = var.graph_api_key } ] + router_publisher_env_vars = concat( + local.router_env_vars, [ + { name = "NODE_OPTIONS", value = "--max-old-space-size=1536" } + ]) lighthouse_env_vars = { NXTP_CONFIG = local.local_lighthouse_config, ENVIRONMENT = var.environment, @@ -101,22 +105,22 @@ locals { excludeListFromRelayerFee = ["0x5b9315ce1304df3b2a83b2074cbf849d160642ab"] }, "1869640809" = { - providers = ["https://optimism-mainnet.blastapi.io/${var.blast_key}", "https://rpc.ankr.com/optimism"], + providers = ["https://optimism-mainnet.blastapi.io/${var.blast_key}", "https://rpc.ankr.com/optimism"], excludeListFromRelayerFee = ["0x9D9ce29Dc7812ccb63aB14EA987B52d9aF053Eb3"] }, "1886350457" = { - providers = ["https://polygon-mainnet.blastapi.io/${var.blast_key}", "https://rpc.ankr.com/polygon"], + providers = ["https://polygon-mainnet.blastapi.io/${var.blast_key}", "https://rpc.ankr.com/polygon"], excludeListFromRelayerFee = ["0x83e8Cf4A51035665BAF97DdB0cf03b565AC76B44"] } "1634886255" = { - providers = ["https://arb-mainnet.g.alchemy.com/v2/${var.arbitrum_alchemy_key_0}", "https://rpc.ankr.com/arbitrum"], + providers = ["https://arb-mainnet.g.alchemy.com/v2/${var.arbitrum_alchemy_key_0}", "https://rpc.ankr.com/arbitrum"], excludeListFromRelayerFee = ["0xE6B7aB9EBCfBF1A72E489ff00CdF9C6473ff6224"] } "6450786" = { providers = ["https://bsc-mainnet.blastapi.io/${var.blast_key}", "https://bsc-dataseed1.binance.org", "https://bsc-dataseed2.binance.org", "https://rpc.ankr.com/bsc"] } "6778479" = { - providers = ["https://gnosis-mainnet.blastapi.io/${var.blast_key}", "https://rpc.gnosischain.com", "https://rpc.ankr.com/gnosis"], + providers = ["https://gnosis-mainnet.blastapi.io/${var.blast_key}", "https://rpc.gnosischain.com", "https://rpc.ankr.com/gnosis"], excludeListFromRelayerFee = ["0x6D4D82aE73DC9059Ac83B085b2505e00b5eF8511"] } } diff --git a/ops/mainnet/prod/core/main.tf b/ops/mainnet/prod/core/main.tf index ce96035080..f9cbba953a 100755 --- a/ops/mainnet/prod/core/main.tf +++ b/ops/mainnet/prod/core/main.tf @@ -70,15 +70,15 @@ module "router_publisher" { health_check_path = "/ping" container_port = 8080 loadbalancer_port = 80 - cpu = 512 - memory = 1024 + cpu = 1024 + memory = 2048 instance_count = 1 timeout = 180 ingress_cdir_blocks = ["0.0.0.0/0"] ingress_ipv6_cdir_blocks = [] service_security_groups = flatten([module.network.allow_all_sg, module.network.ecs_task_sg]) cert_arn = var.certificate_arn - container_env_vars = local.router_env_vars + container_env_vars = local.router_publisher_env_vars } module "router_executor" { @@ -209,14 +209,16 @@ module "sequencer_publisher" { } module "sequencer_publisher_auto_scaling" { - source = "../../../modules/auto-scaling" - stage = var.stage - environment = var.environment - domain = var.domain - ecs_service_name = module.sequencer_publisher.service_name - ecs_cluster_name = module.ecs.ecs_cluster_name - min_capacity = 10 - max_capacity = 300 + source = "../../../modules/auto-scaling" + stage = var.stage + environment = var.environment + domain = var.domain + ecs_service_name = 
diff --git a/ops/mainnet/prod/core/main.tf b/ops/mainnet/prod/core/main.tf
index ce96035080..f9cbba953a 100755
--- a/ops/mainnet/prod/core/main.tf
+++ b/ops/mainnet/prod/core/main.tf
@@ -70,15 +70,15 @@ module "router_publisher" {
   health_check_path        = "/ping"
   container_port           = 8080
   loadbalancer_port        = 80
-  cpu                      = 512
-  memory                   = 1024
+  cpu                      = 1024
+  memory                   = 2048
   instance_count           = 1
   timeout                  = 180
   ingress_cdir_blocks      = ["0.0.0.0/0"]
   ingress_ipv6_cdir_blocks = []
   service_security_groups  = flatten([module.network.allow_all_sg, module.network.ecs_task_sg])
   cert_arn                 = var.certificate_arn
-  container_env_vars       = local.router_env_vars
+  container_env_vars       = local.router_publisher_env_vars
 }
 
 module "router_executor" {
@@ -209,14 +209,16 @@ module "sequencer_publisher" {
 }
 
 module "sequencer_publisher_auto_scaling" {
-  source           = "../../../modules/auto-scaling"
-  stage            = var.stage
-  environment      = var.environment
-  domain           = var.domain
-  ecs_service_name = module.sequencer_publisher.service_name
-  ecs_cluster_name = module.ecs.ecs_cluster_name
-  min_capacity     = 10
-  max_capacity     = 300
+  source                     = "../../../modules/auto-scaling"
+  stage                      = var.stage
+  environment                = var.environment
+  domain                     = var.domain
+  ecs_service_name           = module.sequencer_publisher.service_name
+  ecs_cluster_name           = module.ecs.ecs_cluster_name
+  avg_cpu_utilization_target = 40
+  avg_mem_utilization_target = 60
+  min_capacity               = 10
+  max_capacity               = 100
 }
 
 module "sequencer_subscriber" {
@@ -249,14 +251,16 @@ module "sequencer_subscriber" {
 }
 
 module "sequencer_subscriber_auto_scaling" {
-  source           = "../../../modules/auto-scaling"
-  stage            = var.stage
-  environment      = var.environment
-  domain           = var.domain
-  ecs_service_name = module.sequencer_subscriber.service_name
-  ecs_cluster_name = module.ecs.ecs_cluster_name
-  min_capacity     = 10
-  max_capacity     = 100
+  source                     = "../../../modules/auto-scaling"
+  stage                      = var.stage
+  environment                = var.environment
+  domain                     = var.domain
+  ecs_service_name           = module.sequencer_subscriber.service_name
+  ecs_cluster_name           = module.ecs.ecs_cluster_name
+  avg_cpu_utilization_target = 40
+  avg_mem_utilization_target = 60
+  min_capacity               = 10
+  max_capacity               = 40
 }
 
 
@@ -345,8 +349,8 @@ module "lighthouse_prover_subscriber_auto_scaling" {
   ecs_cluster_name           = module.ecs.ecs_cluster_name
   min_capacity               = 10
   max_capacity               = 200
-  avg_cpu_utilization_target = 10
-  avg_mem_utilization_target = 15
+  avg_cpu_utilization_target = 20
+  avg_mem_utilization_target = 40
 }
 
 module "lighthouse_process_from_root_cron" {

diff --git a/ops/testnet/prod/core/config.tf b/ops/testnet/prod/core/config.tf
index 4e7cf5d3ce..d7ae6f176f 100644
--- a/ops/testnet/prod/core/config.tf
+++ b/ops/testnet/prod/core/config.tf
@@ -21,6 +21,10 @@ locals {
     { name = "DD_PROFILING_ENABLED", value = "true" },
     { name = "DD_ENV", value = "${var.environment}-${var.stage}" },
   ]
+  router_publisher_env_vars = concat(
+    local.router_env_vars, [
+      { name = "NODE_OPTIONS", value = "--max-old-space-size=1536" }
+  ])
   lighthouse_env_vars = {
     NXTP_CONFIG = local.local_lighthouse_config,
     ENVIRONMENT = var.environment,
@@ -250,7 +254,7 @@ locals {
     "1734439522" = {
       providers = ["https://arb-goerli.g.alchemy.com/v2/${var.arbgoerli_alchemy_key_0}", "https://goerli-rollup.arbitrum.io/rpc"]
     }
-    "1668247156" = {
+    "1668247156" = {
       providers = ["https://linea-goerli.infura.io/v3/${var.infura_key}", "https://rpc.goerli.linea.build", "${var.linea_node}"]
     }
   }
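The auto-scaling hunks above and below move the sequencer and lighthouse services from bare min/max task counts to target tracking on roughly 40% average CPU and 60% average memory. What the referenced auto-scaling module presumably provisions under the hood, sketched with the AWS SDK for JavaScript v3 Application Auto Scaling client (the cluster/service names and capacity numbers here are illustrative, not read from the module source):

    import {
      ApplicationAutoScalingClient,
      PutScalingPolicyCommand,
      RegisterScalableTargetCommand,
    } from "@aws-sdk/client-application-auto-scaling";

    async function main(): Promise<void> {
      const client = new ApplicationAutoScalingClient({});
      const resourceId = "service/connext-cluster/sequencer-subscriber"; // illustrative

      // Bound the ECS service between the min/max task counts from main.tf.
      await client.send(
        new RegisterScalableTargetCommand({
          ServiceNamespace: "ecs",
          ResourceId: resourceId,
          ScalableDimension: "ecs:service:DesiredCount",
          MinCapacity: 10,
          MaxCapacity: 40,
        }),
      );

      // Track 40% average CPU; ECS adds or removes tasks to hold the target.
      await client.send(
        new PutScalingPolicyCommand({
          PolicyName: "avg-cpu-40",
          ServiceNamespace: "ecs",
          ResourceId: resourceId,
          ScalableDimension: "ecs:service:DesiredCount",
          PolicyType: "TargetTrackingScaling",
          TargetTrackingScalingPolicyConfiguration: {
            TargetValue: 40,
            PredefinedMetricSpecification: {
              PredefinedMetricType: "ECSServiceAverageCPUUtilization",
            },
          },
        }),
      );
    }

    main().catch(console.error);

Raising the previous 10%/15% targets to 20%/40% means the prover subscribers now scale out later and scale in sooner, which is the cost-reduction intent of these hunks.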
diff --git a/ops/testnet/prod/core/main.tf b/ops/testnet/prod/core/main.tf
index 87cae9b330..559f3fc01f 100755
--- a/ops/testnet/prod/core/main.tf
+++ b/ops/testnet/prod/core/main.tf
@@ -71,15 +71,15 @@ module "router_publisher" {
   health_check_path        = "/ping"
   container_port           = 8080
   loadbalancer_port        = 80
-  cpu                      = 512
-  memory                   = 1024
+  cpu                      = 1024
+  memory                   = 2048
   instance_count           = 1
   timeout                  = 180
   ingress_cdir_blocks      = ["0.0.0.0/0"]
   ingress_ipv6_cdir_blocks = []
   service_security_groups  = flatten([module.network.allow_all_sg, module.network.ecs_task_sg])
   cert_arn                 = var.certificate_arn_testnet
-  container_env_vars       = local.router_env_vars
+  container_env_vars       = local.router_publisher_env_vars
 }
 
 module "router_executor" {
@@ -211,14 +211,16 @@ module "sequencer_publisher" {
 }
 
 module "sequencer_publisher_auto_scaling" {
-  source           = "../../../modules/auto-scaling"
-  stage            = var.stage
-  environment      = var.environment
-  domain           = var.domain
-  ecs_service_name = module.sequencer_publisher.service_name
-  ecs_cluster_name = module.ecs.ecs_cluster_name
-  min_capacity     = 10
-  max_capacity     = 300
+  source                     = "../../../modules/auto-scaling"
+  stage                      = var.stage
+  environment                = var.environment
+  domain                     = var.domain
+  ecs_service_name           = module.sequencer_publisher.service_name
+  ecs_cluster_name           = module.ecs.ecs_cluster_name
+  avg_cpu_utilization_target = 40
+  avg_mem_utilization_target = 60
+  min_capacity               = 1
+  max_capacity               = 100
 }
 
 module "sequencer_subscriber" {
@@ -241,7 +243,7 @@ module "sequencer_subscriber" {
   loadbalancer_port        = 80
   cpu                      = 256
   memory                   = 1024
-  instance_count           = 10
+  instance_count           = 1
   timeout                  = 180
   ingress_cdir_blocks      = ["0.0.0.0/0"]
   ingress_ipv6_cdir_blocks = []
@@ -251,14 +253,16 @@ module "sequencer_subscriber" {
 }
 
 module "sequencer_subscriber_auto_scaling" {
-  source           = "../../../modules/auto-scaling"
-  stage            = var.stage
-  environment      = var.environment
-  domain           = var.domain
-  ecs_service_name = module.sequencer_subscriber.service_name
-  ecs_cluster_name = module.ecs.ecs_cluster_name
-  min_capacity     = 10
-  max_capacity     = 100
+  source                     = "../../../modules/auto-scaling"
+  stage                      = var.stage
+  environment                = var.environment
+  domain                     = var.domain
+  ecs_service_name           = module.sequencer_subscriber.service_name
+  ecs_cluster_name           = module.ecs.ecs_cluster_name
+  avg_cpu_utilization_target = 40
+  avg_mem_utilization_target = 60
+  min_capacity               = 1
+  max_capacity               = 40
 }
 
 
@@ -329,7 +333,7 @@ module "lighthouse_prover_subscriber" {
   loadbalancer_port        = 80
   cpu                      = 4096
   memory                   = 8192
-  instance_count           = 10
+  instance_count           = 5
   timeout                  = 290
   ingress_cdir_blocks      = ["0.0.0.0/0"]
   ingress_ipv6_cdir_blocks = []
@@ -344,10 +348,10 @@ module "lighthouse_prover_subscriber_auto_scaling" {
   domain           = var.domain
   ecs_service_name = module.lighthouse_prover_subscriber.service_name
   ecs_cluster_name = module.ecs.ecs_cluster_name
-  min_capacity     = 10
+  min_capacity     = 5
   max_capacity     = 200
-  avg_cpu_utilization_target = 10
-  avg_mem_utilization_target = 15
+  avg_cpu_utilization_target = 20
+  avg_mem_utilization_target = 40
 }
 
 module "lighthouse_process_from_root_cron" {
@@ -359,7 +363,7 @@ module "lighthouse_process_from_root_cron" {
   stage               = var.stage
   container_env_vars  = merge(local.lighthouse_env_vars, { LIGHTHOUSE_SERVICE = "process" })
   schedule_expression = "rate(5 minutes)"
-  memory_size         = 512
+  memory_size         = 1536
 }
 
 
@@ -384,7 +388,7 @@ module "lighthouse_sendoutboundroot_cron" {
   stage               = var.stage
   container_env_vars  = merge(local.lighthouse_env_vars, { LIGHTHOUSE_SERVICE = "sendoutboundroot" })
   schedule_expression = "rate(30 minutes)"
-  memory_size         = 512
+  memory_size         = 2048
 }

diff --git a/ops/testnet/staging/core/config.tf b/ops/testnet/staging/core/config.tf
index c5f844ae3e..85f961029f 100644
--- a/ops/testnet/staging/core/config.tf
+++ b/ops/testnet/staging/core/config.tf
@@ -19,6 +19,10 @@ locals {
     { name = "DD_PROFILING_ENABLED", value = "true" },
     { name = "DD_ENV", value = "${var.environment}-${var.stage}" },
   ]
+  router_publisher_env_vars = concat(
+    local.router_env_vars, [
+      { name = "NODE_OPTIONS", value = "--max-old-space-size=1536" }
+  ])
   lighthouse_env_vars = {
     NXTP_CONFIG = local.local_lighthouse_config,
     ENVIRONMENT = var.environment,
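The cron modules in the testnet prod main.tf above (schedule_expression plus memory_size) look like EventBridge-scheduled Lambdas, with the process-from-root task raised to 1536 MB and sendoutboundroot to 2048 MB. A hedged handler-side sketch, hypothetical and not from this repo, that logs heap headroom against the configured limit so an undersized memory_size shows up in CloudWatch logs before it shows up as OOM kills:

    import type { Context, ScheduledEvent } from "aws-lambda";

    // Logs resident memory against the Lambda's configured ceiling on each
    // scheduled invocation; the task body itself is elided here.
    export const handler = async (_event: ScheduledEvent, context: Context): Promise<void> => {
      const usedMib = Math.round(process.memoryUsage().rss / (1024 * 1024));
      const limitMib = Number(context.memoryLimitInMB); // e.g. 1536 or 2048 after this patch
      console.log(`memory: ${usedMib}/${limitMib} MiB (${Math.round((100 * usedMib) / limitMib)}%)`);
      // ... run the lighthouse task here ...
    };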
diff --git a/ops/testnet/staging/core/main.tf b/ops/testnet/staging/core/main.tf
index d4be6080ac..1abb8e5011 100755
--- a/ops/testnet/staging/core/main.tf
+++ b/ops/testnet/staging/core/main.tf
@@ -68,15 +68,15 @@ module "router_publisher" {
   health_check_path        = "/ping"
   container_port           = 8080
   loadbalancer_port        = 80
-  cpu                      = 512
-  memory                   = 1024
+  cpu                      = 1024
+  memory                   = 2048
   instance_count           = 1
   timeout                  = 180
   ingress_cdir_blocks      = ["0.0.0.0/0"]
   ingress_ipv6_cdir_blocks = []
   service_security_groups  = flatten([module.network.allow_all_sg, module.network.ecs_task_sg])
   cert_arn                 = var.certificate_arn_testnet
-  container_env_vars       = local.router_env_vars
+  container_env_vars       = local.router_publisher_env_vars
 }
 
 module "router_executor" {
@@ -209,14 +209,16 @@ module "sequencer_publisher" {
 }
 
 module "sequencer_publisher_auto_scaling" {
-  source           = "../../../modules/auto-scaling"
-  stage            = var.stage
-  environment      = var.environment
-  domain           = var.domain
-  ecs_service_name = module.sequencer_publisher.service_name
-  ecs_cluster_name = module.ecs.ecs_cluster_name
-  min_capacity     = 10
-  max_capacity     = 300
+  source                     = "../../../modules/auto-scaling"
+  stage                      = var.stage
+  environment                = var.environment
+  domain                     = var.domain
+  ecs_service_name           = module.sequencer_publisher.service_name
+  ecs_cluster_name           = module.ecs.ecs_cluster_name
+  avg_cpu_utilization_target = 40
+  avg_mem_utilization_target = 60
+  min_capacity               = 1
+  max_capacity               = 30
 }
 
 module "sequencer_subscriber" {
@@ -239,7 +241,7 @@ module "sequencer_subscriber" {
   loadbalancer_port        = 80
   cpu                      = 4096
   memory                   = 8192
-  instance_count           = 5
+  instance_count           = 1
   timeout                  = 180
   ingress_cdir_blocks      = ["0.0.0.0/0"]
   ingress_ipv6_cdir_blocks = []
@@ -249,14 +251,16 @@ module "sequencer_subscriber" {
 }
 
 module "sequencer_subscriber_auto_scaling" {
-  source           = "../../../modules/auto-scaling"
-  stage            = var.stage
-  environment      = var.environment
-  domain           = var.domain
-  ecs_service_name = module.sequencer_subscriber.service_name
-  ecs_cluster_name = module.ecs.ecs_cluster_name
-  min_capacity     = 10
-  max_capacity     = 300
+  source                     = "../../../modules/auto-scaling"
+  stage                      = var.stage
+  environment                = var.environment
+  domain                     = var.domain
+  ecs_service_name           = module.sequencer_subscriber.service_name
+  ecs_cluster_name           = module.ecs.ecs_cluster_name
+  avg_cpu_utilization_target = 40
+  avg_mem_utilization_target = 60
+  min_capacity               = 1
+  max_capacity               = 10
 }
 
 module "sequencer_web3signer" {
@@ -318,14 +322,16 @@ module "lighthouse_prover_subscriber" {
 }
 
 module "lighthouse_prover_subscriber_auto_scaling" {
-  source           = "../../../modules/auto-scaling"
-  stage            = var.stage
-  environment      = var.environment
-  domain           = var.domain
-  ecs_service_name = module.lighthouse_prover_subscriber.service_name
-  ecs_cluster_name = module.ecs.ecs_cluster_name
-  min_capacity     = 10
-  max_capacity     = 300
+  source                     = "../../../modules/auto-scaling"
+  stage                      = var.stage
+  environment                = var.environment
+  domain                     = var.domain
+  ecs_service_name           = module.lighthouse_prover_subscriber.service_name
+  ecs_cluster_name           = module.ecs.ecs_cluster_name
+  min_capacity               = 2
+  max_capacity               = 30
+  avg_cpu_utilization_target = 20
+  avg_mem_utilization_target = 40
 }
 
 module "lighthouse_prover_cron" {
:(", e); } finally { await closeDatabase(); - process.exit(); + + context.logger.info("Process from root complete!!!", requestContext, methodContext, { + chains: [...Object.keys(context.config.chains)], + }); } }; diff --git a/packages/agents/lighthouse/src/tasks/propagate/propagate.ts b/packages/agents/lighthouse/src/tasks/propagate/propagate.ts index 018fc28415..1a5ef8a288 100644 --- a/packages/agents/lighthouse/src/tasks/propagate/propagate.ts +++ b/packages/agents/lighthouse/src/tasks/propagate/propagate.ts @@ -91,6 +91,8 @@ export const makePropagate = async (config: NxtpLighthouseConfig, chainData: Map } catch (e: unknown) { console.error("Error starting Propagate task. Sad! :(", e); } finally { - process.exit(); + context.logger.info("Propagate task complete!", requestContext, methodContext, { + chains: [...Object.keys(context.config.chains)], + }); } }; diff --git a/packages/agents/lighthouse/src/tasks/prover/prover.ts b/packages/agents/lighthouse/src/tasks/prover/prover.ts index ccb2fff980..be36973977 100644 --- a/packages/agents/lighthouse/src/tasks/prover/prover.ts +++ b/packages/agents/lighthouse/src/tasks/prover/prover.ts @@ -16,6 +16,7 @@ import { acquireLock, prefetch, releaseLock } from "./operations/publisher"; const context: ProverContext = {} as any; export const getContext = () => context; export const makeProverPublisher = async (config: NxtpLighthouseConfig, chainData: Map) => { + const { requestContext, methodContext } = createLoggingContext(makeProverPublisher.name); try { await makeProver(config, chainData); if (!(await acquireLock())) throw new Error("Could not acquire lock"); @@ -30,7 +31,8 @@ export const makeProverPublisher = async (config: NxtpLighthouseConfig, chainDat console.error("Error starting Prover-Publisher. Sad! :(", e); } finally { await closeDatabase(); - process.exit(); + + context.logger.info("Prover complete!!!", requestContext, methodContext, {}); } }; diff --git a/packages/agents/lighthouse/src/tasks/sendOutboundRoot/sendOutboundRoot.ts b/packages/agents/lighthouse/src/tasks/sendOutboundRoot/sendOutboundRoot.ts index 6bd55a1c98..6c0c84383b 100644 --- a/packages/agents/lighthouse/src/tasks/sendOutboundRoot/sendOutboundRoot.ts +++ b/packages/agents/lighthouse/src/tasks/sendOutboundRoot/sendOutboundRoot.ts @@ -91,6 +91,8 @@ export const makeSendOutboundRoot = async (config: NxtpLighthouseConfig, chainDa } catch (e: unknown) { console.error("Error starting SendOutboundRoot task. Sad! :(", e); } finally { - process.exit(); + context.logger.info("SendOutboundRoot task complete!!!", requestContext, methodContext, { + chains: [...Object.keys(context.config.chains)], + }); } };