diff --git a/.github/ISSUE_TEMPLATE/enhancement.md b/.github/ISSUE_TEMPLATE/enhancement.md
index 5b3775e7ca..fc7f8b590f 100644
--- a/.github/ISSUE_TEMPLATE/enhancement.md
+++ b/.github/ISSUE_TEMPLATE/enhancement.md
@@ -9,10 +9,10 @@ assignees: ''
 ---
 
 ## Problem
-_What problem are we solving?_
+_What is the problem or opportunity?_
 
 ## Impact
-_Why does this matter? Who does it impact? How much does it impact them? What is the urgency?_
+_Why is it important? Who requested it? Who does it impact? What data do we have to suggest this is a problem? Are there specific timelines that increase urgency?_
 
 ## Proposed Solution
 _[OPTIONAL] Thoughts on solution design. We could do to solve this problem._
diff --git a/.github/ISSUE_TEMPLATE/xERC20.md b/.github/ISSUE_TEMPLATE/xERC20.md
index 29ee1c5fa1..1be121032a 100644
--- a/.github/ISSUE_TEMPLATE/xERC20.md
+++ b/.github/ISSUE_TEMPLATE/xERC20.md
@@ -2,8 +2,9 @@
 name: New xERC20
 about: This is for whitelisting a new xERC20.
 title: "[TOKEN] [Mainnet/Testnet] xERC20 Whitelisting"
-labels: "xERC20 🪙"
-assignees: ""
+labels: "xERC20 \U0001FA99"
+assignees: ''
+
 ---
 
 ## Token Details
diff --git a/ops/mainnet/prod/core/config.tf b/ops/mainnet/prod/core/config.tf
index 4f38f78dd7..dfb74a998f 100644
--- a/ops/mainnet/prod/core/config.tf
+++ b/ops/mainnet/prod/core/config.tf
@@ -21,6 +21,10 @@ locals {
     { name = "DD_ENV", value = "${var.environment}-${var.stage}" },
     { name = "GRAPH_API_KEY", value = var.graph_api_key }
   ]
+  router_publisher_env_vars = concat(
+    local.router_env_vars, [
+      { name = "NODE_OPTIONS", value = "--max-old-space-size=1536" }
+  ])
   lighthouse_env_vars = {
     NXTP_CONFIG = local.local_lighthouse_config,
     ENVIRONMENT = var.environment,
@@ -101,22 +105,22 @@ locals {
       excludeListFromRelayerFee = ["0x5b9315ce1304df3b2a83b2074cbf849d160642ab"]
     },
     "1869640809" = {
-      providers = ["https://optimism-mainnet.blastapi.io/${var.blast_key}", "https://rpc.ankr.com/optimism"],
+      providers                 = ["https://optimism-mainnet.blastapi.io/${var.blast_key}", "https://rpc.ankr.com/optimism"],
       excludeListFromRelayerFee = ["0x9D9ce29Dc7812ccb63aB14EA987B52d9aF053Eb3"]
     },
    "1886350457" = {
-      providers = ["https://polygon-mainnet.blastapi.io/${var.blast_key}", "https://rpc.ankr.com/polygon"],
+      providers                 = ["https://polygon-mainnet.blastapi.io/${var.blast_key}", "https://rpc.ankr.com/polygon"],
       excludeListFromRelayerFee = ["0x83e8Cf4A51035665BAF97DdB0cf03b565AC76B44"]
     }
     "1634886255" = {
-      providers = ["https://arb-mainnet.g.alchemy.com/v2/${var.arbitrum_alchemy_key_0}", "https://rpc.ankr.com/arbitrum"],
+      providers                 = ["https://arb-mainnet.g.alchemy.com/v2/${var.arbitrum_alchemy_key_0}", "https://rpc.ankr.com/arbitrum"],
       excludeListFromRelayerFee = ["0xE6B7aB9EBCfBF1A72E489ff00CdF9C6473ff6224"]
     }
     "6450786" = {
       providers = ["https://bsc-mainnet.blastapi.io/${var.blast_key}", "https://bsc-dataseed1.binance.org", "https://bsc-dataseed2.binance.org", "https://rpc.ankr.com/bsc"]
     }
     "6778479" = {
-      providers = ["https://gnosis-mainnet.blastapi.io/${var.blast_key}", "https://rpc.gnosischain.com", "https://rpc.ankr.com/gnosis"],
+      providers                 = ["https://gnosis-mainnet.blastapi.io/${var.blast_key}", "https://rpc.gnosischain.com", "https://rpc.ankr.com/gnosis"],
       excludeListFromRelayerFee = ["0x6D4D82aE73DC9059Ac83B085b2505e00b5eF8511"]
     }
   }
diff --git a/ops/mainnet/prod/core/main.tf b/ops/mainnet/prod/core/main.tf
index ce96035080..f9cbba953a 100755
--- a/ops/mainnet/prod/core/main.tf
+++ b/ops/mainnet/prod/core/main.tf
@@ -70,15 +70,15 @@ module "router_publisher" {
"/ping" container_port = 8080 loadbalancer_port = 80 - cpu = 512 - memory = 1024 + cpu = 1024 + memory = 2048 instance_count = 1 timeout = 180 ingress_cdir_blocks = ["0.0.0.0/0"] ingress_ipv6_cdir_blocks = [] service_security_groups = flatten([module.network.allow_all_sg, module.network.ecs_task_sg]) cert_arn = var.certificate_arn - container_env_vars = local.router_env_vars + container_env_vars = local.router_publisher_env_vars } module "router_executor" { @@ -209,14 +209,16 @@ module "sequencer_publisher" { } module "sequencer_publisher_auto_scaling" { - source = "../../../modules/auto-scaling" - stage = var.stage - environment = var.environment - domain = var.domain - ecs_service_name = module.sequencer_publisher.service_name - ecs_cluster_name = module.ecs.ecs_cluster_name - min_capacity = 10 - max_capacity = 300 + source = "../../../modules/auto-scaling" + stage = var.stage + environment = var.environment + domain = var.domain + ecs_service_name = module.sequencer_publisher.service_name + ecs_cluster_name = module.ecs.ecs_cluster_name + avg_cpu_utilization_target = 40 + avg_mem_utilization_target = 60 + min_capacity = 10 + max_capacity = 100 } module "sequencer_subscriber" { @@ -249,14 +251,16 @@ module "sequencer_subscriber" { } module "sequencer_subscriber_auto_scaling" { - source = "../../../modules/auto-scaling" - stage = var.stage - environment = var.environment - domain = var.domain - ecs_service_name = module.sequencer_subscriber.service_name - ecs_cluster_name = module.ecs.ecs_cluster_name - min_capacity = 10 - max_capacity = 100 + source = "../../../modules/auto-scaling" + stage = var.stage + environment = var.environment + domain = var.domain + ecs_service_name = module.sequencer_subscriber.service_name + ecs_cluster_name = module.ecs.ecs_cluster_name + avg_cpu_utilization_target = 40 + avg_mem_utilization_target = 60 + min_capacity = 10 + max_capacity = 40 } @@ -345,8 +349,8 @@ module "lighthouse_prover_subscriber_auto_scaling" { ecs_cluster_name = module.ecs.ecs_cluster_name min_capacity = 10 max_capacity = 200 - avg_cpu_utilization_target = 10 - avg_mem_utilization_target = 15 + avg_cpu_utilization_target = 20 + avg_mem_utilization_target = 40 } module "lighthouse_process_from_root_cron" { diff --git a/ops/testnet/prod/core/config.tf b/ops/testnet/prod/core/config.tf index 4e7cf5d3ce..d7ae6f176f 100644 --- a/ops/testnet/prod/core/config.tf +++ b/ops/testnet/prod/core/config.tf @@ -21,6 +21,10 @@ locals { { name = "DD_PROFILING_ENABLED", value = "true" }, { name = "DD_ENV", value = "${var.environment}-${var.stage}" }, ] + router_publisher_env_vars = concat( + local.router_env_vars, [ + { name = "NODE_OPTIONS", value = "--max-old-space-size=1536" } + ]) lighthouse_env_vars = { NXTP_CONFIG = local.local_lighthouse_config, ENVIRONMENT = var.environment, @@ -250,7 +254,7 @@ locals { "1734439522" = { providers = ["https://arb-goerli.g.alchemy.com/v2/${var.arbgoerli_alchemy_key_0}", "https://goerli-rollup.arbitrum.io/rpc"] } - "1668247156" = { + "1668247156" = { providers = ["https://linea-goerli.infura.io/v3/${var.infura_key}", "https://rpc.goerli.linea.build", "${var.linea_node}"] } } diff --git a/ops/testnet/prod/core/main.tf b/ops/testnet/prod/core/main.tf index 87cae9b330..559f3fc01f 100755 --- a/ops/testnet/prod/core/main.tf +++ b/ops/testnet/prod/core/main.tf @@ -71,15 +71,15 @@ module "router_publisher" { health_check_path = "/ping" container_port = 8080 loadbalancer_port = 80 - cpu = 512 - memory = 1024 + cpu = 1024 + memory = 2048 instance_count = 1 timeout = 180 
ingress_cdir_blocks = ["0.0.0.0/0"] ingress_ipv6_cdir_blocks = [] service_security_groups = flatten([module.network.allow_all_sg, module.network.ecs_task_sg]) cert_arn = var.certificate_arn_testnet - container_env_vars = local.router_env_vars + container_env_vars = local.router_publisher_env_vars } module "router_executor" { @@ -211,14 +211,16 @@ module "sequencer_publisher" { } module "sequencer_publisher_auto_scaling" { - source = "../../../modules/auto-scaling" - stage = var.stage - environment = var.environment - domain = var.domain - ecs_service_name = module.sequencer_publisher.service_name - ecs_cluster_name = module.ecs.ecs_cluster_name - min_capacity = 10 - max_capacity = 300 + source = "../../../modules/auto-scaling" + stage = var.stage + environment = var.environment + domain = var.domain + ecs_service_name = module.sequencer_publisher.service_name + ecs_cluster_name = module.ecs.ecs_cluster_name + avg_cpu_utilization_target = 40 + avg_mem_utilization_target = 60 + min_capacity = 1 + max_capacity = 100 } module "sequencer_subscriber" { @@ -241,7 +243,7 @@ module "sequencer_subscriber" { loadbalancer_port = 80 cpu = 256 memory = 1024 - instance_count = 10 + instance_count = 1 timeout = 180 ingress_cdir_blocks = ["0.0.0.0/0"] ingress_ipv6_cdir_blocks = [] @@ -251,14 +253,16 @@ module "sequencer_subscriber" { } module "sequencer_subscriber_auto_scaling" { - source = "../../../modules/auto-scaling" - stage = var.stage - environment = var.environment - domain = var.domain - ecs_service_name = module.sequencer_subscriber.service_name - ecs_cluster_name = module.ecs.ecs_cluster_name - min_capacity = 10 - max_capacity = 100 + source = "../../../modules/auto-scaling" + stage = var.stage + environment = var.environment + domain = var.domain + ecs_service_name = module.sequencer_subscriber.service_name + ecs_cluster_name = module.ecs.ecs_cluster_name + avg_cpu_utilization_target = 40 + avg_mem_utilization_target = 60 + min_capacity = 1 + max_capacity = 40 } @@ -329,7 +333,7 @@ module "lighthouse_prover_subscriber" { loadbalancer_port = 80 cpu = 4096 memory = 8192 - instance_count = 10 + instance_count = 5 timeout = 290 ingress_cdir_blocks = ["0.0.0.0/0"] ingress_ipv6_cdir_blocks = [] @@ -344,10 +348,10 @@ module "lighthouse_prover_subscriber_auto_scaling" { domain = var.domain ecs_service_name = module.lighthouse_prover_subscriber.service_name ecs_cluster_name = module.ecs.ecs_cluster_name - min_capacity = 10 + min_capacity = 5 max_capacity = 200 - avg_cpu_utilization_target = 10 - avg_mem_utilization_target = 15 + avg_cpu_utilization_target = 20 + avg_mem_utilization_target = 40 } module "lighthouse_process_from_root_cron" { @@ -359,7 +363,7 @@ module "lighthouse_process_from_root_cron" { stage = var.stage container_env_vars = merge(local.lighthouse_env_vars, { LIGHTHOUSE_SERVICE = "process" }) schedule_expression = "rate(5 minutes)" - memory_size = 512 + memory_size = 1536 } @@ -384,7 +388,7 @@ module "lighthouse_sendoutboundroot_cron" { stage = var.stage container_env_vars = merge(local.lighthouse_env_vars, { LIGHTHOUSE_SERVICE = "sendoutboundroot" }) schedule_expression = "rate(30 minutes)" - memory_size = 512 + memory_size = 2048 } diff --git a/ops/testnet/staging/core/config.tf b/ops/testnet/staging/core/config.tf index c5f844ae3e..85f961029f 100644 --- a/ops/testnet/staging/core/config.tf +++ b/ops/testnet/staging/core/config.tf @@ -19,6 +19,10 @@ locals { { name = "DD_PROFILING_ENABLED", value = "true" }, { name = "DD_ENV", value = "${var.environment}-${var.stage}" }, ] + 
+  router_publisher_env_vars = concat(
+    local.router_env_vars, [
+      { name = "NODE_OPTIONS", value = "--max-old-space-size=1536" }
+  ])
   lighthouse_env_vars = {
     NXTP_CONFIG = local.local_lighthouse_config,
     ENVIRONMENT = var.environment,
diff --git a/ops/testnet/staging/core/main.tf b/ops/testnet/staging/core/main.tf
index d4be6080ac..1abb8e5011 100755
--- a/ops/testnet/staging/core/main.tf
+++ b/ops/testnet/staging/core/main.tf
@@ -68,15 +68,15 @@ module "router_publisher" {
   health_check_path        = "/ping"
   container_port           = 8080
   loadbalancer_port        = 80
-  cpu                      = 512
-  memory                   = 1024
+  cpu                      = 1024
+  memory                   = 2048
   instance_count           = 1
   timeout                  = 180
   ingress_cdir_blocks      = ["0.0.0.0/0"]
   ingress_ipv6_cdir_blocks = []
   service_security_groups  = flatten([module.network.allow_all_sg, module.network.ecs_task_sg])
   cert_arn                 = var.certificate_arn_testnet
-  container_env_vars       = local.router_env_vars
+  container_env_vars       = local.router_publisher_env_vars
 }
 
 module "router_executor" {
@@ -209,14 +209,16 @@ module "sequencer_publisher" {
 }
 
 module "sequencer_publisher_auto_scaling" {
-  source           = "../../../modules/auto-scaling"
-  stage            = var.stage
-  environment      = var.environment
-  domain           = var.domain
-  ecs_service_name = module.sequencer_publisher.service_name
-  ecs_cluster_name = module.ecs.ecs_cluster_name
-  min_capacity     = 10
-  max_capacity     = 300
+  source                     = "../../../modules/auto-scaling"
+  stage                      = var.stage
+  environment                = var.environment
+  domain                     = var.domain
+  ecs_service_name           = module.sequencer_publisher.service_name
+  ecs_cluster_name           = module.ecs.ecs_cluster_name
+  avg_cpu_utilization_target = 40
+  avg_mem_utilization_target = 60
+  min_capacity               = 1
+  max_capacity               = 30
 }
 
 module "sequencer_subscriber" {
@@ -239,7 +241,7 @@ module "sequencer_subscriber" {
   loadbalancer_port        = 80
   cpu                      = 4096
   memory                   = 8192
-  instance_count           = 5
+  instance_count           = 1
   timeout                  = 180
   ingress_cdir_blocks      = ["0.0.0.0/0"]
   ingress_ipv6_cdir_blocks = []
@@ -249,14 +251,16 @@ module "sequencer_subscriber" {
 }
 
 module "sequencer_subscriber_auto_scaling" {
-  source           = "../../../modules/auto-scaling"
-  stage            = var.stage
-  environment      = var.environment
-  domain           = var.domain
-  ecs_service_name = module.sequencer_subscriber.service_name
-  ecs_cluster_name = module.ecs.ecs_cluster_name
-  min_capacity     = 10
-  max_capacity     = 300
+  source                     = "../../../modules/auto-scaling"
+  stage                      = var.stage
+  environment                = var.environment
+  domain                     = var.domain
+  ecs_service_name           = module.sequencer_subscriber.service_name
+  ecs_cluster_name           = module.ecs.ecs_cluster_name
+  avg_cpu_utilization_target = 40
+  avg_mem_utilization_target = 60
+  min_capacity               = 1
+  max_capacity               = 10
 }
 
 module "sequencer_web3signer" {
@@ -318,14 +322,16 @@ module "lighthouse_prover_subscriber" {
 }
 
 module "lighthouse_prover_subscriber_auto_scaling" {
-  source           = "../../../modules/auto-scaling"
-  stage            = var.stage
-  environment      = var.environment
-  domain           = var.domain
-  ecs_service_name = module.lighthouse_prover_subscriber.service_name
-  ecs_cluster_name = module.ecs.ecs_cluster_name
-  min_capacity     = 10
-  max_capacity     = 300
+  source                     = "../../../modules/auto-scaling"
+  stage                      = var.stage
+  environment                = var.environment
+  domain                     = var.domain
+  ecs_service_name           = module.lighthouse_prover_subscriber.service_name
+  ecs_cluster_name           = module.ecs.ecs_cluster_name
+  min_capacity               = 2
+  max_capacity               = 30
+  avg_cpu_utilization_target = 20
+  avg_mem_utilization_target = 40
 }
 
 module "lighthouse_prover_cron" {
diff --git a/packages/agents/lighthouse/src/tasks/processFromRoot/processFromRoot.ts b/packages/agents/lighthouse/src/tasks/processFromRoot/processFromRoot.ts
index 26cd7c987f..79d6df4c9e 100644
--- a/packages/agents/lighthouse/src/tasks/processFromRoot/processFromRoot.ts
+++ b/packages/agents/lighthouse/src/tasks/processFromRoot/processFromRoot.ts
@@ -87,6 +87,9 @@ export const makeProcessFromRoot = async (config: NxtpLighthouseConfig, chainDat
     console.error("Error starting processor. Sad! :(", e);
   } finally {
     await closeDatabase();
-    process.exit();
+
+    context.logger.info("Process from root complete!!!", requestContext, methodContext, {
+      chains: [...Object.keys(context.config.chains)],
+    });
   }
 };
diff --git a/packages/agents/lighthouse/src/tasks/propagate/propagate.ts b/packages/agents/lighthouse/src/tasks/propagate/propagate.ts
index 018fc28415..1a5ef8a288 100644
--- a/packages/agents/lighthouse/src/tasks/propagate/propagate.ts
+++ b/packages/agents/lighthouse/src/tasks/propagate/propagate.ts
@@ -91,6 +91,8 @@ export const makePropagate = async (config: NxtpLighthouseConfig, chainData: Map
   } catch (e: unknown) {
     console.error("Error starting Propagate task. Sad! :(", e);
   } finally {
-    process.exit();
+    context.logger.info("Propagate task complete!", requestContext, methodContext, {
+      chains: [...Object.keys(context.config.chains)],
+    });
   }
 };
diff --git a/packages/agents/lighthouse/src/tasks/prover/prover.ts b/packages/agents/lighthouse/src/tasks/prover/prover.ts
index ccb2fff980..be36973977 100644
--- a/packages/agents/lighthouse/src/tasks/prover/prover.ts
+++ b/packages/agents/lighthouse/src/tasks/prover/prover.ts
@@ -16,6 +16,7 @@ import { acquireLock, prefetch, releaseLock } from "./operations/publisher";
 const context: ProverContext = {} as any;
 export const getContext = () => context;
 export const makeProverPublisher = async (config: NxtpLighthouseConfig, chainData: Map<string, ChainData>) => {
+  const { requestContext, methodContext } = createLoggingContext(makeProverPublisher.name);
   try {
     await makeProver(config, chainData);
     if (!(await acquireLock())) throw new Error("Could not acquire lock");
@@ -30,7 +31,8 @@ export const makeProverPublisher = async (config: NxtpLighthouseConfig, chainDat
     console.error("Error starting Prover-Publisher. Sad! :(", e);
   } finally {
     await closeDatabase();
-    process.exit();
+
+    context.logger.info("Prover complete!!!", requestContext, methodContext, {});
   }
 };
 
diff --git a/packages/agents/lighthouse/src/tasks/sendOutboundRoot/sendOutboundRoot.ts b/packages/agents/lighthouse/src/tasks/sendOutboundRoot/sendOutboundRoot.ts
index 6bd55a1c98..6c0c84383b 100644
--- a/packages/agents/lighthouse/src/tasks/sendOutboundRoot/sendOutboundRoot.ts
+++ b/packages/agents/lighthouse/src/tasks/sendOutboundRoot/sendOutboundRoot.ts
@@ -91,6 +91,8 @@ export const makeSendOutboundRoot = async (config: NxtpLighthouseConfig, chainDa
   } catch (e: unknown) {
     console.error("Error starting SendOutboundRoot task. Sad! :(", e);
   } finally {
-    process.exit();
+    context.logger.info("SendOutboundRoot task complete!!!", requestContext, methodContext, {
+      chains: [...Object.keys(context.config.chains)],
+    });
   }
 };
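
Note on the lighthouse task changes above: each task previously ended its finally block with process.exit(), which tears the Node process down before async log transports can flush and, for the Lambda-backed crons, can prevent the runtime from being reused between invocations. The patch replaces the hard exit with a completion log and lets the event loop drain naturally. A minimal sketch of that shape in isolation — runTask and closeResources are hypothetical stand-ins, not names from the repo:

// Sketch only: runTask/closeResources are illustrative, not the repo's API.
type Task = () => Promise<void>;

// Stand-in for closeDatabase() and similar teardown in the real tasks:
// release DB pools, timers, and sockets so nothing keeps the loop alive.
const closeResources = async (): Promise<void> => {};

export const runTask = async (name: string, task: Task): Promise<void> => {
  try {
    await task();
  } catch (e: unknown) {
    console.error(`Error starting ${name}. Sad! :(`, e);
  } finally {
    await closeResources();
    // No process.exit(): once all handles are closed the process exits on
    // its own, after pending log writes have flushed instead of being cut off.
    console.log(`${name} complete!!!`);
  }
};

The tradeoff is that a leaked handle (an unclosed pool or a stray interval) now shows up as a task that never exits rather than being masked by the hard exit — worth watching alongside the memory_size increases above.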