From 1f882051aa4b175e9de1c2ed82c55df2f2294810 Mon Sep 17 00:00:00 2001 From: Arnau Bennassar Date: Fri, 25 Oct 2024 18:37:19 +0200 Subject: [PATCH 01/33] Feat/doc non evm integration (#139) * WIP * link non evm integrations * wip * gpt rewrite * Update docs/SUMMARY.md Co-authored-by: Dra Murphy <149679879+kmurphypolygon@users.noreply.github.com> * Apply suggestions from code review Co-authored-by: Dra Murphy <149679879+kmurphypolygon@users.noreply.github.com> --------- Co-authored-by: Dra Murphy <149679879+kmurphypolygon@users.noreply.github.com> --- docs/SUMMARY.md | 1 + docs/non_evm_integration.md | 69 +++++++++++++++++++++++++++++++++++++ 2 files changed, 70 insertions(+) create mode 100644 docs/non_evm_integration.md diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index 04254646..3bc96318 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -3,3 +3,4 @@ - [Getting Started](./getting_started.md) - [Local Debug](./local_debug.md) - [DA Integration](./da_integration.md) +- [Non-EVM integrations](./non_evm_integration.md) diff --git a/docs/non_evm_integration.md b/docs/non_evm_integration.md new file mode 100644 index 00000000..ed304ea5 --- /dev/null +++ b/docs/non_evm_integration.md @@ -0,0 +1,69 @@ +# Integrating non-EVM systems + +This guide explains how to connect a third-party execution environment to the AggLayer using the CDK. + +## Important note + +The following information is experimental, and there aren't any working examples of non-EVM integrations with the AggLayer yet. While we know what needs to be done conceptually, the implementation details are likely to evolve. Think of this as a rough overview of the effort involved, rather than a step-by-step guide towards a production deployment. 
+ +## Key Concepts + +Any system (chain or not chain) should be able to interact with the [unified LxLy bridge](https://docs.polygon.technology/zkEVM/architecture/unified-LxLy) and settle using the [AggLayer](https://docs.polygon.technology/learn/agglayer/overview/); especially when using the [Pessimistic Proof](https://docs.polygon.technology/learn/agglayer/pessimistic_proof/) option. Support for additional proofs, such as consensus, execution, or data availability are planned for the future. But, for now, this guide is based solely on using the Pessimistic Proof for settlement. + +The CDK client handles the integration with both the unified LxLy bridge and AggLayer. Think of it as an SDK to bring your project into the AggLayer ecosystem. You'll need to write some custom code in an adapter/plugin style so that the CDK client can connect with your service. + +In some cases, you might need to write code in `Go`. When that happens, the code should be in a separate repo and imported into the CDK as a dependency. The goal is to provide implementations that can interact with the *smart contracts* of the system being integrated, allowing the CDK client to reuse the same logic across different systems. Basically, you’ll need to create some *adapters* for the new system, while the existing code handles the rest. + +## Components for integration + +### Smart contracts + +For EVM-based integrations, there are two relevant smart contracts: + +- [Global exit root](https://github.com/0xPolygonHermez/zkevm-contracts/blob/feature/sovereign-bridge/contracts/v2/sovereignChains/GlobalExitRootManagerL2SovereignChain.sol) +- [Bridge](https://github.com/0xPolygonHermez/zkevm-contracts/blob/feature/sovereign-bridge/contracts/v2/sovereignChains/BridgeL2SovereignChain.sol) + +The integrated system needs to implement similar functionality. 
It doesn't have to be a smart contract per se, and it doesn't need to be split into two parts, but it should perform the functions that we list here: + +- Bridge assets and messages to other networks. +- Handle incoming asset/message claims. +- Export local exit roots (a hash needed for other networks to claim assets). +- Import global exit roots (a hash needed for processing bridge claims). + +### AggOracle + +This component imports global exit roots into the smart contract(s). It should be implemented as a `Go` package, using the [EVM example](../aggoracle/chaingersender/evm.go) as a reference. It should implement the `ChainSender` interface defined [here](../aggoracle/oracle.go). + +### BridgeSync + +BridgeSync synchronizes information about bridges and claims originating from the L2 service attached to the CDK client. In other words, it monitors what's happening with the bridge smart contract, collects the necessary data for interacting with the AggLayer, and feeds the bridge service to enable claims on destination networks. + +> **Heads up:** These interfaces may change. + +To process events from non-EVM systems, you'll need a `downloader` and `driver`. The current setup needs some tweaks to support custom implementations. In short, you need to work with the [`Processor`](../bridgesync/processor.go), particularly the `ProcessorInterface` found [here](../sync/driver.go). The `Events` in `Block` are just interfaces, which should be parsed as `Event` structs defined in the [`Processor`](../bridgesync/processor.go). + +### Claim sponsor + +This component performs claims on behalf of users, which is crucial for systems with "gas" fees (transaction costs). Without it, gas-based systems could face a chicken/egg situation: How can users pay for a claim if they need a previous claim to get the funds to pay for it? + +The claim sponsor is optional and may not be needed in some setups. 
The [bridge RPC](../rpc/bridge.go) includes a config parameter to enable or disable it. To implement a claim sponsor that can perform claim transactions on the bridge smart contract, you'll need to implement the `ClaimSender` interface, defined [here](../claimsponsor/claimsponsor.go). + +### Last GER sync + +> **Warning:** These interfaces may also change. + +This component tracks which global exit roots have been imported. It helps the bridge service know when incoming bridges are ready to be claimed. The work needed is similar to that for the bridge sync: Implement the [`ProcessorInterface`](../sync/driver.go), with events of type `Event` defined [here](../lastgersync/processor.go). + +## Additional considerations + +### Bridge + +Once all components are implemented, the network should be connected to the unified LxLy bridge. However, keep in mind: + +- Outgoing bridges should work with current tools and UIs, but incoming bridges may not. When using the claim sponsor, things should just work. However, the claim sponsor is optional... The point being that the existing UIs are built to send EVM transactions to make the claim in the absence of claim sponsor. So any claim interaction beyond the auto-claim functionality will need UIs and tooling that are out of the sope of the CDK. +- Bridging assets/messages to another network is specific to the integrated system. You'll need to create mechanisms to interact with the *bridge smart contract* of your service for these actions. +- We’re moving towards an *in-CDK* bridge service (spec [here](https://hackmd.io/0vA-XU2BRHmH3Ab0j4ouZw)), replacing the current separate service ([here](https://github.com/0xPolygonHermez/zkevm-bridge-service)). There's no stable API yet, and SDKs/UIs are still in development. + +### AggLayer + +AggLayer integration will work once the components are ready, but initially, it will only support Pessimistic Proof. 
Later updates will add more security features like execution proofs, consensus proofs, data availability, and forced transactions. These will be optional, while Pessimistic Proof will remain mandatory. \ No newline at end of file From 04b9854dd6b0267cf9bd782e94d011e9da5d647f Mon Sep 17 00:00:00 2001 From: Arnau Bennassar Date: Fri, 25 Oct 2024 19:02:38 +0200 Subject: [PATCH 02/33] Feature/doc rpc (#83) * WIP * WIP * feat: add openrpc.json doc file for the rpc * pr reqs --- rpc/bridge.go | 4 +- rpc/openrpc.json | 386 +++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 388 insertions(+), 2 deletions(-) create mode 100644 rpc/openrpc.json diff --git a/rpc/bridge.go b/rpc/bridge.go index c769158e..96394a4f 100644 --- a/rpc/bridge.go +++ b/rpc/bridge.go @@ -148,10 +148,10 @@ func (b *BridgeEndpoints) InjectedInfoAfterIndex(networkID uint32, l1InfoTreeInd ) } -// ClaimProof returns the proofs needed to claim a bridge. NetworkID and depositCount refere to the bridge origin +// GetProof returns the proofs needed to claim a bridge. NetworkID and depositCount refere to the bridge origin // while globalExitRoot should be already injected on the destination network. // This call needs to be done to a client of the same network were the bridge tx was sent -func (b *BridgeEndpoints) ClaimProof( +func (b *BridgeEndpoints) GetProof( networkID uint32, depositCount uint32, l1InfoTreeIndex uint32, ) (interface{}, rpc.Error) { ctx, cancel := context.WithTimeout(context.Background(), b.readTimeout) diff --git a/rpc/openrpc.json b/rpc/openrpc.json new file mode 100644 index 00000000..4e3a2518 --- /dev/null +++ b/rpc/openrpc.json @@ -0,0 +1,386 @@ +{ + "openrpc": "1.0.0", + "info": { + "title": "CDK Endpoints", + "version": "0.0.1" + }, + "methods": [ + { + "name": "bridge_l1InfoTreeIndexForBridge", + "summary": "Returns the first L1 Info Tree index in which the bridge was included. NetworkID represents the origin network. 
This call needs to be done to a client of the same network were the bridge tx was sent", + "params": [ + { + "$ref": "#/components/contentDescriptors/NetworkID" + }, + { + "$ref": "#/components/contentDescriptors/DepositCount" + } + ], + "result": { + "$ref": "#/components/contentDescriptors/L1InfoTreeIndex" + }, + "examples": [ + { + "name": "example", + "params": [], + "result": { + "name": "exampleResult", + "value": "0x1" + } + } + ] + }, + { + "name": "bridge_injectedInfoAfterIndex", + "summary": "Return the first GER injected onto the network that is linked to the given index or greater. This call is useful to understand when a bridge is ready to be claimed on its destination network", + "params": [ + { + "$ref": "#/components/contentDescriptors/NetworkID" + }, + { + "$ref": "#/components/contentDescriptors/L1InfoTreeIndex" + } + ], + "result": { + "$ref": "#/components/contentDescriptors/L1InfoTreeLeaf" + }, + "examples": [ ] + }, + { + "name": "bridge_getProof", + "summary": "Gets the proof needed to perform a claim for a given bridge", + "params": [ + { + "$ref": "#/components/contentDescriptors/NetworkID" + }, + { + "$ref": "#/components/contentDescriptors/DepositCount" + }, + { + "$ref": "#/components/contentDescriptors/L1InfoTreeIndex" + } + ], + "result": { + "$ref": "#/components/contentDescriptors/Proof" + }, + "examples": [] + }, + { + "name": "bridge_sponsorClaim", + "summary": "Request to sponsor the claim tx for a given bridge", + "params": [ + { + "$ref": "#/components/contentDescriptors/SponsorClaim" + } + ], + "result": { + "name": "empty", + "schema": {"type": "null"} + }, + "examples": [] + }, + { + "name": "bridge_getSponsoredClaimStatus", + "summary": "Gets the proof needed to perform a claim for a given bridge", + "params": [ + { + "$ref": "#/components/contentDescriptors/GlobalIndex" + } + ], + "result": { + "$ref": "#/components/contentDescriptors/ClaimStatus" + }, + "examples": [] + } + ], + "components": { + "contentDescriptors": { + 
"NetworkID": { + "name": "networkID", + "required": true, + "schema": { + "$ref": "#/components/schemas/NetworkID" + } + }, + "DepositCount": { + "name": "depositCount", + "required": true, + "schema": { + "$ref": "#/components/schemas/DepositCount" + } + }, + "L1InfoTreeIndex": { + "name": "l1InfoTreeIndex", + "required": true, + "schema": { + "$ref": "#/components/schemas/L1InfoTreeIndex" + } + }, + "L1InfoTreeLeaf": { + "name": "l1InfoTreeLeaf", + "required": true, + "schema": { + "$ref": "#/components/schemas/L1InfoTreeLeaf" + } + }, + "Proof": { + "name": "proof", + "required": true, + "schema": { + "$ref": "#/components/schemas/Proof" + } + }, + "SponsorClaim": { + "name": "sponsorClaim", + "required": true, + "schema": { + "$ref": "#/components/schemas/SponsorClaim" + } + }, + "GlobalIndex": { + "name": "globalIndex", + "required": true, + "schema": { + "$ref": "#/components/schemas/GlobalIndex" + } + }, + "ClaimStatus": { + "name": "claimStatus", + "required": true, + "schema": { + "$ref": "#/components/schemas/ClaimStatus" + } + } + }, + "schemas": { + "Bytes": { + "title": "bytes", + "type": "string", + "description": "Hex representation of a variable length byte array", + "pattern": "^0x([a-fA-F0-9]?)+$" + }, + "Integer": { + "title": "integer", + "type": "string", + "pattern": "^0x[a-fA-F0-9]+$", + "description": "Hex representation of the integer" + }, + "Keccak": { + "title": "keccak", + "type": "string", + "description": "Hex representation of a Keccak 256 hash", + "pattern": "^0x[a-fA-F\\d]{64}$" + }, + "Address": { + "title": "address", + "type": "string", + "pattern": "^0x[a-fA-F\\d]{40}$" + }, + "BlockHash": { + "title": "blockHash", + "type": "string", + "pattern": "^0x[a-fA-F\\d]{64}$", + "description": "The hex representation of the Keccak 256 of the RLP encoded block" + }, + "BlockNumber": { + "title": "blockNumber", + "type": "string", + "description": "The hex representation of the block's height", + "$ref": "#/components/schemas/Integer" + 
}, + "BlockPosition": { + "title": "blockPosition", + "type": "string", + "description": "The hex representation of the position inside the block", + "$ref": "#/components/schemas/Integer" + }, + "NetworkID": { + "title": "networkID", + "type": "string", + "description": "The hex representation of the network ID", + "$ref": "#/components/schemas/Integer" + }, + "DepositCount": { + "title": "depositCount", + "type": "string", + "description": "The hex representation of the deposit count", + "$ref": "#/components/schemas/Integer" + }, + "L1InfoTreeIndex": { + "title": "l1InfoTreeIndex", + "type": "string", + "description": "The hex representation of the L1 info tree index", + "$ref": "#/components/schemas/Integer" + }, + "L1InfoTreeLeaf": { + "title": "l1InfoTreeLeaf", + "type": "object", + "readOnly": true, + "properties": { + "blockNumber": { + "$ref": "#/components/schemas/BlockNumber" + }, + "blockPosition": { + "$ref": "#/components/schemas/BlockPosition" + }, + "previousBlockHash": { + "$ref": "#/components/schemas/Keccak" + }, + "timestamp": { + "title": "blockTimeStamp", + "type": "string", + "description": "The unix timestamp for when the block was collated" + }, + "l1InfoTreeIndex": { + "$ref": "#/components/schemas/L1InfoTreeIndex" + }, + "mainnetExitRoot": { + "$ref": "#/components/schemas/Keccak" + }, + "rollupExitRoot": { + "$ref": "#/components/schemas/Keccak" + }, + "globalExitRoot": { + "$ref": "#/components/schemas/Keccak" + }, + "hash": { + "$ref": "#/components/schemas/Keccak" + } + } + }, + "MerkleProof": { + "title": "merkleProof", + "type": "array", + "description": "Array of hashes that constitute a merkle proof", + "items": { + "$ref": "#/components/schemas/Keccak" + } + }, + "ProofLocalExitRoot": { + "title": "proofLocalExitRoot", + "description": "Merkle Proof that proofs the existance of a deposit in the local exit tree of a network", + "$ref": "#/components/schemas/MerkleProof" + }, + "ProofRollupExitRoot": { + "title": 
"proofLocalExitRoot", + "description": "Merkle Proof that proofs the existance of a deposit in the local exit tree of a network", + "$ref": "#/components/schemas/MerkleProof" + }, + "Proof": { + "title": "proof", + "type": "object", + "readOnly": true, + "properties": { + "l1InfoTreeLeaf": { + "$ref": "#/components/schemas/L1InfoTreeLeaf" + }, + "proofLocalExitRoot": { + "$ref": "#/components/schemas/ProofLocalExitRoot" + }, + "proofRollupExitRoot": { + "$ref": "#/components/schemas/ProofRollupExitRoot" + } + } + }, + "LeafType": { + "title": "leafType", + "type": "string", + "description": "The hex representation of the leaf type", + "$ref": "#/components/schemas/Integer" + }, + "GlobalIndex": { + "title": "globalIndex", + "type": "string", + "description": "The hex representation of the global index", + "$ref": "#/components/schemas/Integer" + }, + "OriginNetwork": { + "title": "originNetwork", + "type": "string", + "description": "The hex representation of the origin network ID of the token", + "$ref": "#/components/schemas/Integer" + }, + "OriginTokenAddress": { + "title": "originTokenAddress", + "type": "string", + "description": "address of the token on it's origin network", + "$ref": "#/components/schemas/Address" + }, + "DestinationNetwork": { + "title": "destinationNetwork", + "type": "string", + "description": "The hex representation of the destination network ID", + "$ref": "#/components/schemas/Integer" + }, + "DestinationAddress": { + "title": "destinationAddress", + "type": "string", + "description": "address of the receiver of the bridge", + "$ref": "#/components/schemas/Address" + }, + "Amount": { + "title": "amount", + "description": "Amount of tokens being bridged", + "$ref": "#/components/schemas/Keccak" + }, + "Metadata": { + "title": "metadata", + "description": "Extra data included in the bridge", + "$ref": "#/components/schemas/Bytes" + }, + "SponsorClaim": { + "title": "sponsorClaim", + "type": "object", + "readOnly": true, + "properties": { 
+ "leafType": { + "$ref": "#/components/schemas/LeafType" + }, + "proofLocalExitRoot": { + "$ref": "#/components/schemas/ProofLocalExitRoot" + }, + "proofRollupExitRoot": { + "$ref": "#/components/schemas/ProofRollupExitRoot" + }, + "globalIndex": { + "$ref": "#/components/schemas/GlobalIndex" + }, + "mainnetExitRoot": { + "$ref": "#/components/schemas/Keccak" + }, + "rollupExitRoot": { + "$ref": "#/components/schemas/Keccak" + }, + "originNetwork": { + "$ref": "#/components/schemas/OriginNetwork" + }, + "originTokenAddress": { + "$ref": "#/components/schemas/OriginTokenAddress" + }, + "destinationNetwork": { + "$ref": "#/components/schemas/DestinationNetwork" + }, + "destinationAddress": { + "$ref": "#/components/schemas/DestinationAddress" + }, + "amount": { + "$ref": "#/components/schemas/Amount" + }, + "metadata": { + "$ref": "#/components/schemas/Metadata" + } + } + }, + "ClaimStatus": { + "title": "claimStatus", + "description": "The status of a claim", + "type": "string", + "enum": [ + "pending", + "failed", + "success" + ] + } + } + } +} From 33a6d4c66b35c1c2533353a44cb0babd22b9b45d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toni=20Ram=C3=ADrez?= <58293609+ToniRamirezM@users.noreply.github.com> Date: Mon, 28 Oct 2024 10:12:34 +0100 Subject: [PATCH 03/33] feat: remove DS from aggregator (#138) * feat: remove DS from aggregator * feat: unit tests * fix: rust * fix: seq-sender tests * fix: local_config script * fix: remove unused file * fix: default config * fix: test config * fix: nil l1inforoot * feat: improve coverage * fix: comments * feat: remove DS lib --- aggregator/aggregator.go | 654 +++--------------- aggregator/aggregator_test.go | 531 ++++++-------- aggregator/config.go | 20 +- aggregator/db/migrations/0004.sql | 23 + aggregator/interfaces.go | 38 +- aggregator/mocks/mock_StreamClient.go | 247 ------- aggregator/mocks/mock_rpc.go | 87 +++ aggregator/mocks/mock_state.go | 84 --- config/default.go | 10 +- crates/cdk-config/src/aggregator.rs | 30 +- 
crates/cdk/src/config_render.rs | 2 - go.mod | 1 - go.sum | 4 - sequencesender/rpc.go => rpc/batch.go | 78 ++- rpc/batch_test.go | 265 +++++++ .../rpcbatch => rpc/types}/rpcbatch.go | 53 +- scripts/local_config | 2 +- sequencesender.json | 1 - sequencesender/config.go | 8 - sequencesender/mocks/mock_etherman.go | 2 +- sequencesender/mocks/mock_ethtxmanager.go | 28 +- sequencesender/mocks/mock_rpc.go | 88 +++ sequencesender/rpc_test.go | 115 --- sequencesender/sequencesender.go | 21 +- sequencesender/sequencesender_test.go | 13 +- state/datastream.go | 12 - state/interfaces.go | 4 - state/pgstatestorage/batch.go | 66 -- test/Makefile | 7 +- .../kurtosis-cdk-node-config.toml.template | 3 +- test/config/test.config.toml | 9 +- 31 files changed, 933 insertions(+), 1573 deletions(-) create mode 100644 aggregator/db/migrations/0004.sql delete mode 100644 aggregator/mocks/mock_StreamClient.go create mode 100644 aggregator/mocks/mock_rpc.go rename sequencesender/rpc.go => rpc/batch.go (52%) create mode 100644 rpc/batch_test.go rename {sequencesender/seqsendertypes/rpcbatch => rpc/types}/rpcbatch.go (70%) delete mode 100644 sequencesender.json create mode 100644 sequencesender/mocks/mock_rpc.go delete mode 100644 sequencesender/rpc_test.go delete mode 100644 state/datastream.go delete mode 100644 state/pgstatestorage/batch.go diff --git a/aggregator/aggregator.go b/aggregator/aggregator.go index 7106b615..1998e842 100644 --- a/aggregator/aggregator.go +++ b/aggregator/aggregator.go @@ -14,7 +14,6 @@ import ( "time" "unicode" - "github.com/0xPolygon/cdk-rpc/rpc" cdkTypes "github.com/0xPolygon/cdk-rpc/types" "github.com/0xPolygon/cdk/aggregator/agglayer" ethmanTypes "github.com/0xPolygon/cdk/aggregator/ethmantypes" @@ -23,13 +22,11 @@ import ( "github.com/0xPolygon/cdk/config/types" "github.com/0xPolygon/cdk/l1infotree" "github.com/0xPolygon/cdk/log" + "github.com/0xPolygon/cdk/rpc" "github.com/0xPolygon/cdk/state" - "github.com/0xPolygon/cdk/state/datastream" 
"github.com/0xPolygon/zkevm-ethtx-manager/ethtxmanager" ethtxlog "github.com/0xPolygon/zkevm-ethtx-manager/log" ethtxtypes "github.com/0xPolygon/zkevm-ethtx-manager/types" - "github.com/0xPolygonHermez/zkevm-data-streamer/datastreamer" - streamlog "github.com/0xPolygonHermez/zkevm-data-streamer/log" synclog "github.com/0xPolygonHermez/zkevm-synchronizer-l1/log" "github.com/0xPolygonHermez/zkevm-synchronizer-l1/state/entities" "github.com/0xPolygonHermez/zkevm-synchronizer-l1/synchronizer" @@ -39,20 +36,14 @@ import ( "google.golang.org/grpc" grpchealth "google.golang.org/grpc/health/grpc_health_v1" "google.golang.org/grpc/peer" - "google.golang.org/protobuf/proto" ) const ( - dataStreamType = 1 mockedStateRoot = "0x090bcaf734c4f06c93954a827b45a6e8c67b8e0fd1e0a35a1c5982d6961828f9" mockedLocalExitRoot = "0x17c04c3760510b48c6012742c540a81aba4bca2f78b9d14bfd2f123e2e53ea3e" maxDBBigIntValue = 9223372036854775807 ) -var ( - errBusy = errors.New("witness server is busy") -) - type finalProofMsg struct { proverName string proverID string @@ -70,24 +61,15 @@ type Aggregator struct { state StateInterface etherman Etherman ethTxManager EthTxManagerClient - streamClient StreamClient l1Syncr synchronizer.Synchronizer halted atomic.Bool - streamClientMutex *sync.Mutex - profitabilityChecker aggregatorTxProfitabilityChecker timeSendFinalProof time.Time timeCleanupLockedProofs types.Duration stateDBMutex *sync.Mutex timeSendFinalProofMutex *sync.RWMutex - // Data stream handling variables - currentBatchStreamData []byte - currentStreamBatch state.Batch - currentStreamBatchRaw state.BatchRawV2 - currentStreamL2Block state.L2BlockRaw - finalProof chan finalProofMsg verifyingProof bool @@ -99,6 +81,8 @@ type Aggregator struct { sequencerPrivateKey *ecdsa.PrivateKey aggLayerClient agglayer.AgglayerClientInterface + + rpcClient RPCInterface } // New creates a new aggregator. 
@@ -132,24 +116,6 @@ func New( logger.Fatalf("error creating ethtxmanager client: %v", err) } - var streamClient *datastreamer.StreamClient - - if !cfg.SyncModeOnlyEnabled { - // Data stream client logs - streamLogConfig := streamlog.Config{ - Environment: streamlog.LogEnvironment(cfg.Log.Environment), - Level: cfg.Log.Level, - Outputs: cfg.Log.Outputs, - } - - logger.Info("Creating data stream client....") - streamClient, err = datastreamer.NewClientWithLogsConfig(cfg.StreamClient.Server, dataStreamType, streamLogConfig) - if err != nil { - logger.Fatalf("failed to create stream client, error: %v", err) - } - logger.Info("Data stream client created.") - } - // Synchonizer logs syncLogConfig := synclog.Config{ Environment: synclog.LogEnvironment(cfg.Log.Environment), @@ -188,18 +154,16 @@ func New( state: stateInterface, etherman: etherman, ethTxManager: ethTxManager, - streamClient: streamClient, - streamClientMutex: &sync.Mutex{}, l1Syncr: l1Syncr, profitabilityChecker: profitabilityChecker, stateDBMutex: &sync.Mutex{}, timeSendFinalProofMutex: &sync.RWMutex{}, timeCleanupLockedProofs: cfg.CleanupLockedProofsInterval, finalProof: make(chan finalProofMsg), - currentBatchStreamData: []byte{}, aggLayerClient: aggLayerClient, sequencerPrivateKey: sequencerPrivateKey, witnessRetrievalChan: make(chan state.DBBatch), + rpcClient: rpc.NewBatchEndpoints(cfg.RPCURL), } if a.ctx == nil { @@ -208,7 +172,6 @@ func New( // Set function to handle the batches from the data stream if !cfg.SyncModeOnlyEnabled { - a.streamClient.SetProcessEntryFunc(a.handleReceivedDataStream) a.l1Syncr.SetCallbackOnReorgDone(a.handleReorg) a.l1Syncr.SetCallbackOnRollbackBatches(a.handleRollbackBatches) } @@ -216,51 +179,6 @@ func New( return a, nil } -func (a *Aggregator) resetCurrentBatchData() { - a.currentBatchStreamData = []byte{} - a.currentStreamBatchRaw = state.BatchRawV2{ - Blocks: make([]state.L2BlockRaw, 0), - } - a.currentStreamL2Block = state.L2BlockRaw{} -} - -func (a *Aggregator) 
retrieveWitness() { - var success bool - for { - dbBatch := <-a.witnessRetrievalChan - inner: - for !success { - var err error - // Get Witness - dbBatch.Witness, err = a.getWitness(dbBatch.Batch.BatchNumber, a.cfg.WitnessURL, a.cfg.UseFullWitness) - if err != nil { - if errors.Is(err, errBusy) { - a.logger.Debugf( - "Witness server is busy, retrying get witness for batch %d in %v", - dbBatch.Batch.BatchNumber, a.cfg.RetryTime.Duration, - ) - } else { - a.logger.Errorf("Failed to get witness for batch %d, err: %v", dbBatch.Batch.BatchNumber, err) - } - time.Sleep(a.cfg.RetryTime.Duration) - - continue inner - } - - err = a.state.AddBatch(a.ctx, &dbBatch, nil) - if err != nil { - a.logger.Errorf("Error adding batch: %v", err) - time.Sleep(a.cfg.RetryTime.Duration) - - continue inner - } - success = true - } - - success = false - } -} - func (a *Aggregator) handleReorg(reorgData synchronizer.ReorgExecutionResult) { a.logger.Warnf("Reorg detected, reorgData: %+v", reorgData) @@ -269,9 +187,20 @@ func (a *Aggregator) handleReorg(reorgData synchronizer.ReorgExecutionResult) { if err != nil { a.logger.Errorf("Error getting last virtual batch number: %v", err) } else { - err = a.state.DeleteBatchesNewerThanBatchNumber(a.ctx, lastVBatchNumber, nil) + // Delete wip proofs + err = a.state.DeleteUngeneratedProofs(a.ctx, nil) if err != nil { - a.logger.Errorf("Error deleting batches newer than batch number %d: %v", lastVBatchNumber, err) + a.logger.Errorf("Error deleting ungenerated proofs: %v", err) + } else { + a.logger.Info("Deleted ungenerated proofs") + } + + // Delete any proof for the batches that have been rolled back + err = a.state.DeleteGeneratedProofs(a.ctx, lastVBatchNumber+1, maxDBBigIntValue, nil) + if err != nil { + a.logger.Errorf("Error deleting generated proofs: %v", err) + } else { + a.logger.Infof("Deleted generated proofs for batches newer than %d", lastVBatchNumber) } } @@ -289,33 +218,12 @@ func (a *Aggregator) handleReorg(reorgData 
synchronizer.ReorgExecutionResult) { func (a *Aggregator) handleRollbackBatches(rollbackData synchronizer.RollbackBatchesData) { a.logger.Warnf("Rollback batches event, rollbackBatchesData: %+v", rollbackData) - a.streamClientMutex.Lock() - defer a.streamClientMutex.Unlock() - - dsClientWasRunning := a.streamClient.IsStarted() - var err error - if dsClientWasRunning { - // Disable the process entry function to avoid processing the data stream - a.streamClient.ResetProcessEntryFunc() - - // Stop Reading the data stream - err = a.streamClient.ExecCommandStop() - if err != nil { - a.logger.Errorf("failed to stop data stream: %v.", err) - } else { - a.logger.Info("Data stream client stopped") - } - } - // Get new last verified batch number from L1 - var lastVerifiedBatchNumber uint64 - if err == nil { - lastVerifiedBatchNumber, err = a.etherman.GetLatestVerifiedBatchNum() - if err != nil { - a.logger.Errorf("Error getting latest verified batch number: %v", err) - } + lastVerifiedBatchNumber, err := a.etherman.GetLatestVerifiedBatchNum() + if err != nil { + a.logger.Errorf("Error getting latest verified batch number: %v", err) } // Check lastVerifiedBatchNumber makes sense @@ -326,26 +234,6 @@ func (a *Aggregator) handleRollbackBatches(rollbackData synchronizer.RollbackBat ) } - // Delete invalidated batches - if err == nil { - err = a.state.DeleteBatchesNewerThanBatchNumber(a.ctx, rollbackData.LastBatchNumber, nil) - if err != nil { - a.logger.Errorf("Error deleting batches newer than batch number %d: %v", rollbackData.LastBatchNumber, err) - } else { - a.logger.Infof("Deleted batches newer than batch number %d", rollbackData.LastBatchNumber) - } - } - - // Older batches data can also be deleted - if err == nil { - err = a.state.DeleteBatchesOlderThanBatchNumber(a.ctx, rollbackData.LastBatchNumber, nil) - if err != nil { - a.logger.Errorf("Error deleting batches older than batch number %d: %v", rollbackData.LastBatchNumber, err) - } else { - a.logger.Infof("Deleted 
batches older than batch number %d", rollbackData.LastBatchNumber) - } - } - // Delete wip proofs if err == nil { err = a.state.DeleteUngeneratedProofs(a.ctx, nil) @@ -366,42 +254,6 @@ func (a *Aggregator) handleRollbackBatches(rollbackData synchronizer.RollbackBat } } - if err == nil { - // Reset current batch data previously read from the data stream - a.resetCurrentBatchData() - a.currentStreamBatch = state.Batch{} - a.logger.Info("Current batch data reset") - - var marshalledBookMark []byte - // Reset the data stream reading point - bookMark := &datastream.BookMark{ - Type: datastream.BookmarkType_BOOKMARK_TYPE_BATCH, - Value: rollbackData.LastBatchNumber + 1, - } - - marshalledBookMark, err = proto.Marshal(bookMark) - //nolint:gocritic - if err != nil { - a.logger.Error("failed to marshal bookmark: %v", err) - } else { - // Restart the stream client if needed - if dsClientWasRunning { - a.streamClient.SetProcessEntryFunc(a.handleReceivedDataStream) - err = a.streamClient.Start() - if err != nil { - a.logger.Errorf("failed to start stream client, error: %v", err) - } else { - // Resume data stream reading - err = a.streamClient.ExecCommandStartBookmark(marshalledBookMark) - if err != nil { - a.logger.Errorf("failed to connect to data stream: %v", err) - } - a.logger.Info("Data stream client resumed") - } - } - } - } - if err == nil { a.logger.Info("Handling rollback batches event finished successfully") } else { @@ -414,255 +266,6 @@ func (a *Aggregator) handleRollbackBatches(rollbackData synchronizer.RollbackBat } } -func (a *Aggregator) handleReceivedDataStream( - entry *datastreamer.FileEntry, client *datastreamer.StreamClient, server *datastreamer.StreamServer, -) error { - forcedBlockhashL1 := common.Hash{} - - if !a.halted.Load() { - if entry.Type != datastreamer.EntryType(datastreamer.EtBookmark) { - a.currentBatchStreamData = append(a.currentBatchStreamData, entry.Encode()...) 
- - switch entry.Type { - case datastreamer.EntryType(datastream.EntryType_ENTRY_TYPE_BATCH_START): - // Check currentStreamBatchRaw is empty as sanity check - if len(a.currentStreamBatchRaw.Blocks) > 0 { - a.logger.Errorf("currentStreamBatchRaw should be empty, "+ - "but it contains %v blocks", len(a.currentStreamBatchRaw.Blocks)) - a.resetCurrentBatchData() - } - batch := &datastream.BatchStart{} - err := proto.Unmarshal(entry.Data, batch) - if err != nil { - a.logger.Errorf("Error unmarshalling batch: %v", err) - - return err - } - - a.currentStreamBatch.BatchNumber = batch.Number - a.currentStreamBatch.ChainID = batch.ChainId - a.currentStreamBatch.ForkID = batch.ForkId - a.currentStreamBatch.Type = batch.Type - case datastreamer.EntryType(datastream.EntryType_ENTRY_TYPE_BATCH_END): - batch := &datastream.BatchEnd{} - err := proto.Unmarshal(entry.Data, batch) - if err != nil { - a.logger.Errorf("Error unmarshalling batch: %v", err) - - return err - } - - a.currentStreamBatch.LocalExitRoot = common.BytesToHash(batch.LocalExitRoot) - a.currentStreamBatch.StateRoot = common.BytesToHash(batch.StateRoot) - - // Add last block (if any) to the current batch - if a.currentStreamL2Block.BlockNumber != 0 { - a.currentStreamBatchRaw.Blocks = append(a.currentStreamBatchRaw.Blocks, a.currentStreamL2Block) - } - - // Save Current Batch - if a.currentStreamBatch.BatchNumber != 0 { - var batchl2Data []byte - - // Get batchl2Data from L1 - virtualBatch, err := a.l1Syncr.GetVirtualBatchByBatchNumber(a.ctx, a.currentStreamBatch.BatchNumber) - if err != nil && !errors.Is(err, entities.ErrNotFound) { - a.logger.Errorf("Error getting virtual batch: %v", err) - - return err - } - - for errors.Is(err, entities.ErrNotFound) { - a.logger.Debug("Waiting for virtual batch to be available") - time.Sleep(a.cfg.RetryTime.Duration) - virtualBatch, err = a.l1Syncr.GetVirtualBatchByBatchNumber(a.ctx, a.currentStreamBatch.BatchNumber) - - if err != nil && !errors.Is(err, entities.ErrNotFound) { 
- a.logger.Errorf("Error getting virtual batch: %v", err) - - return err - } - } - - // Encode batch - if a.currentStreamBatch.Type != datastream.BatchType_BATCH_TYPE_INVALID && - a.currentStreamBatch.Type != datastream.BatchType_BATCH_TYPE_INJECTED { - batchl2Data, err = state.EncodeBatchV2(&a.currentStreamBatchRaw) - if err != nil { - a.logger.Errorf("Error encoding batch: %v", err) - - return err - } - } - - // If the batch is marked as Invalid in the DS we enforce retrieve the data from L1 - if a.cfg.UseL1BatchData || - a.currentStreamBatch.Type == datastream.BatchType_BATCH_TYPE_INVALID || - a.currentStreamBatch.Type == datastream.BatchType_BATCH_TYPE_INJECTED { - a.currentStreamBatch.BatchL2Data = virtualBatch.BatchL2Data - } else { - a.currentStreamBatch.BatchL2Data = batchl2Data - } - - // Compare BatchL2Data from L1 and DataStream - if common.Bytes2Hex(batchl2Data) != common.Bytes2Hex(virtualBatch.BatchL2Data) && - a.currentStreamBatch.Type != datastream.BatchType_BATCH_TYPE_INJECTED { - a.logger.Warnf("BatchL2Data from L1 and data stream are different for batch %d", a.currentStreamBatch.BatchNumber) - - if a.currentStreamBatch.Type == datastream.BatchType_BATCH_TYPE_INVALID { - a.logger.Warnf("Batch is marked as invalid in data stream") - } else { - a.logger.Warnf("DataStream BatchL2Data:%v", common.Bytes2Hex(batchl2Data)) - } - a.logger.Warnf("L1 BatchL2Data:%v", common.Bytes2Hex(virtualBatch.BatchL2Data)) - } - - // Get L1InfoRoot - sequence, err := a.l1Syncr.GetSequenceByBatchNumber(a.ctx, a.currentStreamBatch.BatchNumber) - if err != nil { - a.logger.Errorf("Error getting sequence: %v", err) - - return err - } - - for sequence == nil { - a.logger.Debug("Waiting for sequence to be available") - time.Sleep(a.cfg.RetryTime.Duration) - sequence, err = a.l1Syncr.GetSequenceByBatchNumber(a.ctx, a.currentStreamBatch.BatchNumber) - if err != nil { - a.logger.Errorf("Error getting sequence: %v", err) - - return err - } - } - - a.currentStreamBatch.L1InfoRoot = 
sequence.L1InfoRoot - a.currentStreamBatch.Timestamp = sequence.Timestamp - - // Calculate Acc Input Hash - oldDBBatch, err := a.state.GetBatch(a.ctx, a.currentStreamBatch.BatchNumber-1, nil) - if err != nil { - a.logger.Errorf("Error getting batch %d: %v", a.currentStreamBatch.BatchNumber-1, err) - - return err - } - - // Injected Batch - if a.currentStreamBatch.BatchNumber == 1 { - l1Block, err := a.l1Syncr.GetL1BlockByNumber(a.ctx, virtualBatch.BlockNumber) - if err != nil { - a.logger.Errorf("Error getting L1 block: %v", err) - - return err - } - - forcedBlockhashL1 = l1Block.ParentHash - a.currentStreamBatch.L1InfoRoot = a.currentStreamBatch.GlobalExitRoot - } - - accInputHash := cdkcommon.CalculateAccInputHash( - a.logger, - oldDBBatch.Batch.AccInputHash, - a.currentStreamBatch.BatchL2Data, - a.currentStreamBatch.L1InfoRoot, - uint64(a.currentStreamBatch.Timestamp.Unix()), - a.currentStreamBatch.Coinbase, - forcedBlockhashL1, - ) - a.currentStreamBatch.AccInputHash = accInputHash - - dbBatch := state.DBBatch{ - Batch: a.currentStreamBatch, - Datastream: a.currentBatchStreamData, - Witness: nil, - } - - // Check if the batch is already in the DB to keep its witness - wDBBatch, err := a.state.GetBatch(a.ctx, a.currentStreamBatch.BatchNumber, nil) - if err != nil { - if !errors.Is(err, state.ErrNotFound) { - a.logger.Errorf("Error getting batch %d: %v", a.currentStreamBatch.BatchNumber, err) - - return err - } - } - - if wDBBatch != nil && wDBBatch.Witness != nil && len(wDBBatch.Witness) > 0 { - dbBatch.Witness = wDBBatch.Witness - } - - // Store batch in the DB - err = a.state.AddBatch(a.ctx, &dbBatch, nil) - if err != nil { - a.logger.Errorf("Error adding batch: %v", err) - - return err - } - - // Retrieve the witness - if len(dbBatch.Witness) == 0 { - a.witnessRetrievalChan <- dbBatch - } - } - - // Reset current batch data - a.resetCurrentBatchData() - - case datastreamer.EntryType(datastream.EntryType_ENTRY_TYPE_L2_BLOCK): - // Add previous block (if any) 
to the current batch - if a.currentStreamL2Block.BlockNumber != 0 { - a.currentStreamBatchRaw.Blocks = append(a.currentStreamBatchRaw.Blocks, a.currentStreamL2Block) - } - // "Open" the new block - l2Block := &datastream.L2Block{} - err := proto.Unmarshal(entry.Data, l2Block) - if err != nil { - a.logger.Errorf("Error unmarshalling L2Block: %v", err) - - return err - } - - header := state.ChangeL2BlockHeader{ - DeltaTimestamp: l2Block.DeltaTimestamp, - IndexL1InfoTree: l2Block.L1InfotreeIndex, - } - - a.currentStreamL2Block.ChangeL2BlockHeader = header - a.currentStreamL2Block.Transactions = make([]state.L2TxRaw, 0) - a.currentStreamL2Block.BlockNumber = l2Block.Number - a.currentStreamBatch.L1InfoTreeIndex = l2Block.L1InfotreeIndex - a.currentStreamBatch.Coinbase = common.BytesToAddress(l2Block.Coinbase) - a.currentStreamBatch.GlobalExitRoot = common.BytesToHash(l2Block.GlobalExitRoot) - - case datastreamer.EntryType(datastream.EntryType_ENTRY_TYPE_TRANSACTION): - l2Tx := &datastream.Transaction{} - err := proto.Unmarshal(entry.Data, l2Tx) - if err != nil { - a.logger.Errorf("Error unmarshalling L2Tx: %v", err) - - return err - } - // New Tx raw - tx, err := state.DecodeTx(common.Bytes2Hex(l2Tx.Encoded)) - if err != nil { - a.logger.Errorf("Error decoding tx: %v", err) - - return err - } - - l2TxRaw := state.L2TxRaw{ - EfficiencyPercentage: uint8(l2Tx.EffectiveGasPricePercentage), - TxAlreadyEncoded: false, - Tx: tx, - } - a.currentStreamL2Block.Transactions = append(a.currentStreamL2Block.Transactions, l2TxRaw) - } - } - } - - return nil -} - // Start starts the aggregator func (a *Aggregator) Start() error { // Initial L1 Sync blocking @@ -700,39 +303,13 @@ func (a *Aggregator) Start() error { return err } - // Cleanup data base - err = a.state.DeleteBatchesOlderThanBatchNumber(a.ctx, lastVerifiedBatchNumber, nil) - if err != nil { - return err - } - // Delete ungenerated recursive proofs err = a.state.DeleteUngeneratedProofs(a.ctx, nil) if err != nil { return 
fmt.Errorf("failed to initialize proofs cache %w", err) } - accInputHash, err := a.getVerifiedBatchAccInputHash(a.ctx, lastVerifiedBatchNumber) - if err != nil { - return err - } - a.logger.Infof("Last Verified Batch Number:%v", lastVerifiedBatchNumber) - a.logger.Infof("Starting AccInputHash:%v", accInputHash.String()) - - // Store Acc Input Hash of the latest verified batch - dummyDBBatch := state.DBBatch{ - Batch: state.Batch{ - BatchNumber: lastVerifiedBatchNumber, - AccInputHash: *accInputHash, - }, - Datastream: []byte{0}, - Witness: []byte{0}, - } - err = a.state.AddBatch(a.ctx, &dummyDBBatch, nil) - if err != nil { - return err - } a.resetVerifyProofTime() @@ -740,35 +317,6 @@ func (a *Aggregator) Start() error { go a.sendFinalProof() go a.ethTxManager.Start() - // Witness retrieval workers - for i := 0; i < a.cfg.MaxWitnessRetrievalWorkers; i++ { - go a.retrieveWitness() - } - - // Start stream client - a.streamClientMutex.Lock() - defer a.streamClientMutex.Unlock() - - err = a.streamClient.Start() - if err != nil { - a.logger.Fatalf("failed to start stream client, error: %v", err) - } - - bookMark := &datastream.BookMark{ - Type: datastream.BookmarkType_BOOKMARK_TYPE_BATCH, - Value: lastVerifiedBatchNumber + 1, - } - - marshalledBookMark, err := proto.Marshal(bookMark) - if err != nil { - a.logger.Fatalf("failed to marshal bookmark: %v", err) - } - - err = a.streamClient.ExecCommandStartBookmark(marshalledBookMark) - if err != nil { - a.logger.Fatalf("failed to connect to data stream: %v", err) - } - // A this point everything is ready, so start serving go func() { a.logger.Infof("Server listening on port %d", a.cfg.Port) @@ -891,18 +439,18 @@ func (a *Aggregator) sendFinalProof() { a.startProofVerification() - finalDBBatch, err := a.state.GetBatch(ctx, proof.BatchNumberFinal, nil) + // Get Batch from RPC + rpcFinalBatch, err := a.rpcClient.GetBatch(proof.BatchNumberFinal) if err != nil { - tmpLogger.Errorf("Failed to retrieve batch with number [%d]: %v", 
proof.BatchNumberFinal, err) + a.logger.Errorf("error getting batch %d from RPC: %v.", proof.BatchNumberFinal, err) a.endProofVerification() - continue } inputs := ethmanTypes.FinalProofInputs{ FinalProof: msg.finalProof, - NewLocalExitRoot: finalDBBatch.Batch.LocalExitRoot.Bytes(), - NewStateRoot: finalDBBatch.Batch.StateRoot.Bytes(), + NewLocalExitRoot: rpcFinalBatch.LocalExitRoot().Bytes(), + NewStateRoot: rpcFinalBatch.StateRoot().Bytes(), } switch a.cfg.SettlementBackend { @@ -1052,15 +600,16 @@ func (a *Aggregator) buildFinalProof( string(finalProof.Public.NewLocalExitRoot) == mockedLocalExitRoot { // This local exit root and state root come from the mock // prover, use the one captured by the executor instead - finalDBBatch, err := a.state.GetBatch(ctx, proof.BatchNumberFinal, nil) + rpcFinalBatch, err := a.rpcClient.GetBatch(proof.BatchNumberFinal) if err != nil { - return nil, fmt.Errorf("failed to retrieve batch with number [%d]", proof.BatchNumberFinal) + return nil, fmt.Errorf("error getting batch %d from RPC: %w", proof.BatchNumberFinal, err) } + tmpLogger.Warnf( "NewLocalExitRoot and NewStateRoot look like a mock values, using values from executor instead: LER: %v, SR: %v", - finalDBBatch.Batch.LocalExitRoot.TerminalString(), finalDBBatch.Batch.StateRoot.TerminalString()) - finalProof.Public.NewStateRoot = finalDBBatch.Batch.StateRoot.Bytes() - finalProof.Public.NewLocalExitRoot = finalDBBatch.Batch.LocalExitRoot.Bytes() + rpcFinalBatch.LocalExitRoot().TerminalString(), rpcFinalBatch.StateRoot().TerminalString()) + finalProof.Public.NewStateRoot = rpcFinalBatch.StateRoot().Bytes() + finalProof.Public.NewLocalExitRoot = rpcFinalBatch.LocalExitRoot().Bytes() } return finalProof, nil @@ -1459,15 +1008,6 @@ func (a *Aggregator) tryAggregateProofs(ctx context.Context, prover ProverInterf return true, nil } -func (a *Aggregator) getVerifiedBatchAccInputHash(ctx context.Context, batchNumber uint64) (*common.Hash, error) { - accInputHash, err := 
a.etherman.GetBatchAccInputHash(ctx, batchNumber) - if err != nil { - return nil, err - } - - return &accInputHash, nil -} - func (a *Aggregator) getAndLockBatchToProve( ctx context.Context, prover ProverInterface, ) (*state.Batch, []byte, *state.Proof, error) { @@ -1511,7 +1051,8 @@ func (a *Aggregator) getAndLockBatchToProve( // Not found, so it it not possible to verify the batch yet if sequence == nil || errors.Is(err, entities.ErrNotFound) { - tmpLogger.Infof("No sequence found for batch %d", batchNumberToVerify) + tmpLogger.Infof("Sequencing event for batch %d has not been synced yet, "+ + "so it is not possible to verify it yet. Waiting...", batchNumberToVerify) return nil, nil, nil, state.ErrNotFound } @@ -1521,23 +1062,73 @@ func (a *Aggregator) getAndLockBatchToProve( ToBatchNumber: sequence.ToBatchNumber, } - // Check if the batch is already in the DB - dbBatch, err := a.state.GetBatch(ctx, batchNumberToVerify, nil) - if err != nil { - if errors.Is(err, state.ErrNotFound) { - tmpLogger.Infof("Batch (%d) is not yet in DB", batchNumberToVerify) - } - + // Get Batch from L1 Syncer + virtualBatch, err := a.l1Syncr.GetVirtualBatchByBatchNumber(a.ctx, batchNumberToVerify) + if err != nil && !errors.Is(err, entities.ErrNotFound) { + a.logger.Errorf("Error getting virtual batch: %v", err) return nil, nil, nil, err + } else if errors.Is(err, entities.ErrNotFound) { + a.logger.Infof("Virtual batch %d has not been synced yet, "+ + "so it is not possible to verify it yet. 
Waiting...", batchNumberToVerify) + return nil, nil, nil, state.ErrNotFound } - // Check if the witness is already in the DB - if len(dbBatch.Witness) == 0 { - tmpLogger.Infof("Witness for batch %d is not yet in DB", batchNumberToVerify) + // Get Batch from RPC + rpcBatch, err := a.rpcClient.GetBatch(batchNumberToVerify) + if err != nil { + a.logger.Errorf("error getting batch %d from RPC: %v.", batchNumberToVerify, err) + return nil, nil, nil, err + } - return nil, nil, nil, state.ErrNotFound + // Compare BatchL2Data from virtual batch and rpcBatch (skipping injected batch (1)) + if batchNumberToVerify != 1 && (common.Bytes2Hex(virtualBatch.BatchL2Data) != common.Bytes2Hex(rpcBatch.L2Data())) { + a.logger.Warnf("BatchL2Data from virtual batch %d does not match the one from RPC", batchNumberToVerify) + a.logger.Warnf("VirtualBatch BatchL2Data:%v", common.Bytes2Hex(virtualBatch.BatchL2Data)) + a.logger.Warnf("RPC BatchL2Data:%v", common.Bytes2Hex(rpcBatch.L2Data())) + } + + l1InfoRoot := common.Hash{} + + if virtualBatch.L1InfoRoot == nil { + log.Debugf("L1InfoRoot is nil for batch %d", batchNumberToVerify) + virtualBatch.L1InfoRoot = &l1InfoRoot + } + + // Create state batch + stateBatch := &state.Batch{ + BatchNumber: rpcBatch.BatchNumber(), + Coinbase: rpcBatch.LastCoinbase(), + // Use L1 batch data + BatchL2Data: virtualBatch.BatchL2Data, + StateRoot: rpcBatch.StateRoot(), + LocalExitRoot: rpcBatch.LocalExitRoot(), + AccInputHash: rpcBatch.AccInputHash(), + L1InfoTreeIndex: rpcBatch.L1InfoTreeIndex(), + L1InfoRoot: *virtualBatch.L1InfoRoot, + Timestamp: time.Unix(int64(rpcBatch.LastL2BLockTimestamp()), 0), + GlobalExitRoot: rpcBatch.GlobalExitRoot(), + ChainID: a.cfg.ChainID, + ForkID: a.cfg.ForkId, + } + + // Request the witness from the server, if it is busy just keep looping until it is available + start := time.Now() + witness, err := a.rpcClient.GetWitness(batchNumberToVerify, a.cfg.UseFullWitness) + for err != nil { + if errors.Is(err, rpc.ErrBusy) { + 
a.logger.Debugf( + "Witness server is busy, retrying get witness for batch %d in %v", + batchNumberToVerify, a.cfg.RetryTime.Duration, + ) + } else { + a.logger.Errorf("Failed to get witness for batch %d, err: %v", batchNumberToVerify, err) + } + time.Sleep(a.cfg.RetryTime.Duration) } + end := time.Now() + a.logger.Debugf("Time to get witness for batch %d: %v", batchNumberToVerify, end.Sub(start)) + // Store the sequence in aggregator DB err = a.state.AddSequence(ctx, stateSequence, nil) if err != nil { tmpLogger.Infof("Error storing sequence for batch %d", batchNumberToVerify) @@ -1546,8 +1137,8 @@ func (a *Aggregator) getAndLockBatchToProve( } // All the data required to generate a proof is ready - tmpLogger.Infof("Found virtual batch %d pending to generate proof", dbBatch.Batch.BatchNumber) - tmpLogger = tmpLogger.WithFields("batch", dbBatch.Batch.BatchNumber) + tmpLogger.Infof("Found virtual batch %d pending to generate proof", virtualBatch.BatchNumber) + tmpLogger = tmpLogger.WithFields("batch", virtualBatch.BatchNumber) tmpLogger.Info("Checking profitability to aggregate batch") @@ -1567,8 +1158,8 @@ func (a *Aggregator) getAndLockBatchToProve( now := time.Now().Round(time.Microsecond) proof := &state.Proof{ - BatchNumber: dbBatch.Batch.BatchNumber, - BatchNumberFinal: dbBatch.Batch.BatchNumber, + BatchNumber: virtualBatch.BatchNumber, + BatchNumberFinal: virtualBatch.BatchNumber, Prover: &proverName, ProverID: &proverID, GeneratingSince: &now, @@ -1582,7 +1173,7 @@ func (a *Aggregator) getAndLockBatchToProve( return nil, nil, nil, err } - return &dbBatch.Batch, dbBatch.Witness, proof, nil + return stateBatch, witness, proof, nil } func (a *Aggregator) tryGenerateBatchProof(ctx context.Context, prover ProverInterface) (bool, error) { @@ -1820,16 +1411,11 @@ func (a *Aggregator) buildInputProver( forcedBlockhashL1 = l1Block.ParentHash l1InfoRoot = batchToVerify.GlobalExitRoot.Bytes() - } /*else { - forcedBlockhashL1, err = a.state.GetForcedBatchParentHash(ctx, 
*batchToVerify.ForcedBatchNum, nil) - if err != nil { - return nil, err - } - }*/ + } } // Get Old Acc Input Hash - oldDBBatch, err := a.state.GetBatch(ctx, batchToVerify.BatchNumber-1, nil) + rpcOldBatch, err := a.rpcClient.GetBatch(batchToVerify.BatchNumber - 1) if err != nil { return nil, err } @@ -1837,7 +1423,7 @@ func (a *Aggregator) buildInputProver( inputProver := &prover.StatelessInputProver{ PublicInputs: &prover.StatelessPublicInputs{ Witness: witness, - OldAccInputHash: oldDBBatch.Batch.AccInputHash.Bytes(), + OldAccInputHash: rpcOldBatch.AccInputHash().Bytes(), OldBatchNum: batchToVerify.BatchNumber - 1, ChainId: batchToVerify.ChainID, ForkId: batchToVerify.ForkID, @@ -1855,42 +1441,6 @@ func (a *Aggregator) buildInputProver( return inputProver, nil } -func (a *Aggregator) getWitness(batchNumber uint64, url string, fullWitness bool) ([]byte, error) { - var ( - witness string - response rpc.Response - err error - ) - - witnessType := "trimmed" - if fullWitness { - witnessType = "full" - } - - a.logger.Infof("Requesting witness for batch %d of type %s", batchNumber, witnessType) - - response, err = rpc.JSONRPCCall(url, "zkevm_getBatchWitness", batchNumber, witnessType) - if err != nil { - return nil, err - } - - // Check if the response is an error - if response.Error != nil { - if response.Error.Message == "busy" { - return nil, errBusy - } - - return nil, fmt.Errorf("error from witness for batch %d: %v", batchNumber, response.Error) - } - - err = json.Unmarshal(response.Result, &witness) - if err != nil { - return nil, err - } - - return common.FromHex(witness), nil -} - func printInputProver(logger *log.Logger, inputProver *prover.StatelessInputProver) { if !logger.IsEnabledLogLevel(zapcore.DebugLevel) { return diff --git a/aggregator/aggregator_test.go b/aggregator/aggregator_test.go index 657a34cf..f6e27b0f 100644 --- a/aggregator/aggregator_test.go +++ b/aggregator/aggregator_test.go @@ -20,16 +20,14 @@ import ( 
"github.com/0xPolygon/cdk/aggregator/prover" "github.com/0xPolygon/cdk/config/types" "github.com/0xPolygon/cdk/log" + rpctypes "github.com/0xPolygon/cdk/rpc/types" "github.com/0xPolygon/cdk/state" - "github.com/0xPolygon/cdk/state/datastream" - "github.com/0xPolygonHermez/zkevm-data-streamer/datastreamer" "github.com/0xPolygonHermez/zkevm-synchronizer-l1/synchronizer" "github.com/ethereum/go-ethereum/common" ethTypes "github.com/ethereum/go-ethereum/core/types" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - "google.golang.org/protobuf/proto" ) var ( @@ -57,6 +55,7 @@ type mox struct { proverMock *mocks.ProverInterfaceMock aggLayerClientMock *mocks.AgglayerClientInterfaceMock synchronizerMock *mocks.SynchronizerInterfaceMock + rpcMock *mocks.RPCInterfaceMock } func WaitUntil(t *testing.T, wg *sync.WaitGroup, timeout time.Duration) { @@ -75,28 +74,39 @@ func WaitUntil(t *testing.T, wg *sync.WaitGroup, timeout time.Duration) { } } -func Test_resetCurrentBatchData(t *testing.T) { - t.Parallel() +func Test_Start(t *testing.T) { + mockState := new(mocks.StateInterfaceMock) + mockL1Syncr := new(mocks.SynchronizerInterfaceMock) + mockEtherman := new(mocks.EthermanMock) + mockEthTxManager := new(mocks.EthTxManagerClientMock) - a := Aggregator{ - currentBatchStreamData: []byte("test"), - currentStreamBatchRaw: state.BatchRawV2{ - Blocks: []state.L2BlockRaw{ - { - BlockNumber: 1, - ChangeL2BlockHeader: state.ChangeL2BlockHeader{}, - Transactions: []state.L2TxRaw{}, - }, - }, - }, - currentStreamL2Block: state.L2BlockRaw{}, - } + mockL1Syncr.On("Sync", mock.Anything).Return(nil) + mockEtherman.On("GetLatestVerifiedBatchNum").Return(uint64(90), nil).Once() + mockState.On("DeleteUngeneratedProofs", mock.Anything, nil).Return(nil).Once() + mockState.On("CleanupLockedProofs", mock.Anything, "", nil).Return(int64(0), nil) - a.resetCurrentBatchData() + mockEthTxManager.On("Start").Return(nil) - assert.Equal(t, 
[]byte{}, a.currentBatchStreamData) - assert.Equal(t, state.BatchRawV2{Blocks: make([]state.L2BlockRaw, 0)}, a.currentStreamBatchRaw) - assert.Equal(t, state.L2BlockRaw{}, a.currentStreamL2Block) + ctx := context.Background() + a := &Aggregator{ + state: mockState, + logger: log.GetDefaultLogger(), + halted: atomic.Bool{}, + l1Syncr: mockL1Syncr, + etherman: mockEtherman, + ethTxManager: mockEthTxManager, + ctx: ctx, + stateDBMutex: &sync.Mutex{}, + timeSendFinalProofMutex: &sync.RWMutex{}, + timeCleanupLockedProofs: types.Duration{Duration: 5 * time.Second}, + } + go func() { + err := a.Start() + require.NoError(t, err) + }() + time.Sleep(time.Second) + a.ctx.Done() + time.Sleep(time.Second) } func Test_handleReorg(t *testing.T) { @@ -115,7 +125,8 @@ func Test_handleReorg(t *testing.T) { } mockL1Syncr.On("GetLastestVirtualBatchNumber", mock.Anything).Return(uint64(100), nil).Once() - mockState.On("DeleteBatchesNewerThanBatchNumber", mock.Anything, uint64(100), mock.Anything).Return(nil).Once() + mockState.On("DeleteGeneratedProofs", mock.Anything, mock.Anything, mock.Anything, nil).Return(nil).Once() + mockState.On("DeleteUngeneratedProofs", mock.Anything, nil).Return(nil).Once() go a.handleReorg(reorgData) time.Sleep(3 * time.Second) @@ -128,7 +139,6 @@ func Test_handleReorg(t *testing.T) { func Test_handleRollbackBatches(t *testing.T) { t.Parallel() - mockStreamClient := new(mocks.StreamClientMock) mockEtherman := new(mocks.EthermanMock) mockState := new(mocks.StateInterfaceMock) @@ -137,241 +147,84 @@ func Test_handleRollbackBatches(t *testing.T) { LastBatchNumber: 100, } - mockStreamClient.On("IsStarted").Return(true).Once() - mockStreamClient.On("ResetProcessEntryFunc").Return().Once() - mockStreamClient.On("SetProcessEntryFunc", mock.Anything).Return().Once() - mockStreamClient.On("ExecCommandStop").Return(nil).Once() - mockStreamClient.On("Start").Return(nil).Once() - mockStreamClient.On("ExecCommandStartBookmark", mock.Anything).Return(nil).Once() 
mockEtherman.On("GetLatestVerifiedBatchNum").Return(uint64(90), nil).Once() - mockState.On("DeleteBatchesNewerThanBatchNumber", mock.Anything, rollbackData.LastBatchNumber, nil).Return(nil).Once() - mockState.On("DeleteBatchesOlderThanBatchNumber", mock.Anything, rollbackData.LastBatchNumber, nil).Return(nil).Once() - mockState.On("DeleteUngeneratedProofs", mock.Anything, nil).Return(nil).Once() - mockState.On("DeleteGeneratedProofs", mock.Anything, rollbackData.LastBatchNumber+1, mock.AnythingOfType("uint64"), nil).Return(nil).Once() + mockState.On("DeleteUngeneratedProofs", mock.Anything, mock.Anything).Return(nil).Once() + mockState.On("DeleteGeneratedProofs", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Once() a := Aggregator{ - ctx: context.Background(), - streamClient: mockStreamClient, - etherman: mockEtherman, - state: mockState, - logger: log.GetDefaultLogger(), - halted: atomic.Bool{}, - streamClientMutex: &sync.Mutex{}, - currentBatchStreamData: []byte{}, - currentStreamBatchRaw: state.BatchRawV2{}, - currentStreamL2Block: state.L2BlockRaw{}, + ctx: context.Background(), + etherman: mockEtherman, + state: mockState, + logger: log.GetDefaultLogger(), + halted: atomic.Bool{}, } a.halted.Store(false) a.handleRollbackBatches(rollbackData) assert.False(t, a.halted.Load()) - mockStreamClient.AssertExpectations(t) mockEtherman.AssertExpectations(t) mockState.AssertExpectations(t) } -func Test_handleReceivedDataStream_BatchStart(t *testing.T) { +func Test_handleRollbackBatchesHalt(t *testing.T) { t.Parallel() + mockEtherman := new(mocks.EthermanMock) mockState := new(mocks.StateInterfaceMock) - mockL1Syncr := new(mocks.SynchronizerInterfaceMock) - agg := Aggregator{ - state: mockState, - l1Syncr: mockL1Syncr, - logger: log.GetDefaultLogger(), - halted: atomic.Bool{}, - currentStreamBatch: state.Batch{}, - } - - // Prepare a FileEntry for Batch Start - batchStartData, err := proto.Marshal(&datastream.BatchStart{ - Number: 1, - ChainId: 
2, - ForkId: 3, - Type: datastream.BatchType_BATCH_TYPE_REGULAR, - }) - assert.NoError(t, err) - - batchStartEntry := &datastreamer.FileEntry{ - Type: datastreamer.EntryType(datastream.EntryType_ENTRY_TYPE_BATCH_START), - Data: batchStartData, - } - - // Test the handleReceivedDataStream for Batch Start - err = agg.handleReceivedDataStream(batchStartEntry, nil, nil) - assert.NoError(t, err) - - assert.Equal(t, agg.currentStreamBatch.BatchNumber, uint64(1)) - assert.Equal(t, agg.currentStreamBatch.ChainID, uint64(2)) - assert.Equal(t, agg.currentStreamBatch.ForkID, uint64(3)) - assert.Equal(t, agg.currentStreamBatch.Type, datastream.BatchType_BATCH_TYPE_REGULAR) -} -func Test_handleReceivedDataStream_BatchEnd(t *testing.T) { - t.Parallel() + mockEtherman.On("GetLatestVerifiedBatchNum").Return(uint64(110), nil).Once() + mockState.On("DeleteUngeneratedProofs", mock.Anything, mock.Anything).Return(nil).Once() + mockState.On("DeleteGeneratedProofs", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Once() - mockState := new(mocks.StateInterfaceMock) - mockL1Syncr := new(mocks.SynchronizerInterfaceMock) - a := Aggregator{ - state: mockState, - l1Syncr: mockL1Syncr, - logger: log.GetDefaultLogger(), - halted: atomic.Bool{}, - currentStreamBatch: state.Batch{ - BatchNumber: uint64(2), - Type: datastream.BatchType_BATCH_TYPE_REGULAR, - Coinbase: common.Address{}, - }, - currentStreamL2Block: state.L2BlockRaw{ - BlockNumber: uint64(10), - }, - currentStreamBatchRaw: state.BatchRawV2{ - Blocks: []state.L2BlockRaw{ - { - BlockNumber: uint64(9), - ChangeL2BlockHeader: state.ChangeL2BlockHeader{}, - Transactions: []state.L2TxRaw{}, - }, - }, - }, - cfg: Config{ - UseL1BatchData: false, - }, + // Test data + rollbackData := synchronizer.RollbackBatchesData{ + LastBatchNumber: 100, } - batchEndData, err := proto.Marshal(&datastream.BatchEnd{ - Number: 1, - LocalExitRoot: []byte{1, 2, 3}, - StateRoot: []byte{4, 5, 6}, - Debug: nil, - }) - assert.NoError(t, 
err) - - batchEndEntry := &datastreamer.FileEntry{ - Type: datastreamer.EntryType(datastream.EntryType_ENTRY_TYPE_BATCH_END), - Data: batchEndData, + a := Aggregator{ + ctx: context.Background(), + etherman: mockEtherman, + state: mockState, + logger: log.GetDefaultLogger(), + halted: atomic.Bool{}, } - mockState.On("GetBatch", mock.Anything, a.currentStreamBatch.BatchNumber-1, nil). - Return(&state.DBBatch{ - Batch: state.Batch{ - AccInputHash: common.Hash{}, - }, - }, nil).Once() - mockState.On("GetBatch", mock.Anything, a.currentStreamBatch.BatchNumber, nil). - Return(&state.DBBatch{ - Witness: []byte("test_witness"), - }, nil).Once() - mockState.On("AddBatch", mock.Anything, mock.Anything, nil).Return(nil).Once() - mockL1Syncr.On("GetVirtualBatchByBatchNumber", mock.Anything, a.currentStreamBatch.BatchNumber). - Return(&synchronizer.VirtualBatch{BatchL2Data: []byte{1, 2, 3}}, nil).Once() - mockL1Syncr.On("GetSequenceByBatchNumber", mock.Anything, a.currentStreamBatch.BatchNumber). - Return(&synchronizer.SequencedBatches{ - L1InfoRoot: common.Hash{}, - Timestamp: time.Now(), - }, nil).Once() - - err = a.handleReceivedDataStream(batchEndEntry, nil, nil) - assert.NoError(t, err) - - assert.Equal(t, a.currentBatchStreamData, []byte{}) - assert.Equal(t, a.currentStreamBatchRaw, state.BatchRawV2{Blocks: make([]state.L2BlockRaw, 0)}) - assert.Equal(t, a.currentStreamL2Block, state.L2BlockRaw{}) + a.halted.Store(false) + go a.handleRollbackBatches(rollbackData) + time.Sleep(3 * time.Second) - mockState.AssertExpectations(t) - mockL1Syncr.AssertExpectations(t) + assert.True(t, a.halted.Load()) + mockEtherman.AssertExpectations(t) } -func Test_handleReceivedDataStream_L2Block(t *testing.T) { +func Test_handleRollbackBatchesError(t *testing.T) { t.Parallel() - a := Aggregator{ - currentStreamL2Block: state.L2BlockRaw{ - BlockNumber: uint64(9), - }, - currentStreamBatchRaw: state.BatchRawV2{ - Blocks: []state.L2BlockRaw{}, - }, - currentStreamBatch: state.Batch{}, - } - - 
// Mock data for L2Block - l2Block := &datastream.L2Block{ - Number: uint64(10), - DeltaTimestamp: uint32(5), - L1InfotreeIndex: uint32(1), - Coinbase: []byte{0x01}, - GlobalExitRoot: []byte{0x02}, - } + mockEtherman := new(mocks.EthermanMock) + mockState := new(mocks.StateInterfaceMock) - l2BlockData, err := proto.Marshal(l2Block) - assert.NoError(t, err) + mockEtherman.On("GetLatestVerifiedBatchNum").Return(uint64(110), fmt.Errorf("error")).Once() - l2BlockEntry := &datastreamer.FileEntry{ - Type: datastreamer.EntryType(datastream.EntryType_ENTRY_TYPE_L2_BLOCK), - Data: l2BlockData, + // Test data + rollbackData := synchronizer.RollbackBatchesData{ + LastBatchNumber: 100, } - err = a.handleReceivedDataStream(l2BlockEntry, nil, nil) - assert.NoError(t, err) - - assert.Equal(t, uint64(10), a.currentStreamL2Block.BlockNumber) - assert.Equal(t, uint32(5), a.currentStreamL2Block.ChangeL2BlockHeader.DeltaTimestamp) - assert.Equal(t, uint32(1), a.currentStreamL2Block.ChangeL2BlockHeader.IndexL1InfoTree) - assert.Equal(t, 0, len(a.currentStreamL2Block.Transactions)) - assert.Equal(t, uint32(1), a.currentStreamBatch.L1InfoTreeIndex) - assert.Equal(t, common.BytesToAddress([]byte{0x01}), a.currentStreamBatch.Coinbase) - assert.Equal(t, common.BytesToHash([]byte{0x02}), a.currentStreamBatch.GlobalExitRoot) -} - -func Test_handleReceivedDataStream_Transaction(t *testing.T) { - t.Parallel() - a := Aggregator{ - currentStreamL2Block: state.L2BlockRaw{ - Transactions: []state.L2TxRaw{}, - }, - logger: log.GetDefaultLogger(), + ctx: context.Background(), + etherman: mockEtherman, + state: mockState, + logger: log.GetDefaultLogger(), + halted: atomic.Bool{}, } - tx := ethTypes.NewTransaction( - 0, - common.HexToAddress("0x01"), - big.NewInt(1000000000000000000), - uint64(21000), - big.NewInt(20000000000), - nil, - ) - - // Encode transaction into RLP format - var buf bytes.Buffer - err := tx.EncodeRLP(&buf) - require.NoError(t, err, "Failed to encode transaction") - - transaction 
:= &datastream.Transaction{ - L2BlockNumber: uint64(10), - Index: uint64(0), - IsValid: true, - Encoded: buf.Bytes(), - EffectiveGasPricePercentage: uint32(90), - } - - transactionData, err := proto.Marshal(transaction) - assert.NoError(t, err) - - transactionEntry := &datastreamer.FileEntry{ - Type: datastreamer.EntryType(datastream.EntryType_ENTRY_TYPE_TRANSACTION), - Data: transactionData, - } - - err = a.handleReceivedDataStream(transactionEntry, nil, nil) - assert.NoError(t, err) + a.halted.Store(false) + go a.handleRollbackBatches(rollbackData) + time.Sleep(3 * time.Second) - assert.Len(t, a.currentStreamL2Block.Transactions, 1) - assert.Equal(t, uint8(90), a.currentStreamL2Block.Transactions[0].EfficiencyPercentage) - assert.False(t, a.currentStreamL2Block.Transactions[0].TxAlreadyEncoded) - assert.NotNil(t, a.currentStreamL2Block.Transactions[0].Tx) + assert.True(t, a.halted.Load()) + mockEtherman.AssertExpectations(t) } func Test_sendFinalProofSuccess(t *testing.T) { @@ -403,18 +256,13 @@ func Test_sendFinalProofSuccess(t *testing.T) { } a.cfg = cfg - m.stateMock.On("GetBatch", mock.Anything, batchNumFinal, nil).Run(func(args mock.Arguments) { - }).Return(&state.DBBatch{ - Batch: state.Batch{ - LocalExitRoot: common.Hash{}, - StateRoot: common.Hash{}, - }, - }, nil).Once() + batch := rpctypes.NewRPCBatch(batchNumFinal, common.Hash{}, []string{}, []byte{}, common.Hash{}, common.Hash{}, common.Hash{}, common.Address{}, false) + m.rpcMock.On("GetBatch", batchNumFinal).Return(batch, nil) m.etherman.On("GetRollupId").Return(uint32(1)).Once() testHash := common.BytesToHash([]byte("test hash")) - m.aggLayerClientMock.On("SendTx", mock.Anything).Return(testHash, nil).Once() - m.aggLayerClientMock.On("WaitTxToBeMined", testHash, mock.Anything).Return(nil).Once() + m.aggLayerClientMock.On("SendTx", mock.Anything).Return(testHash, nil) + m.aggLayerClientMock.On("WaitTxToBeMined", testHash, mock.Anything).Return(nil) }, asserts: func(a *Aggregator) { 
assert.False(a.verifyingProof) @@ -433,13 +281,8 @@ func Test_sendFinalProofSuccess(t *testing.T) { } a.cfg = cfg - m.stateMock.On("GetBatch", mock.Anything, batchNumFinal, nil).Run(func(args mock.Arguments) { - }).Return(&state.DBBatch{ - Batch: state.Batch{ - LocalExitRoot: common.Hash{}, - StateRoot: common.Hash{}, - }, - }, nil).Once() + batch := rpctypes.NewRPCBatch(batchNumFinal, common.Hash{}, []string{}, []byte{}, common.Hash{}, common.Hash{}, common.Hash{}, common.Address{}, false) + m.rpcMock.On("GetBatch", batchNumFinal).Return(batch, nil) m.etherman.On("BuildTrustedVerifyBatchesTxData", batchNum-1, batchNumFinal, mock.Anything, common.HexToAddress(senderAddr)).Return(&toAddr, data, nil).Once() m.ethTxManager.On("Add", mock.Anything, &toAddr, big.NewInt(0), data, a.cfg.GasOffset, (*ethTypes.BlobTxSidecar)(nil)).Return(nil, nil).Once() @@ -458,6 +301,7 @@ func Test_sendFinalProofSuccess(t *testing.T) { ethTxManager := mocks.NewEthTxManagerClientMock(t) etherman := mocks.NewEthermanMock(t) aggLayerClient := mocks.NewAgglayerClientInterfaceMock(t) + rpcMock := mocks.NewRPCInterfaceMock(t) curve := elliptic.P256() privateKey, err := ecdsa.GenerateKey(curve, rand.Reader) @@ -474,6 +318,7 @@ func Test_sendFinalProofSuccess(t *testing.T) { stateDBMutex: &sync.Mutex{}, timeSendFinalProofMutex: &sync.RWMutex{}, sequencerPrivateKey: privateKey, + rpcClient: rpcMock, } a.ctx, a.exit = context.WithCancel(context.Background()) @@ -482,6 +327,7 @@ func Test_sendFinalProofSuccess(t *testing.T) { ethTxManager: ethTxManager, etherman: etherman, aggLayerClientMock: aggLayerClient, + rpcMock: rpcMock, } if tc.setup != nil { tc.setup(m, &a) @@ -532,7 +378,7 @@ func Test_sendFinalProofError(t *testing.T) { { name: "Failed to settle on Agglayer: GetBatch error", setup: func(m mox, a *Aggregator) { - m.stateMock.On("GetBatch", mock.Anything, batchNumFinal, nil).Run(func(args mock.Arguments) { + m.rpcMock.On("GetBatch", batchNumFinal).Run(func(args mock.Arguments) { // test is 
done, stop the sendFinalProof method fmt.Println("Stopping sendFinalProof") a.exit() @@ -550,13 +396,8 @@ func Test_sendFinalProofError(t *testing.T) { } a.cfg = cfg - m.stateMock.On("GetBatch", mock.Anything, batchNumFinal, nil).Run(func(args mock.Arguments) { - }).Return(&state.DBBatch{ - Batch: state.Batch{ - LocalExitRoot: common.Hash{}, - StateRoot: common.Hash{}, - }, - }, nil).Once() + batch := rpctypes.NewRPCBatch(batchNumFinal, common.Hash{}, []string{}, []byte{}, common.Hash{}, common.Hash{}, common.Hash{}, common.Address{}, false) + m.rpcMock.On("GetBatch", batchNumFinal).Return(batch, nil) m.etherman.On("GetRollupId").Return(uint32(1)).Once() m.aggLayerClientMock.On("SendTx", mock.Anything).Run(func(args mock.Arguments) { @@ -579,13 +420,8 @@ func Test_sendFinalProofError(t *testing.T) { } a.cfg = cfg - m.stateMock.On("GetBatch", mock.Anything, batchNumFinal, nil).Run(func(args mock.Arguments) { - }).Return(&state.DBBatch{ - Batch: state.Batch{ - LocalExitRoot: common.Hash{}, - StateRoot: common.Hash{}, - }, - }, nil).Once() + batch := rpctypes.NewRPCBatch(batchNumFinal, common.Hash{}, []string{}, []byte{}, common.Hash{}, common.Hash{}, common.Hash{}, common.Address{}, false) + m.rpcMock.On("GetBatch", batchNumFinal).Return(batch, nil) m.etherman.On("GetRollupId").Return(uint32(1)).Once() m.aggLayerClientMock.On("SendTx", mock.Anything).Return(common.Hash{}, nil).Once() @@ -608,13 +444,8 @@ func Test_sendFinalProofError(t *testing.T) { } a.cfg = cfg - m.stateMock.On("GetBatch", mock.Anything, batchNumFinal, nil).Run(func(args mock.Arguments) { - }).Return(&state.DBBatch{ - Batch: state.Batch{ - LocalExitRoot: common.Hash{}, - StateRoot: common.Hash{}, - }, - }, nil).Once() + batch := rpctypes.NewRPCBatch(batchNumFinal, common.Hash{}, []string{}, []byte{}, common.Hash{}, common.Hash{}, common.Hash{}, common.Address{}, false) + m.rpcMock.On("GetBatch", batchNumFinal).Return(batch, nil) m.etherman.On("BuildTrustedVerifyBatchesTxData", batchNum-1, 
batchNumFinal, mock.Anything, sender).Run(func(args mock.Arguments) { fmt.Println("Stopping sendFinalProof") @@ -636,13 +467,8 @@ func Test_sendFinalProofError(t *testing.T) { } a.cfg = cfg - m.stateMock.On("GetBatch", mock.Anything, batchNumFinal, nil).Run(func(args mock.Arguments) { - }).Return(&state.DBBatch{ - Batch: state.Batch{ - LocalExitRoot: common.Hash{}, - StateRoot: common.Hash{}, - }, - }, nil).Once() + batch := rpctypes.NewRPCBatch(batchNumFinal, common.Hash{}, []string{}, []byte{}, common.Hash{}, common.Hash{}, common.Hash{}, common.Address{}, false) + m.rpcMock.On("GetBatch", batchNumFinal).Return(batch, nil) m.etherman.On("BuildTrustedVerifyBatchesTxData", batchNum-1, batchNumFinal, mock.Anything, sender).Return(nil, nil, nil).Once() m.ethTxManager.On("Add", mock.Anything, mock.Anything, big.NewInt(0), mock.Anything, a.cfg.GasOffset, (*ethTypes.BlobTxSidecar)(nil)).Run(func(args mock.Arguments) { @@ -664,6 +490,7 @@ func Test_sendFinalProofError(t *testing.T) { ethTxManager := mocks.NewEthTxManagerClientMock(t) etherman := mocks.NewEthermanMock(t) aggLayerClient := mocks.NewAgglayerClientInterfaceMock(t) + rpcMock := mocks.NewRPCInterfaceMock(t) curve := elliptic.P256() privateKey, err := ecdsa.GenerateKey(curve, rand.Reader) @@ -680,6 +507,7 @@ func Test_sendFinalProofError(t *testing.T) { stateDBMutex: &sync.Mutex{}, timeSendFinalProofMutex: &sync.RWMutex{}, sequencerPrivateKey: privateKey, + rpcClient: rpcMock, } a.ctx, a.exit = context.WithCancel(context.Background()) @@ -688,6 +516,7 @@ func Test_sendFinalProofError(t *testing.T) { ethTxManager: ethTxManager, etherman: etherman, aggLayerClientMock: aggLayerClient, + rpcMock: rpcMock, } if tc.setup != nil { tc.setup(m, &a) @@ -760,19 +589,13 @@ func Test_buildFinalProof(t *testing.T) { }, } - finalDBBatch := &state.DBBatch{ - Batch: state.Batch{ - StateRoot: common.BytesToHash([]byte("mock StateRoot")), - LocalExitRoot: common.BytesToHash([]byte("mock LocalExitRoot")), - }, - } - 
m.proverMock.On("Name").Return("name").Once() m.proverMock.On("ID").Return("id").Once() m.proverMock.On("Addr").Return("addr").Once() m.proverMock.On("FinalProof", recursiveProof.Proof, a.cfg.SenderAddress).Return(&finalProofID, nil).Once() m.proverMock.On("WaitFinalProof", mock.Anything, finalProofID).Return(&finalProof, nil).Once() - m.stateMock.On("GetBatch", mock.Anything, batchNumFinal, nil).Return(finalDBBatch, nil).Once() + finalBatch := rpctypes.NewRPCBatch(batchNumFinal, common.Hash{}, []string{}, []byte{}, common.Hash{}, common.BytesToHash([]byte("mock LocalExitRoot")), common.BytesToHash([]byte("mock StateRoot")), common.Address{}, false) + m.rpcMock.On("GetBatch", batchNumFinal).Return(finalBatch, nil).Once() }, asserts: func(err error, fProof *prover.FinalProof) { assert.NoError(err) @@ -789,9 +612,11 @@ func Test_buildFinalProof(t *testing.T) { t.Run(tc.name, func(t *testing.T) { proverMock := mocks.NewProverInterfaceMock(t) stateMock := mocks.NewStateInterfaceMock(t) + rpcMock := mocks.NewRPCInterfaceMock(t) m := mox{ proverMock: proverMock, stateMock: stateMock, + rpcMock: rpcMock, } a := Aggregator{ state: stateMock, @@ -799,6 +624,7 @@ func Test_buildFinalProof(t *testing.T) { cfg: Config{ SenderAddress: common.BytesToAddress([]byte("from")).Hex(), }, + rpcClient: rpcMock, } tc.setup(m, &a) @@ -1594,13 +1420,19 @@ func Test_tryGenerateBatchProof(t *testing.T) { TxProfitabilityCheckerType: ProfitabilityAcceptAll, SenderAddress: from.Hex(), IntervalAfterWhichBatchConsolidateAnyway: types.Duration{Duration: time.Second * 1}, + ChainID: uint64(1), + ForkId: uint64(12), } lastVerifiedBatchNum := uint64(22) + batchNum := uint64(23) + batchToProve := state.Batch{ BatchNumber: batchNum, } + proofID := "proofId" + proverName := "proverName" proverID := "proverID" recursiveProof := "recursiveProof" @@ -1609,6 +1441,7 @@ func Test_tryGenerateBatchProof(t *testing.T) { matchProverCtxFn := func(ctx context.Context) bool { return ctx.Value("owner") == 
ownerProver } matchAggregatorCtxFn := func(ctx context.Context) bool { return ctx.Value("owner") == ownerAggregator } fixedTimestamp := time.Date(2023, 10, 13, 15, 0, 0, 0, time.UTC) + l1InfoTreeLeaf := []synchronizer.L1InfoTreeLeaf{ { GlobalExitRoot: common.Hash{}, @@ -1682,11 +1515,13 @@ func Test_tryGenerateBatchProof(t *testing.T) { ChainID: uint64(1), ForkID: uint64(12), } - dbBatch := state.DBBatch{ - Witness: []byte("witness"), - Batch: batch, + virtualBatch := synchronizer.VirtualBatch{ + BatchNumber: lastVerifiedBatchNum + 1, + BatchL2Data: batchL2Data, + L1InfoRoot: &l1InfoRoot, } + m.synchronizerMock.On("GetVirtualBatchByBatchNumber", mock.Anything, lastVerifiedBatchNum+1).Return(&virtualBatch, nil).Once() m.etherman.On("GetLatestVerifiedBatchNum").Return(lastVerifiedBatchNum, nil).Once() m.stateMock.On("CheckProofExistsForBatch", mock.MatchedBy(matchProverCtxFn), mock.AnythingOfType("uint64"), nil).Return(false, nil).Once() sequence := synchronizer.SequencedBatches{ @@ -1694,7 +1529,12 @@ func Test_tryGenerateBatchProof(t *testing.T) { ToBatchNumber: uint64(20), } m.synchronizerMock.On("GetSequenceByBatchNumber", mock.MatchedBy(matchProverCtxFn), lastVerifiedBatchNum+1).Return(&sequence, nil).Once() - m.stateMock.On("GetBatch", mock.MatchedBy(matchProverCtxFn), lastVerifiedBatchNum+1, nil).Return(&dbBatch, nil).Once() + + rpcBatch := rpctypes.NewRPCBatch(lastVerifiedBatchNum+1, common.Hash{}, []string{}, batchL2Data, common.Hash{}, common.BytesToHash([]byte("mock LocalExitRoot")), common.BytesToHash([]byte("mock StateRoot")), common.Address{}, false) + rpcBatch.SetLastL2BLockTimestamp(uint64(time.Now().Unix())) + m.rpcMock.On("GetBatch", lastVerifiedBatchNum+1).Return(rpcBatch, nil) + m.rpcMock.On("GetWitness", lastVerifiedBatchNum+1, false).Return([]byte("witness"), nil) + m.stateMock.On("AddSequence", mock.MatchedBy(matchProverCtxFn), mock.Anything, nil).Return(nil).Once() m.stateMock.On("AddGeneratedProof", mock.MatchedBy(matchProverCtxFn), 
mock.Anything, nil).Run( func(args mock.Arguments) { @@ -1716,13 +1556,8 @@ func Test_tryGenerateBatchProof(t *testing.T) { }, }, nil).Twice() - oldDBBatch := state.DBBatch{ - Batch: state.Batch{ - AccInputHash: common.Hash{}, - }, - } - m.stateMock.On("GetBatch", mock.Anything, lastVerifiedBatchNum, nil).Return(&oldDBBatch, nil).Twice() - expectedInputProver, err := a.buildInputProver(context.Background(), &batch, dbBatch.Witness) + m.rpcMock.On("GetBatch", lastVerifiedBatchNum).Return(rpcBatch, nil) + expectedInputProver, err := a.buildInputProver(context.Background(), &batch, []byte("witness")) require.NoError(err) m.proverMock.On("BatchProof", expectedInputProver).Return(nil, errTest).Once() @@ -1733,7 +1568,6 @@ func Test_tryGenerateBatchProof(t *testing.T) { assert.ErrorIs(err, errTest) }, }, - //nolint:dupl { name: "WaitRecursiveProof prover error", setup: func(m mox, a *Aggregator) { @@ -1753,11 +1587,15 @@ func Test_tryGenerateBatchProof(t *testing.T) { ChainID: uint64(1), ForkID: uint64(12), } - dbBatch := state.DBBatch{ - Witness: []byte("witness"), - Batch: batch, + + virtualBatch := synchronizer.VirtualBatch{ + BatchNumber: lastVerifiedBatchNum + 1, + BatchL2Data: batchL2Data, + L1InfoRoot: &l1InfoRoot, } + m.synchronizerMock.On("GetVirtualBatchByBatchNumber", mock.Anything, lastVerifiedBatchNum+1).Return(&virtualBatch, nil).Once() + m.etherman.On("GetLatestVerifiedBatchNum").Return(lastVerifiedBatchNum, nil).Once() m.stateMock.On("CheckProofExistsForBatch", mock.MatchedBy(matchProverCtxFn), mock.AnythingOfType("uint64"), nil).Return(false, nil).Once() sequence := synchronizer.SequencedBatches{ @@ -1765,7 +1603,10 @@ func Test_tryGenerateBatchProof(t *testing.T) { ToBatchNumber: uint64(20), } m.synchronizerMock.On("GetSequenceByBatchNumber", mock.MatchedBy(matchProverCtxFn), lastVerifiedBatchNum+1).Return(&sequence, nil).Once() - m.stateMock.On("GetBatch", mock.MatchedBy(matchProverCtxFn), lastVerifiedBatchNum+1, nil).Return(&dbBatch, nil).Once() + 
rpcBatch := rpctypes.NewRPCBatch(lastVerifiedBatchNum+1, common.Hash{}, []string{}, batchL2Data, common.Hash{}, common.BytesToHash([]byte("mock LocalExitRoot")), common.BytesToHash([]byte("mock StateRoot")), common.Address{}, false) + rpcBatch.SetLastL2BLockTimestamp(uint64(time.Now().Unix())) + m.rpcMock.On("GetBatch", lastVerifiedBatchNum+1).Return(rpcBatch, nil) + m.rpcMock.On("GetWitness", lastVerifiedBatchNum+1, false).Return([]byte("witness"), nil) m.stateMock.On("AddSequence", mock.MatchedBy(matchProverCtxFn), mock.Anything, nil).Return(nil).Once() m.stateMock.On("AddGeneratedProof", mock.MatchedBy(matchProverCtxFn), mock.Anything, nil).Run( func(args mock.Arguments) { @@ -1788,13 +1629,8 @@ func Test_tryGenerateBatchProof(t *testing.T) { }, }, nil).Twice() - oldDBBatch := state.DBBatch{ - Batch: state.Batch{ - AccInputHash: common.Hash{}, - }, - } - m.stateMock.On("GetBatch", mock.Anything, lastVerifiedBatchNum, nil).Return(&oldDBBatch, nil).Twice() - expectedInputProver, err := a.buildInputProver(context.Background(), &batch, dbBatch.Witness) + m.rpcMock.On("GetBatch", lastVerifiedBatchNum).Return(rpcBatch, nil).Twice() + expectedInputProver, err := a.buildInputProver(context.Background(), &batch, []byte("witness")) require.NoError(err) m.proverMock.On("BatchProof", expectedInputProver).Return(&proofID, nil).Once() @@ -1805,8 +1641,7 @@ func Test_tryGenerateBatchProof(t *testing.T) { assert.False(result) assert.ErrorIs(err, errTest) }, - }, //nolint:dupl - //nolint:dupl + }, { name: "DeleteBatchProofs error after WaitRecursiveProof prover error", setup: func(m mox, a *Aggregator) { @@ -1826,10 +1661,6 @@ func Test_tryGenerateBatchProof(t *testing.T) { ChainID: uint64(1), ForkID: uint64(12), } - dbBatch := state.DBBatch{ - Witness: []byte("witness"), - Batch: batch, - } m.etherman.On("GetLatestVerifiedBatchNum").Return(lastVerifiedBatchNum, nil).Once() m.stateMock.On("CheckProofExistsForBatch", mock.MatchedBy(matchProverCtxFn), 
mock.AnythingOfType("uint64"), nil).Return(false, nil).Once() @@ -1838,7 +1669,9 @@ func Test_tryGenerateBatchProof(t *testing.T) { ToBatchNumber: uint64(20), } m.synchronizerMock.On("GetSequenceByBatchNumber", mock.MatchedBy(matchProverCtxFn), lastVerifiedBatchNum+1).Return(&sequence, nil).Once() - m.stateMock.On("GetBatch", mock.MatchedBy(matchProverCtxFn), lastVerifiedBatchNum+1, nil).Return(&dbBatch, nil).Once() + rpcBatch := rpctypes.NewRPCBatch(lastVerifiedBatchNum+1, common.Hash{}, []string{}, batchL2Data, common.Hash{}, common.BytesToHash([]byte("mock LocalExitRoot")), common.BytesToHash([]byte("mock StateRoot")), common.Address{}, false) + rpcBatch.SetLastL2BLockTimestamp(uint64(time.Now().Unix())) + m.rpcMock.On("GetBatch", lastVerifiedBatchNum+1).Return(rpcBatch, nil).Once() m.stateMock.On("AddSequence", mock.MatchedBy(matchProverCtxFn), mock.Anything, nil).Return(nil).Once() m.stateMock.On("AddGeneratedProof", mock.MatchedBy(matchProverCtxFn), mock.Anything, nil).Run( func(args mock.Arguments) { @@ -1861,15 +1694,20 @@ func Test_tryGenerateBatchProof(t *testing.T) { }, }, nil).Twice() - oldDBBatch := state.DBBatch{ - Batch: state.Batch{ - AccInputHash: common.Hash{}, - }, - } - m.stateMock.On("GetBatch", mock.Anything, lastVerifiedBatchNum, nil).Return(&oldDBBatch, nil).Twice() - expectedInputProver, err := a.buildInputProver(context.Background(), &batch, dbBatch.Witness) + m.rpcMock.On("GetBatch", lastVerifiedBatchNum).Return(rpcBatch, nil).Twice() + expectedInputProver, err := a.buildInputProver(context.Background(), &batch, []byte("witness")) require.NoError(err) + m.rpcMock.On("GetWitness", lastVerifiedBatchNum+1, false).Return([]byte("witness"), nil) + + virtualBatch := synchronizer.VirtualBatch{ + BatchNumber: lastVerifiedBatchNum + 1, + BatchL2Data: batchL2Data, + L1InfoRoot: &l1InfoRoot, + } + + m.synchronizerMock.On("GetVirtualBatchByBatchNumber", mock.Anything, lastVerifiedBatchNum+1).Return(&virtualBatch, nil).Once() + 
m.proverMock.On("BatchProof", expectedInputProver).Return(&proofID, nil).Once() m.proverMock.On("WaitRecursiveProof", mock.MatchedBy(matchProverCtxFn), proofID).Return("", common.Hash{}, errTest).Once() m.stateMock.On("DeleteGeneratedProofs", mock.MatchedBy(matchAggregatorCtxFn), batchToProve.BatchNumber, batchToProve.BatchNumber, nil).Return(errTest).Once() @@ -1878,7 +1716,7 @@ func Test_tryGenerateBatchProof(t *testing.T) { assert.False(result) assert.ErrorIs(err, errTest) }, - }, //nolint:dupl + }, { name: "not time to send final ok", setup: func(m mox, a *Aggregator) { @@ -1899,10 +1737,6 @@ func Test_tryGenerateBatchProof(t *testing.T) { ChainID: uint64(1), ForkID: uint64(12), } - dbBatch := state.DBBatch{ - Witness: []byte("witness"), - Batch: batch, - } m.etherman.On("GetLatestVerifiedBatchNum").Return(lastVerifiedBatchNum, nil).Once() m.stateMock.On("CheckProofExistsForBatch", mock.MatchedBy(matchProverCtxFn), mock.AnythingOfType("uint64"), nil).Return(false, nil).Once() @@ -1911,7 +1745,7 @@ func Test_tryGenerateBatchProof(t *testing.T) { ToBatchNumber: uint64(20), } m.synchronizerMock.On("GetSequenceByBatchNumber", mock.MatchedBy(matchProverCtxFn), lastVerifiedBatchNum+1).Return(&sequence, nil).Once() - m.stateMock.On("GetBatch", mock.MatchedBy(matchProverCtxFn), lastVerifiedBatchNum+1, nil).Return(&dbBatch, nil).Once() + m.stateMock.On("AddSequence", mock.MatchedBy(matchProverCtxFn), mock.Anything, nil).Return(nil).Once() m.stateMock.On("AddGeneratedProof", mock.MatchedBy(matchProverCtxFn), mock.Anything, nil).Run( func(args mock.Arguments) { @@ -1934,13 +1768,23 @@ func Test_tryGenerateBatchProof(t *testing.T) { }, }, nil).Twice() - oldDBBatch := state.DBBatch{ - Batch: state.Batch{ - AccInputHash: common.Hash{}, - }, + rpcBatch := rpctypes.NewRPCBatch(lastVerifiedBatchNum, common.Hash{}, []string{}, batchL2Data, common.Hash{}, common.BytesToHash([]byte("mock LocalExitRoot")), common.BytesToHash([]byte("mock StateRoot")), common.Address{}, false) + 
rpcBatch.SetLastL2BLockTimestamp(uint64(time.Now().Unix())) + rpcBatch2 := rpctypes.NewRPCBatch(lastVerifiedBatchNum+1, common.Hash{}, []string{}, batchL2Data, common.Hash{}, common.BytesToHash([]byte("mock LocalExitRoot")), common.BytesToHash([]byte("mock StateRoot")), common.Address{}, false) + rpcBatch2.SetLastL2BLockTimestamp(uint64(time.Now().Unix())) + m.rpcMock.On("GetBatch", lastVerifiedBatchNum).Return(rpcBatch, nil) + m.rpcMock.On("GetBatch", lastVerifiedBatchNum+1).Return(rpcBatch2, nil) + m.rpcMock.On("GetWitness", lastVerifiedBatchNum+1, false).Return([]byte("witness"), nil) + + virtualBatch := synchronizer.VirtualBatch{ + BatchNumber: lastVerifiedBatchNum + 1, + BatchL2Data: batchL2Data, + L1InfoRoot: &l1InfoRoot, } - m.stateMock.On("GetBatch", mock.Anything, lastVerifiedBatchNum, nil).Return(&oldDBBatch, nil).Twice() - expectedInputProver, err := a.buildInputProver(context.Background(), &batch, dbBatch.Witness) + + m.synchronizerMock.On("GetVirtualBatchByBatchNumber", mock.Anything, lastVerifiedBatchNum+1).Return(&virtualBatch, nil).Once() + + expectedInputProver, err := a.buildInputProver(context.Background(), &batch, []byte("witness")) require.NoError(err) m.proverMock.On("BatchProof", expectedInputProver).Return(&proofID, nil).Once() @@ -1987,10 +1831,6 @@ func Test_tryGenerateBatchProof(t *testing.T) { ChainID: uint64(1), ForkID: uint64(12), } - dbBatch := state.DBBatch{ - Witness: []byte("witness"), - Batch: batch, - } m.etherman.On("GetLatestVerifiedBatchNum").Return(lastVerifiedBatchNum, nil).Once() m.stateMock.On("CheckProofExistsForBatch", mock.MatchedBy(matchProverCtxFn), mock.AnythingOfType("uint64"), nil).Return(false, nil).Once() @@ -1999,7 +1839,25 @@ func Test_tryGenerateBatchProof(t *testing.T) { ToBatchNumber: uint64(20), } m.synchronizerMock.On("GetSequenceByBatchNumber", mock.MatchedBy(matchProverCtxFn), lastVerifiedBatchNum+1).Return(&sequence, nil).Once() - m.stateMock.On("GetBatch", mock.MatchedBy(matchProverCtxFn), 
lastVerifiedBatchNum+1, nil).Return(&dbBatch, nil).Once() + + rpcBatch := rpctypes.NewRPCBatch(lastVerifiedBatchNum, common.Hash{}, []string{}, batchL2Data, common.Hash{}, common.BytesToHash([]byte("mock LocalExitRoot")), common.BytesToHash([]byte("mock StateRoot")), common.Address{}, false) + rpcBatch.SetLastL2BLockTimestamp(uint64(time.Now().Unix())) + rpcBatch2 := rpctypes.NewRPCBatch(lastVerifiedBatchNum+1, common.Hash{}, []string{}, batchL2Data, common.Hash{}, common.BytesToHash([]byte("mock LocalExitRoot")), common.BytesToHash([]byte("mock StateRoot")), common.Address{}, false) + rpcBatch2.SetLastL2BLockTimestamp(uint64(time.Now().Unix())) + m.rpcMock.On("GetBatch", lastVerifiedBatchNum).Return(rpcBatch, nil) + m.rpcMock.On("GetBatch", lastVerifiedBatchNum+1).Return(rpcBatch2, nil) + m.rpcMock.On("GetWitness", lastVerifiedBatchNum+1, false).Return([]byte("witness"), nil) + + virtualBatch := synchronizer.VirtualBatch{ + BatchNumber: lastVerifiedBatchNum + 1, + BatchL2Data: batchL2Data, + L1InfoRoot: &l1InfoRoot, + } + + m.synchronizerMock.On("GetVirtualBatchByBatchNumber", mock.Anything, lastVerifiedBatchNum+1).Return(&virtualBatch, nil).Once() + + m.rpcMock.On("GetWitness", lastVerifiedBatchNum+1, false).Return([]byte("witness"), nil) + m.stateMock.On("AddSequence", mock.MatchedBy(matchProverCtxFn), mock.Anything, nil).Return(nil).Once() m.stateMock.On("AddGeneratedProof", mock.MatchedBy(matchProverCtxFn), mock.Anything, nil).Run( func(args mock.Arguments) { @@ -2022,13 +1880,7 @@ func Test_tryGenerateBatchProof(t *testing.T) { }, }, nil).Twice() - oldDBBatch := state.DBBatch{ - Batch: state.Batch{ - AccInputHash: common.Hash{}, - }, - } - m.stateMock.On("GetBatch", mock.Anything, lastVerifiedBatchNum, nil).Return(&oldDBBatch, nil).Twice() - expectedInputProver, err := a.buildInputProver(context.Background(), &batch, dbBatch.Witness) + expectedInputProver, err := a.buildInputProver(context.Background(), &batch, []byte("witness")) require.NoError(err) 
m.proverMock.On("BatchProof", expectedInputProver).Return(&proofID, nil).Once() @@ -2064,6 +1916,7 @@ func Test_tryGenerateBatchProof(t *testing.T) { etherman := mocks.NewEthermanMock(t) proverMock := mocks.NewProverInterfaceMock(t) synchronizerMock := mocks.NewSynchronizerInterfaceMock(t) + mockRPC := mocks.NewRPCInterfaceMock(t) a := Aggregator{ cfg: cfg, @@ -2077,6 +1930,7 @@ func Test_tryGenerateBatchProof(t *testing.T) { finalProof: make(chan finalProofMsg), profitabilityChecker: NewTxProfitabilityCheckerAcceptAll(stateMock, cfg.IntervalAfterWhichBatchConsolidateAnyway.Duration), l1Syncr: synchronizerMock, + rpcClient: mockRPC, } aggregatorCtx := context.WithValue(context.Background(), "owner", ownerAggregator) //nolint:staticcheck a.ctx, a.exit = context.WithCancel(aggregatorCtx) @@ -2087,6 +1941,7 @@ func Test_tryGenerateBatchProof(t *testing.T) { etherman: etherman, proverMock: proverMock, synchronizerMock: synchronizerMock, + rpcMock: mockRPC, } if tc.setup != nil { tc.setup(m, &a) diff --git a/aggregator/config.go b/aggregator/config.go index fbbc9c9b..cdef80fd 100644 --- a/aggregator/config.go +++ b/aggregator/config.go @@ -112,21 +112,18 @@ type Config struct { // final gas: 1100 GasOffset uint64 `mapstructure:"GasOffset"` + // RPCURL is the URL of the RPC server + RPCURL string `mapstructure:"RPCURL"` + // WitnessURL is the URL of the witness server WitnessURL string `mapstructure:"WitnessURL"` - // UseL1BatchData is a flag to enable the use of L1 batch data in the aggregator - UseL1BatchData bool `mapstructure:"UseL1BatchData"` - // UseFullWitness is a flag to enable the use of full witness in the aggregator UseFullWitness bool `mapstructure:"UseFullWitness"` // DB is the database configuration DB db.Config `mapstructure:"DB"` - // StreamClient is the config for the stream client - StreamClient StreamClientCfg `mapstructure:"StreamClient"` - // EthTxManager is the config for the ethtxmanager EthTxManager ethtxmanager.Config 
`mapstructure:"EthTxManager"` @@ -149,22 +146,11 @@ type Config struct { // AggLayerURL url of the agglayer service AggLayerURL string `mapstructure:"AggLayerURL"` - // MaxWitnessRetrievalWorkers is the maximum number of workers that will be used to retrieve the witness - MaxWitnessRetrievalWorkers int `mapstructure:"MaxWitnessRetrievalWorkers"` - // SyncModeOnlyEnabled is a flag that activates sync mode exclusively. // When enabled, the aggregator will sync data only from L1 and will not generate or read the data stream. SyncModeOnlyEnabled bool `mapstructure:"SyncModeOnlyEnabled"` } -// StreamClientCfg contains the data streamer's configuration properties -type StreamClientCfg struct { - // Datastream server to connect - Server string `mapstructure:"Server"` - // Log is the log configuration - Log log.Config `mapstructure:"Log"` -} - // newKeyFromKeystore creates a private key from a keystore file func newKeyFromKeystore(cfg types.KeystoreFileConfig) (*ecdsa.PrivateKey, error) { if cfg.Path == "" && cfg.Password == "" { diff --git a/aggregator/db/migrations/0004.sql b/aggregator/db/migrations/0004.sql new file mode 100644 index 00000000..cb186fc0 --- /dev/null +++ b/aggregator/db/migrations/0004.sql @@ -0,0 +1,23 @@ +-- +migrate Down +CREATE TABLE IF NOT EXISTS aggregator.batch ( + batch_num BIGINT NOT NULL, + batch jsonb NOT NULL, + datastream varchar NOT NULL, + PRIMARY KEY (batch_num) +); + +ALTER TABLE aggregator.proof + ADD CONSTRAINT IF NOT EXISTS proof_batch_num_fkey FOREIGN KEY (batch_num) REFERENCES aggregator.batch (batch_num) ON DELETE CASCADE; + +ALTER TABLE aggregator.sequence + ADD CONSTRAINT IF NOT EXISTS sequence_from_batch_num_fkey FOREIGN KEY (from_batch_num) REFERENCES aggregator.batch (batch_num) ON DELETE CASCADE; + + +-- +migrate Up +ALTER TABLE aggregator.proof + DROP CONSTRAINT IF EXISTS proof_batch_num_fkey; + +ALTER TABLE aggregator.sequence + DROP CONSTRAINT IF EXISTS sequence_from_batch_num_fkey; + +DROP TABLE IF EXISTS 
aggregator.batch; diff --git a/aggregator/interfaces.go b/aggregator/interfaces.go index ee70d07c..81f63d94 100644 --- a/aggregator/interfaces.go +++ b/aggregator/interfaces.go @@ -6,17 +6,21 @@ import ( ethmanTypes "github.com/0xPolygon/cdk/aggregator/ethmantypes" "github.com/0xPolygon/cdk/aggregator/prover" + "github.com/0xPolygon/cdk/rpc/types" "github.com/0xPolygon/cdk/state" "github.com/0xPolygon/zkevm-ethtx-manager/ethtxmanager" ethtxtypes "github.com/0xPolygon/zkevm-ethtx-manager/types" - "github.com/0xPolygonHermez/zkevm-data-streamer/datastreamer" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" + ethtypes "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto/kzg4844" "github.com/jackc/pgx/v4" ) // Consumer interfaces required by the package. +type RPCInterface interface { + GetBatch(batchNumber uint64) (*types.RPCBatch, error) + GetWitness(batchNumber uint64, fullWitness bool) ([]byte, error) +} type ProverInterface interface { Name() string @@ -37,9 +41,9 @@ type Etherman interface { BuildTrustedVerifyBatchesTxData( lastVerifiedBatch, newVerifiedBatch uint64, inputs *ethmanTypes.FinalProofInputs, beneficiary common.Address, ) (to *common.Address, data []byte, err error) - GetLatestBlockHeader(ctx context.Context) (*types.Header, error) + GetLatestBlockHeader(ctx context.Context) (*ethtypes.Header, error) GetBatchAccInputHash(ctx context.Context, batchNumber uint64) (common.Hash, error) - HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) + HeaderByNumber(ctx context.Context, number *big.Int) (*ethtypes.Header, error) } // aggregatorTxProfitabilityChecker interface for different profitability @@ -62,26 +66,6 @@ type StateInterface interface { CleanupLockedProofs(ctx context.Context, duration string, dbTx pgx.Tx) (int64, error) CheckProofExistsForBatch(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) (bool, error) AddSequence(ctx context.Context, sequence 
state.Sequence, dbTx pgx.Tx) error - AddBatch(ctx context.Context, dbBatch *state.DBBatch, dbTx pgx.Tx) error - GetBatch(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) (*state.DBBatch, error) - DeleteBatchesOlderThanBatchNumber(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) error - DeleteBatchesNewerThanBatchNumber(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) error -} - -// StreamClient represents the stream client behaviour -type StreamClient interface { - Start() error - ExecCommandStart(fromEntry uint64) error - ExecCommandStartBookmark(fromBookmark []byte) error - ExecCommandStop() error - ExecCommandGetHeader() (datastreamer.HeaderEntry, error) - ExecCommandGetEntry(fromEntry uint64) (datastreamer.FileEntry, error) - ExecCommandGetBookmark(fromBookmark []byte) (datastreamer.FileEntry, error) - GetFromStream() uint64 - GetTotalEntries() uint64 - SetProcessEntryFunc(f datastreamer.ProcessEntryFunc) - ResetProcessEntryFunc() - IsStarted() bool } // EthTxManagerClient represents the eth tx manager interface @@ -92,7 +76,7 @@ type EthTxManagerClient interface { value *big.Int, data []byte, gasOffset uint64, - sidecar *types.BlobTxSidecar, + sidecar *ethtypes.BlobTxSidecar, ) (common.Hash, error) AddWithGas( ctx context.Context, @@ -100,11 +84,11 @@ type EthTxManagerClient interface { value *big.Int, data []byte, gasOffset uint64, - sidecar *types.BlobTxSidecar, + sidecar *ethtypes.BlobTxSidecar, gas uint64, ) (common.Hash, error) EncodeBlobData(data []byte) (kzg4844.Blob, error) - MakeBlobSidecar(blobs []kzg4844.Blob) *types.BlobTxSidecar + MakeBlobSidecar(blobs []kzg4844.Blob) *ethtypes.BlobTxSidecar ProcessPendingMonitoredTxs(ctx context.Context, resultHandler ethtxmanager.ResultHandler) Remove(ctx context.Context, id common.Hash) error RemoveAll(ctx context.Context) error diff --git a/aggregator/mocks/mock_StreamClient.go b/aggregator/mocks/mock_StreamClient.go deleted file mode 100644 index 7962d31e..00000000 --- 
a/aggregator/mocks/mock_StreamClient.go +++ /dev/null @@ -1,247 +0,0 @@ -// Code generated by mockery v2.39.0. DO NOT EDIT. - -package mocks - -import ( - datastreamer "github.com/0xPolygonHermez/zkevm-data-streamer/datastreamer" - mock "github.com/stretchr/testify/mock" -) - -// StreamClientMock is an autogenerated mock type for the StreamClient type -type StreamClientMock struct { - mock.Mock -} - -// ExecCommandGetBookmark provides a mock function with given fields: fromBookmark -func (_m *StreamClientMock) ExecCommandGetBookmark(fromBookmark []byte) (datastreamer.FileEntry, error) { - ret := _m.Called(fromBookmark) - - if len(ret) == 0 { - panic("no return value specified for ExecCommandGetBookmark") - } - - var r0 datastreamer.FileEntry - var r1 error - if rf, ok := ret.Get(0).(func([]byte) (datastreamer.FileEntry, error)); ok { - return rf(fromBookmark) - } - if rf, ok := ret.Get(0).(func([]byte) datastreamer.FileEntry); ok { - r0 = rf(fromBookmark) - } else { - r0 = ret.Get(0).(datastreamer.FileEntry) - } - - if rf, ok := ret.Get(1).(func([]byte) error); ok { - r1 = rf(fromBookmark) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ExecCommandGetEntry provides a mock function with given fields: fromEntry -func (_m *StreamClientMock) ExecCommandGetEntry(fromEntry uint64) (datastreamer.FileEntry, error) { - ret := _m.Called(fromEntry) - - if len(ret) == 0 { - panic("no return value specified for ExecCommandGetEntry") - } - - var r0 datastreamer.FileEntry - var r1 error - if rf, ok := ret.Get(0).(func(uint64) (datastreamer.FileEntry, error)); ok { - return rf(fromEntry) - } - if rf, ok := ret.Get(0).(func(uint64) datastreamer.FileEntry); ok { - r0 = rf(fromEntry) - } else { - r0 = ret.Get(0).(datastreamer.FileEntry) - } - - if rf, ok := ret.Get(1).(func(uint64) error); ok { - r1 = rf(fromEntry) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ExecCommandGetHeader provides a mock function with given fields: -func (_m *StreamClientMock) 
ExecCommandGetHeader() (datastreamer.HeaderEntry, error) { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for ExecCommandGetHeader") - } - - var r0 datastreamer.HeaderEntry - var r1 error - if rf, ok := ret.Get(0).(func() (datastreamer.HeaderEntry, error)); ok { - return rf() - } - if rf, ok := ret.Get(0).(func() datastreamer.HeaderEntry); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(datastreamer.HeaderEntry) - } - - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ExecCommandStart provides a mock function with given fields: fromEntry -func (_m *StreamClientMock) ExecCommandStart(fromEntry uint64) error { - ret := _m.Called(fromEntry) - - if len(ret) == 0 { - panic("no return value specified for ExecCommandStart") - } - - var r0 error - if rf, ok := ret.Get(0).(func(uint64) error); ok { - r0 = rf(fromEntry) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// ExecCommandStartBookmark provides a mock function with given fields: fromBookmark -func (_m *StreamClientMock) ExecCommandStartBookmark(fromBookmark []byte) error { - ret := _m.Called(fromBookmark) - - if len(ret) == 0 { - panic("no return value specified for ExecCommandStartBookmark") - } - - var r0 error - if rf, ok := ret.Get(0).(func([]byte) error); ok { - r0 = rf(fromBookmark) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// ExecCommandStop provides a mock function with given fields: -func (_m *StreamClientMock) ExecCommandStop() error { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for ExecCommandStop") - } - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// GetFromStream provides a mock function with given fields: -func (_m *StreamClientMock) GetFromStream() uint64 { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for GetFromStream") - } - - 
var r0 uint64 - if rf, ok := ret.Get(0).(func() uint64); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(uint64) - } - - return r0 -} - -// GetTotalEntries provides a mock function with given fields: -func (_m *StreamClientMock) GetTotalEntries() uint64 { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for GetTotalEntries") - } - - var r0 uint64 - if rf, ok := ret.Get(0).(func() uint64); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(uint64) - } - - return r0 -} - -// IsStarted provides a mock function with given fields: -func (_m *StreamClientMock) IsStarted() bool { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for IsStarted") - } - - var r0 bool - if rf, ok := ret.Get(0).(func() bool); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(bool) - } - - return r0 -} - -// ResetProcessEntryFunc provides a mock function with given fields: -func (_m *StreamClientMock) ResetProcessEntryFunc() { - _m.Called() -} - -// SetProcessEntryFunc provides a mock function with given fields: f -func (_m *StreamClientMock) SetProcessEntryFunc(f datastreamer.ProcessEntryFunc) { - _m.Called(f) -} - -// Start provides a mock function with given fields: -func (_m *StreamClientMock) Start() error { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for Start") - } - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// NewStreamClientMock creates a new instance of StreamClientMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. 
-func NewStreamClientMock(t interface { - mock.TestingT - Cleanup(func()) -}) *StreamClientMock { - mock := &StreamClientMock{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/aggregator/mocks/mock_rpc.go b/aggregator/mocks/mock_rpc.go new file mode 100644 index 00000000..2f3c07e4 --- /dev/null +++ b/aggregator/mocks/mock_rpc.go @@ -0,0 +1,87 @@ +// Code generated by mockery v2.39.0. DO NOT EDIT. + +package mocks + +import ( + types "github.com/0xPolygon/cdk/rpc/types" + mock "github.com/stretchr/testify/mock" +) + +// RPCInterfaceMock is an autogenerated mock type for the RPCInterface type +type RPCInterfaceMock struct { + mock.Mock +} + +// GetBatch provides a mock function with given fields: batchNumber +func (_m *RPCInterfaceMock) GetBatch(batchNumber uint64) (*types.RPCBatch, error) { + ret := _m.Called(batchNumber) + + if len(ret) == 0 { + panic("no return value specified for GetBatch") + } + + var r0 *types.RPCBatch + var r1 error + if rf, ok := ret.Get(0).(func(uint64) (*types.RPCBatch, error)); ok { + return rf(batchNumber) + } + if rf, ok := ret.Get(0).(func(uint64) *types.RPCBatch); ok { + r0 = rf(batchNumber) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.RPCBatch) + } + } + + if rf, ok := ret.Get(1).(func(uint64) error); ok { + r1 = rf(batchNumber) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetWitness provides a mock function with given fields: batchNumber, fullWitness +func (_m *RPCInterfaceMock) GetWitness(batchNumber uint64, fullWitness bool) ([]byte, error) { + ret := _m.Called(batchNumber, fullWitness) + + if len(ret) == 0 { + panic("no return value specified for GetWitness") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(uint64, bool) ([]byte, error)); ok { + return rf(batchNumber, fullWitness) + } + if rf, ok := ret.Get(0).(func(uint64, bool) []byte); ok { + r0 = rf(batchNumber, fullWitness) + } else { + if ret.Get(0) != nil { + r0 = 
ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(uint64, bool) error); ok { + r1 = rf(batchNumber, fullWitness) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewRPCInterfaceMock creates a new instance of RPCInterfaceMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewRPCInterfaceMock(t interface { + mock.TestingT + Cleanup(func()) +}) *RPCInterfaceMock { + mock := &RPCInterfaceMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/aggregator/mocks/mock_state.go b/aggregator/mocks/mock_state.go index 8879dd05..74c9021b 100644 --- a/aggregator/mocks/mock_state.go +++ b/aggregator/mocks/mock_state.go @@ -16,24 +16,6 @@ type StateInterfaceMock struct { mock.Mock } -// AddBatch provides a mock function with given fields: ctx, dbBatch, dbTx -func (_m *StateInterfaceMock) AddBatch(ctx context.Context, dbBatch *state.DBBatch, dbTx pgx.Tx) error { - ret := _m.Called(ctx, dbBatch, dbTx) - - if len(ret) == 0 { - panic("no return value specified for AddBatch") - } - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *state.DBBatch, pgx.Tx) error); ok { - r0 = rf(ctx, dbBatch, dbTx) - } else { - r0 = ret.Error(0) - } - - return r0 -} - // AddGeneratedProof provides a mock function with given fields: ctx, proof, dbTx func (_m *StateInterfaceMock) AddGeneratedProof(ctx context.Context, proof *state.Proof, dbTx pgx.Tx) error { ret := _m.Called(ctx, proof, dbTx) @@ -202,42 +184,6 @@ func (_m *StateInterfaceMock) CleanupLockedProofs(ctx context.Context, duration return r0, r1 } -// DeleteBatchesNewerThanBatchNumber provides a mock function with given fields: ctx, batchNumber, dbTx -func (_m *StateInterfaceMock) DeleteBatchesNewerThanBatchNumber(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) error { - ret := _m.Called(ctx, batchNumber, dbTx) - - if 
len(ret) == 0 { - panic("no return value specified for DeleteBatchesNewerThanBatchNumber") - } - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) error); ok { - r0 = rf(ctx, batchNumber, dbTx) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DeleteBatchesOlderThanBatchNumber provides a mock function with given fields: ctx, batchNumber, dbTx -func (_m *StateInterfaceMock) DeleteBatchesOlderThanBatchNumber(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) error { - ret := _m.Called(ctx, batchNumber, dbTx) - - if len(ret) == 0 { - panic("no return value specified for DeleteBatchesOlderThanBatchNumber") - } - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) error); ok { - r0 = rf(ctx, batchNumber, dbTx) - } else { - r0 = ret.Error(0) - } - - return r0 -} - // DeleteGeneratedProofs provides a mock function with given fields: ctx, batchNumber, batchNumberFinal, dbTx func (_m *StateInterfaceMock) DeleteGeneratedProofs(ctx context.Context, batchNumber uint64, batchNumberFinal uint64, dbTx pgx.Tx) error { ret := _m.Called(ctx, batchNumber, batchNumberFinal, dbTx) @@ -274,36 +220,6 @@ func (_m *StateInterfaceMock) DeleteUngeneratedProofs(ctx context.Context, dbTx return r0 } -// GetBatch provides a mock function with given fields: ctx, batchNumber, dbTx -func (_m *StateInterfaceMock) GetBatch(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) (*state.DBBatch, error) { - ret := _m.Called(ctx, batchNumber, dbTx) - - if len(ret) == 0 { - panic("no return value specified for GetBatch") - } - - var r0 *state.DBBatch - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) (*state.DBBatch, error)); ok { - return rf(ctx, batchNumber, dbTx) - } - if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) *state.DBBatch); ok { - r0 = rf(ctx, batchNumber, dbTx) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*state.DBBatch) - } - } - - if rf, ok := 
ret.Get(1).(func(context.Context, uint64, pgx.Tx) error); ok { - r1 = rf(ctx, batchNumber, dbTx) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - // GetProofReadyToVerify provides a mock function with given fields: ctx, lastVerfiedBatchNumber, dbTx func (_m *StateInterfaceMock) GetProofReadyToVerify(ctx context.Context, lastVerfiedBatchNumber uint64, dbTx pgx.Tx) (*state.Proof, error) { ret := _m.Called(ctx, lastVerfiedBatchNumber, dbTx) diff --git a/config/default.go b/config/default.go index 442a44e0..5e5fafcb 100644 --- a/config/default.go +++ b/config/default.go @@ -4,7 +4,7 @@ package config // environment / deployment const DefaultMandatoryVars = ` L1URL = "http://localhost:8545" -L2URL = "localhost:8123" +L2URL = "http://localhost:8123" L1AggOracleURL = "http://test-aggoracle-l1:8545" L2AggOracleURL = "http://test-aggoracle-l2:8545" @@ -16,9 +16,8 @@ IsValidiumMode = false L2Coinbase = "0xfa3b44587990f97ba8b6ba7e230a5f0e95d14b3d" SequencerPrivateKeyPath = "/app/sequencer.keystore" SequencerPrivateKeyPassword = "test" -WitnessURL = "localhost:8123" +WitnessURL = "http://localhost:8123" AggLayerURL = "https://agglayer-dev.polygon.technology" -StreamServer = "localhost:6900" AggregatorPrivateKeyPath = "/app/keystore/aggregator.keystore" AggregatorPrivateKeyPassword = "testonly" @@ -133,13 +132,12 @@ SenderAddress = "{{SenderProofToL1Addr}}" CleanupLockedProofsInterval = "2m" GeneratingProofCleanupThreshold = "10m" GasOffset = 0 +RPCURL = "{{L2URL}}" WitnessURL = "{{WitnessURL}}" -UseL1BatchData = true UseFullWitness = false SettlementBackend = "l1" AggLayerTxTimeout = "5m" AggLayerURL = "{{AggLayerURL}}" -MaxWitnessRetrievalWorkers = 2 SyncModeOnlyEnabled = false [Aggregator.SequencerPrivateKey] Path = "{{SequencerPrivateKeyPath}}" @@ -156,8 +154,6 @@ SyncModeOnlyEnabled = false Environment ="{{Log.Environment}}" # "production" or "development" Level = "{{Log.Level}}" Outputs = ["stderr"] - [Aggregator.StreamClient] - Server = "{{StreamServer}}" 
[Aggregator.EthTxManager] FrequencyToMonitorTxs = "1s" WaitTxToBeMined = "2m" diff --git a/crates/cdk-config/src/aggregator.rs b/crates/cdk-config/src/aggregator.rs index 2e059a2f..8f37a9af 100644 --- a/crates/cdk-config/src/aggregator.rs +++ b/crates/cdk-config/src/aggregator.rs @@ -2,21 +2,6 @@ use ethers::types::Address; use serde::Deserialize; use url::Url; -/// The StreamClient configuration. -#[derive(Deserialize, Debug, Clone)] -pub struct StreamClient { - #[serde(rename = "Server", default)] - pub server: String, -} - -impl Default for StreamClient { - fn default() -> Self { - Self { - server: "localhost:9092".to_string(), - } - } -} - #[derive(Deserialize, Debug, Clone)] pub struct EthTxManager { #[serde(rename = "Etherman")] @@ -74,6 +59,8 @@ pub struct Aggregator { pub generating_proof_cleanup_threshold: String, #[serde(rename = "GasOffset", default)] pub gas_offset: u64, + #[serde(rename = "RPCURL", default = "default_url")] + pub rpc_url: Url, #[serde(rename = "WitnessURL", default = "default_url")] pub witness_url: Url, #[serde(rename = "SenderAddress", default = "default_address")] @@ -84,18 +71,11 @@ pub struct Aggregator { pub agg_layer_tx_timeout: String, #[serde(rename = "AggLayerURL", default = "default_url")] pub agg_layer_url: Url, - #[serde(rename = "UseL1BatchData", default)] - pub use_l1_batch_data: bool, #[serde(rename = "UseFullWitness", default)] pub use_full_witness: bool, - #[serde(rename = "MaxWitnessRetrievalWorkers", default)] - pub max_witness_retrieval_workers: u32, #[serde(rename = "SyncModeOnlyEnabled", default)] pub sync_mode_only_enabled: bool, - #[serde(rename = "StreamClient", default)] - pub stream_client: StreamClient, - #[serde(rename = "EthTxManager", default)] pub eth_tx_manager: EthTxManager, } @@ -127,18 +107,14 @@ impl Default for Aggregator { cleanup_locked_proofs_interval: "1h".to_string(), generating_proof_cleanup_threshold: "10m".to_string(), gas_offset: 0, + rpc_url: default_url(), witness_url: default_url(), 
sender_address: default_address(), settlement_backend: "default".to_string(), agg_layer_tx_timeout: "30s".to_string(), agg_layer_url: Url::parse("http://localhost:8547").unwrap(), - use_l1_batch_data: true, use_full_witness: false, - max_witness_retrieval_workers: 4, sync_mode_only_enabled: false, - stream_client: StreamClient { - server: "localhost:9092".to_string(), - }, eth_tx_manager: EthTxManager { etherman: Etherman { url: "http://localhost:9093".to_string(), diff --git a/crates/cdk/src/config_render.rs b/crates/cdk/src/config_render.rs index 2c230c52..ab3d05c3 100644 --- a/crates/cdk/src/config_render.rs +++ b/crates/cdk/src/config_render.rs @@ -93,7 +93,6 @@ fn render_yaml(config: &Config, res: Rendered) -> String { chain: dynamic-{chain_id} zkevm.l2-chain-id: {chain_id} zkevm.l2-sequencer-rpc-url: {l2_sequencer_rpc_url} -zkevm.l2-datastreamer-url: {datastreamer_host} zkevm.l1-chain-id: {l1_chain_id} zkevm.l1-rpc-url: {l1_rpc_url} zkevm.address-sequencer: {sequencer_address} @@ -117,7 +116,6 @@ ws: true "#, chain_id = config.aggregator.chain_id.clone(), l2_sequencer_rpc_url = config.aggregator.witness_url.to_string(), - datastreamer_host = config.aggregator.stream_client.server, l1_rpc_url = config.aggregator.eth_tx_manager.etherman.url, l1_chain_id = config.network_config.l1.l1_chain_id, sequencer_address = config.sequence_sender.l2_coinbase, diff --git a/go.mod b/go.mod index 631e54b7..ae03382e 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,6 @@ require ( github.com/0xPolygon/cdk-data-availability v0.0.10 github.com/0xPolygon/cdk-rpc v0.0.0-20241004114257-6c3cb6eebfb6 github.com/0xPolygon/zkevm-ethtx-manager v0.2.0 - github.com/0xPolygonHermez/zkevm-data-streamer v0.2.7 github.com/0xPolygonHermez/zkevm-synchronizer-l1 v1.0.5 github.com/ethereum/go-ethereum v1.14.8 github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3 diff --git a/go.sum b/go.sum index 4a0095c8..96f2dc93 100644 --- a/go.sum +++ b/go.sum @@ -6,10 +6,6 @@ 
github.com/0xPolygon/cdk-rpc v0.0.0-20241004114257-6c3cb6eebfb6 h1:FXL/rcO7/GtZ3 github.com/0xPolygon/cdk-rpc v0.0.0-20241004114257-6c3cb6eebfb6/go.mod h1:2scWqMMufrQXu7TikDgQ3BsyaKoX8qP26D6E262vSOg= github.com/0xPolygon/zkevm-ethtx-manager v0.2.0 h1:QWE6nKBBHkMEiza723hJk0+oZbLSdQZTX4I48jWw15I= github.com/0xPolygon/zkevm-ethtx-manager v0.2.0/go.mod h1:lqQmzSo2OXEZItD0R4Cd+lqKFxphXEWgqHefVcGDZZc= -github.com/0xPolygonHermez/zkevm-data-streamer v0.2.7 h1:73sYxRQ9cOmtYBEyHePgEwrVULR+YruSQxVXCt/SmzU= -github.com/0xPolygonHermez/zkevm-data-streamer v0.2.7/go.mod h1:7nM7Ihk+fTG1TQPwdZoGOYd3wprqqyIyjtS514uHzWE= -github.com/0xPolygonHermez/zkevm-synchronizer-l1 v1.0.4 h1:+ZbyEpaBZu88jWtov/7iBWvwgBMu5cxlvAFDxsPrnGQ= -github.com/0xPolygonHermez/zkevm-synchronizer-l1 v1.0.4/go.mod h1:X4Su/M/+hSISqdl9yomKlRsbTyuZHsRohporyHsP8gg= github.com/0xPolygonHermez/zkevm-synchronizer-l1 v1.0.5 h1:YmnhuCl349MoNASN0fMeGKU1o9HqJhiZkfMsA/1cTRA= github.com/0xPolygonHermez/zkevm-synchronizer-l1 v1.0.5/go.mod h1:X4Su/M/+hSISqdl9yomKlRsbTyuZHsRohporyHsP8gg= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= diff --git a/sequencesender/rpc.go b/rpc/batch.go similarity index 52% rename from sequencesender/rpc.go rename to rpc/batch.go index a604da37..59e10b20 100644 --- a/sequencesender/rpc.go +++ b/rpc/batch.go @@ -1,23 +1,42 @@ -package sequencesender +package rpc import ( "encoding/json" + "errors" "fmt" "math/big" "github.com/0xPolygon/cdk-rpc/rpc" "github.com/0xPolygon/cdk/log" - "github.com/0xPolygon/cdk/sequencesender/seqsendertypes/rpcbatch" + "github.com/0xPolygon/cdk/rpc/types" "github.com/0xPolygon/cdk/state" "github.com/ethereum/go-ethereum/common" ) -func getBatchFromRPC(addr string, batchNumber uint64) (*rpcbatch.RPCBatch, error) { +var ( + // ErrBusy is returned when the witness server is busy + ErrBusy = errors.New("witness server is busy") +) + +const busyResponse = "busy" + +type BatchEndpoints struct { + url string +} + +func 
NewBatchEndpoints(url string) *BatchEndpoints { + return &BatchEndpoints{url: url} +} + +func (b *BatchEndpoints) GetBatch(batchNumber uint64) (*types.RPCBatch, error) { type zkEVMBatch struct { + AccInputHash string `json:"accInputHash"` Blocks []string `json:"blocks"` BatchL2Data string `json:"batchL2Data"` Coinbase string `json:"coinbase"` GlobalExitRoot string `json:"globalExitRoot"` + LocalExitRoot string `json:"localExitRoot"` + StateRoot string `json:"stateRoot"` Closed bool `json:"closed"` Timestamp string `json:"timestamp"` } @@ -26,7 +45,7 @@ func getBatchFromRPC(addr string, batchNumber uint64) (*rpcbatch.RPCBatch, error log.Infof("Getting batch %d from RPC", batchNumber) - response, err := rpc.JSONRPCCall(addr, "zkevm_getBatchByNumber", batchNumber) + response, err := rpc.JSONRPCCall(b.url, "zkevm_getBatchByNumber", batchNumber) if err != nil { return nil, err } @@ -47,14 +66,13 @@ func getBatchFromRPC(addr string, batchNumber uint64) (*rpcbatch.RPCBatch, error return nil, fmt.Errorf("error unmarshalling the batch from the response calling zkevm_getBatchByNumber: %w", err) } - rpcBatch, err := rpcbatch.New(batchNumber, zkEVMBatchData.Blocks, common.FromHex(zkEVMBatchData.BatchL2Data), - common.HexToHash(zkEVMBatchData.GlobalExitRoot), common.HexToAddress(zkEVMBatchData.Coinbase), zkEVMBatchData.Closed) - if err != nil { - return nil, fmt.Errorf("error creating the rpc batch: %w", err) - } + rpcBatch := types.NewRPCBatch(batchNumber, common.HexToHash(zkEVMBatchData.AccInputHash), zkEVMBatchData.Blocks, + common.FromHex(zkEVMBatchData.BatchL2Data), common.HexToHash(zkEVMBatchData.GlobalExitRoot), + common.HexToHash(zkEVMBatchData.LocalExitRoot), common.HexToHash(zkEVMBatchData.StateRoot), + common.HexToAddress(zkEVMBatchData.Coinbase), zkEVMBatchData.Closed) if len(zkEVMBatchData.Blocks) > 0 { - lastL2BlockTimestamp, err := getL2BlockTimestampFromRPC(addr, zkEVMBatchData.Blocks[len(zkEVMBatchData.Blocks)-1]) + lastL2BlockTimestamp, err := 
b.GetL2BlockTimestamp(zkEVMBatchData.Blocks[len(zkEVMBatchData.Blocks)-1]) if err != nil { return nil, fmt.Errorf("error getting the last l2 block timestamp from the rpc: %w", err) } @@ -67,14 +85,14 @@ func getBatchFromRPC(addr string, batchNumber uint64) (*rpcbatch.RPCBatch, error return rpcBatch, nil } -func getL2BlockTimestampFromRPC(addr, blockHash string) (uint64, error) { +func (b *BatchEndpoints) GetL2BlockTimestamp(blockHash string) (uint64, error) { type zkeEVML2Block struct { Timestamp string `json:"timestamp"` } log.Infof("Getting l2 block timestamp from RPC. Block hash: %s", blockHash) - response, err := rpc.JSONRPCCall(addr, "eth_getBlockByHash", blockHash, false) + response, err := rpc.JSONRPCCall(b.url, "eth_getBlockByHash", blockHash, false) if err != nil { return 0, err } @@ -93,3 +111,39 @@ func getL2BlockTimestampFromRPC(addr, blockHash string) (uint64, error) { return new(big.Int).SetBytes(common.FromHex(l2Block.Timestamp)).Uint64(), nil } + +func (b *BatchEndpoints) GetWitness(batchNumber uint64, fullWitness bool) ([]byte, error) { + var ( + witness string + response rpc.Response + err error + ) + + witnessType := "trimmed" + if fullWitness { + witnessType = "full" + } + + log.Infof("Requesting witness for batch %d of type %s", batchNumber, witnessType) + + response, err = rpc.JSONRPCCall(b.url, "zkevm_getBatchWitness", batchNumber, witnessType) + if err != nil { + return nil, err + } + + // Check if the response is an error + if response.Error != nil { + if response.Error.Message == busyResponse { + return nil, ErrBusy + } + + return nil, fmt.Errorf("error from witness for batch %d: %v", batchNumber, response.Error) + } + + err = json.Unmarshal(response.Result, &witness) + if err != nil { + return nil, err + } + + return common.FromHex(witness), nil +} diff --git a/rpc/batch_test.go b/rpc/batch_test.go new file mode 100644 index 00000000..d6940bf3 --- /dev/null +++ b/rpc/batch_test.go @@ -0,0 +1,265 @@ +package rpc + +import ( + 
"encoding/json" + "errors" + "fmt" + "net/http" + "net/http/httptest" + "testing" + + "github.com/0xPolygon/cdk-rpc/rpc" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func Test_getBatchFromRPC(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + batch uint64 + getBatchByNumberResp string + getBlockByHasResp string + getBatchByNumberErr error + getBlockByHashErr error + expectBlocks int + expectData []byte + expectTimestamp uint64 + expectErr error + }{ + { + name: "successfully fetched", + getBatchByNumberResp: `{"jsonrpc":"2.0","id":1,"result":{"blocks":["1", "2", "3"],"batchL2Data":"0x1234567"}}`, + getBlockByHasResp: `{"jsonrpc":"2.0","id":1,"result":{"timestamp":"0x123456"}}`, + batch: 0, + expectBlocks: 3, + expectData: common.FromHex("0x1234567"), + expectTimestamp: 1193046, + expectErr: nil, + }, + { + name: "invalid json", + getBatchByNumberResp: `{"jsonrpc":"2.0","id":1,"result":{"blocks":invalid,"batchL2Data":"test"}}`, + batch: 0, + expectBlocks: 3, + expectData: nil, + expectErr: errors.New("invalid character 'i' looking for beginning of value"), + }, + { + name: "wrong json", + getBatchByNumberResp: `{"jsonrpc":"2.0","id":1,"result":{"blocks":"invalid","batchL2Data":"test"}}`, + batch: 0, + expectBlocks: 3, + expectData: nil, + expectErr: errors.New("error unmarshalling the batch from the response calling zkevm_getBatchByNumber: json: cannot unmarshal string into Go struct field zkEVMBatch.blocks of type []string"), + }, + { + name: "error in the response", + getBatchByNumberResp: `{"jsonrpc":"2.0","id":1,"result":null,"error":{"code":-32602,"message":"Invalid params"}}`, + batch: 0, + expectBlocks: 0, + expectData: nil, + expectErr: errors.New("error in the response calling zkevm_getBatchByNumber: &{-32602 Invalid params }"), + }, + { + name: "http failed", + getBatchByNumberErr: errors.New("failed to fetch"), + batch: 0, + expectBlocks: 0, + expectData: nil, + expectErr: errors.New("invalid 
status code, expected: 200, found: 500"), + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var req rpc.Request + err := json.NewDecoder(r.Body).Decode(&req) + require.NoError(t, err) + + switch req.Method { + case "zkevm_getBatchByNumber": + if tt.getBatchByNumberErr != nil { + http.Error(w, tt.getBatchByNumberErr.Error(), http.StatusInternalServerError) + return + } + + _, _ = w.Write([]byte(tt.getBatchByNumberResp)) + case "eth_getBlockByHash": + if tt.getBlockByHashErr != nil { + http.Error(w, tt.getBlockByHashErr.Error(), http.StatusInternalServerError) + return + } + _, _ = w.Write([]byte(tt.getBlockByHasResp)) + default: + http.Error(w, "method not found", http.StatusNotFound) + } + })) + defer srv.Close() + + rcpBatchClient := NewBatchEndpoints(srv.URL) + rpcBatch, err := rcpBatchClient.GetBatch(tt.batch) + if rpcBatch != nil { + copiedrpcBatch := rpcBatch.DeepCopy() + require.NotNil(t, copiedrpcBatch) + str := copiedrpcBatch.String() + require.NotEmpty(t, str) + } + if tt.expectErr != nil { + require.Equal(t, tt.expectErr.Error(), err.Error()) + } else { + require.NoError(t, err) + require.Equal(t, tt.expectTimestamp, rpcBatch.LastL2BLockTimestamp()) + require.Equal(t, tt.expectData, rpcBatch.L2Data()) + } + }) + } +} + +func Test_getBatchWitnessRPC(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + batch uint64 + getBatchWitnessResp string + getBatchWitnessErr error + expectData []byte + expectErr error + full bool + }{ + { + name: "get batch trimmed witness success", + batch: 1, + getBatchWitnessResp: `{"jsonrpc":"2.0","id":1,"result":"0x0123456"}`, + getBatchWitnessErr: nil, + expectData: common.FromHex("0x0123456"), + expectErr: nil, + full: false, + }, + { + name: "get batch full witness success", + batch: 1, + getBatchWitnessResp: `{"jsonrpc":"2.0","id":1,"result":"0x0123456"}`, + 
getBatchWitnessErr: nil, + expectData: common.FromHex("0x0123456"), + expectErr: nil, + full: true, + }, + { + name: "get batch witness busy", + batch: 1, + getBatchWitnessResp: `{"jsonrpc":"2.0","id":1,"result":"", "error":{"code":-32000,"message":"busy"}}`, + getBatchWitnessErr: nil, + expectData: []byte{}, + expectErr: ErrBusy, + full: false, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var req rpc.Request + err := json.NewDecoder(r.Body).Decode(&req) + require.NoError(t, err) + + switch req.Method { + case "zkevm_getBatchWitness": + if tt.getBatchWitnessErr != nil { + http.Error(w, tt.getBatchWitnessErr.Error(), http.StatusInternalServerError) + return + } + _, _ = w.Write([]byte(tt.getBatchWitnessResp)) + default: + http.Error(w, "method not found", http.StatusNotFound) + } + })) + defer srv.Close() + + rcpBatchClient := NewBatchEndpoints(srv.URL) + witness, err := rcpBatchClient.GetWitness(tt.batch, false) + if tt.expectErr != nil { + require.Equal(t, tt.expectErr.Error(), err.Error()) + } else { + require.NoError(t, err) + require.Equal(t, tt.expectData, witness) + } + }) + } +} + +func Test_getGetL2BlockTimestamp(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + blockHash []byte + response string + error error + expectData uint64 + expectErr error + }{ + { + name: "success", + blockHash: []byte{1}, + response: `{"jsonrpc":"2.0","id":1,"result":{"timestamp":"0x123456"}}`, + error: nil, + expectData: uint64(0x123456), + expectErr: nil, + }, + { + name: "fail", + blockHash: []byte{2}, + response: `{"jsonrpc":"2.0","id":1,"result":{"timestamp":"0x123456"}}`, + error: fmt.Errorf("error"), + expectData: 0, + expectErr: fmt.Errorf("invalid status code, expected: 200, found: 500"), + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + 
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var req rpc.Request + err := json.NewDecoder(r.Body).Decode(&req) + require.NoError(t, err) + + switch req.Method { + case "eth_getBlockByHash": + if tt.error != nil { + http.Error(w, tt.error.Error(), http.StatusInternalServerError) + return + } + _, _ = w.Write([]byte(tt.response)) + default: + http.Error(w, "method not found", http.StatusNotFound) + } + })) + defer srv.Close() + + rcpBatchClient := NewBatchEndpoints(srv.URL) + timestamp, err := rcpBatchClient.GetL2BlockTimestamp(string(tt.blockHash)) + if tt.expectErr != nil { + require.Equal(t, tt.expectErr.Error(), err.Error()) + } else { + require.NoError(t, err) + require.Equal(t, tt.expectData, timestamp) + } + }) + } +} diff --git a/sequencesender/seqsendertypes/rpcbatch/rpcbatch.go b/rpc/types/rpcbatch.go similarity index 70% rename from sequencesender/seqsendertypes/rpcbatch/rpcbatch.go rename to rpc/types/rpcbatch.go index fafc1841..93e1158d 100644 --- a/sequencesender/seqsendertypes/rpcbatch/rpcbatch.go +++ b/rpc/types/rpcbatch.go @@ -1,4 +1,4 @@ -package rpcbatch +package types import ( "fmt" @@ -8,35 +8,45 @@ import ( ) type RPCBatch struct { - batchNumber uint64 `json:"batchNumber"` - blockHashes []string `json:"blocks"` - batchL2Data []byte `json:"batchL2Data"` - globalExitRoot common.Hash `json:"globalExitRoot"` - coinbase common.Address `json:"coinbase"` - closed bool `json:"closed"` - lastL2BlockTimestamp uint64 `json:"lastL2BlockTimestamp"` - l1InfoTreeIndex uint32 `json:"l1InfoTreeIndex"` -} - -func New(batchNumber uint64, blockHashes []string, batchL2Data []byte, globalExitRoot common.Hash, - coinbase common.Address, closed bool) (*RPCBatch, error) { + batchNumber uint64 + accInputHash common.Hash + blockHashes []string + batchL2Data []byte + globalExitRoot common.Hash + localExitRoot common.Hash + stateRoot common.Hash + coinbase common.Address + closed bool + lastL2BlockTimestamp uint64 + 
l1InfoTreeIndex uint32 +} + +func NewRPCBatch(batchNumber uint64, accInputHash common.Hash, blockHashes []string, batchL2Data []byte, + globalExitRoot common.Hash, localExitRoot common.Hash, stateRoot common.Hash, + coinbase common.Address, closed bool) *RPCBatch { return &RPCBatch{ batchNumber: batchNumber, + accInputHash: accInputHash, blockHashes: blockHashes, batchL2Data: batchL2Data, globalExitRoot: globalExitRoot, + localExitRoot: localExitRoot, + stateRoot: stateRoot, coinbase: coinbase, closed: closed, - }, nil + } } // DeepCopy func (b *RPCBatch) DeepCopy() seqsendertypes.Batch { return &RPCBatch{ + accInputHash: b.accInputHash, batchNumber: b.batchNumber, blockHashes: b.blockHashes, batchL2Data: b.batchL2Data, globalExitRoot: b.globalExitRoot, + localExitRoot: b.localExitRoot, + stateRoot: b.stateRoot, coinbase: b.coinbase, closed: b.closed, lastL2BlockTimestamp: b.lastL2BlockTimestamp, @@ -84,6 +94,21 @@ func (b *RPCBatch) GlobalExitRoot() common.Hash { return b.globalExitRoot } +// LocalExitRoot +func (b *RPCBatch) LocalExitRoot() common.Hash { + return b.localExitRoot +} + +// StateRoot +func (b *RPCBatch) StateRoot() common.Hash { + return b.stateRoot +} + +// AccInputHash +func (b *RPCBatch) AccInputHash() common.Hash { + return b.accInputHash +} + // L1InfoTreeIndex func (b *RPCBatch) L1InfoTreeIndex() uint32 { return b.l1InfoTreeIndex diff --git a/scripts/local_config b/scripts/local_config index 9a1f55cf..6922f15e 100755 --- a/scripts/local_config +++ b/scripts/local_config @@ -313,7 +313,7 @@ echo " " echo "- Add next configuration to vscode launch.json" cat << EOF { - "name": "Debug cdk"", + "name": "Debug cdk", "type": "go", "request": "launch", "mode": "auto", diff --git a/sequencesender.json b/sequencesender.json deleted file mode 100644 index 0967ef42..00000000 --- a/sequencesender.json +++ /dev/null @@ -1 +0,0 @@ -{} diff --git a/sequencesender/config.go b/sequencesender/config.go index f264f904..4f77500b 100644 --- 
a/sequencesender/config.go +++ b/sequencesender/config.go @@ -71,11 +71,3 @@ type Config struct { // GetBatchWaitInterval is the time to wait to query for a new batch when there are no more batches available GetBatchWaitInterval types.Duration `mapstructure:"GetBatchWaitInterval"` } - -// StreamClientCfg contains the data streamer's configuration properties -type StreamClientCfg struct { - // Datastream server to connect - Server string `mapstructure:"Server"` - // Log is the log configuration - Log log.Config `mapstructure:"Log"` -} diff --git a/sequencesender/mocks/mock_etherman.go b/sequencesender/mocks/mock_etherman.go index 46a70170..539ef9a7 100644 --- a/sequencesender/mocks/mock_etherman.go +++ b/sequencesender/mocks/mock_etherman.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.40.1. DO NOT EDIT. +// Code generated by mockery v2.39.0. DO NOT EDIT. package mocks diff --git a/sequencesender/mocks/mock_ethtxmanager.go b/sequencesender/mocks/mock_ethtxmanager.go index f3b456a4..16de5be6 100644 --- a/sequencesender/mocks/mock_ethtxmanager.go +++ b/sequencesender/mocks/mock_ethtxmanager.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.40.1. DO NOT EDIT. +// Code generated by mockery v2.39.0. DO NOT EDIT. 
package mocks @@ -6,13 +6,13 @@ import ( context "context" big "math/big" - ethtxtypes "github.com/0xPolygon/zkevm-ethtx-manager/types" - common "github.com/ethereum/go-ethereum/common" mock "github.com/stretchr/testify/mock" types "github.com/ethereum/go-ethereum/core/types" + + zkevm_ethtx_managertypes "github.com/0xPolygon/zkevm-ethtx-manager/types" ) // EthTxManagerMock is an autogenerated mock type for the EthTxManager type @@ -69,22 +69,22 @@ func (_m *EthTxManagerMock) Remove(ctx context.Context, hash common.Hash) error } // Result provides a mock function with given fields: ctx, hash -func (_m *EthTxManagerMock) Result(ctx context.Context, hash common.Hash) (ethtxtypes.MonitoredTxResult, error) { +func (_m *EthTxManagerMock) Result(ctx context.Context, hash common.Hash) (zkevm_ethtx_managertypes.MonitoredTxResult, error) { ret := _m.Called(ctx, hash) if len(ret) == 0 { panic("no return value specified for Result") } - var r0 ethtxtypes.MonitoredTxResult + var r0 zkevm_ethtx_managertypes.MonitoredTxResult var r1 error - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (ethtxtypes.MonitoredTxResult, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (zkevm_ethtx_managertypes.MonitoredTxResult, error)); ok { return rf(ctx, hash) } - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) ethtxtypes.MonitoredTxResult); ok { + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) zkevm_ethtx_managertypes.MonitoredTxResult); ok { r0 = rf(ctx, hash) } else { - r0 = ret.Get(0).(ethtxtypes.MonitoredTxResult) + r0 = ret.Get(0).(zkevm_ethtx_managertypes.MonitoredTxResult) } if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { @@ -97,27 +97,27 @@ func (_m *EthTxManagerMock) Result(ctx context.Context, hash common.Hash) (ethtx } // ResultsByStatus provides a mock function with given fields: ctx, status -func (_m *EthTxManagerMock) ResultsByStatus(ctx context.Context, status []ethtxtypes.MonitoredTxStatus) 
([]ethtxtypes.MonitoredTxResult, error) { +func (_m *EthTxManagerMock) ResultsByStatus(ctx context.Context, status []zkevm_ethtx_managertypes.MonitoredTxStatus) ([]zkevm_ethtx_managertypes.MonitoredTxResult, error) { ret := _m.Called(ctx, status) if len(ret) == 0 { panic("no return value specified for ResultsByStatus") } - var r0 []ethtxtypes.MonitoredTxResult + var r0 []zkevm_ethtx_managertypes.MonitoredTxResult var r1 error - if rf, ok := ret.Get(0).(func(context.Context, []ethtxtypes.MonitoredTxStatus) ([]ethtxtypes.MonitoredTxResult, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, []zkevm_ethtx_managertypes.MonitoredTxStatus) ([]zkevm_ethtx_managertypes.MonitoredTxResult, error)); ok { return rf(ctx, status) } - if rf, ok := ret.Get(0).(func(context.Context, []ethtxtypes.MonitoredTxStatus) []ethtxtypes.MonitoredTxResult); ok { + if rf, ok := ret.Get(0).(func(context.Context, []zkevm_ethtx_managertypes.MonitoredTxStatus) []zkevm_ethtx_managertypes.MonitoredTxResult); ok { r0 = rf(ctx, status) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]ethtxtypes.MonitoredTxResult) + r0 = ret.Get(0).([]zkevm_ethtx_managertypes.MonitoredTxResult) } } - if rf, ok := ret.Get(1).(func(context.Context, []ethtxtypes.MonitoredTxStatus) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, []zkevm_ethtx_managertypes.MonitoredTxStatus) error); ok { r1 = rf(ctx, status) } else { r1 = ret.Error(1) diff --git a/sequencesender/mocks/mock_rpc.go b/sequencesender/mocks/mock_rpc.go new file mode 100644 index 00000000..e06378a6 --- /dev/null +++ b/sequencesender/mocks/mock_rpc.go @@ -0,0 +1,88 @@ +// Code generated by mockery v2.39.0. DO NOT EDIT. 
+ +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + + types "github.com/0xPolygon/cdk/rpc/types" +) + +// RPCInterfaceMock is an autogenerated mock type for the RPCInterface type +type RPCInterfaceMock struct { + mock.Mock +} + +// GetBatch provides a mock function with given fields: batchNumber +func (_m *RPCInterfaceMock) GetBatch(batchNumber uint64) (*types.RPCBatch, error) { + ret := _m.Called(batchNumber) + + if len(ret) == 0 { + panic("no return value specified for GetBatch") + } + + var r0 *types.RPCBatch + var r1 error + if rf, ok := ret.Get(0).(func(uint64) (*types.RPCBatch, error)); ok { + return rf(batchNumber) + } + if rf, ok := ret.Get(0).(func(uint64) *types.RPCBatch); ok { + r0 = rf(batchNumber) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.RPCBatch) + } + } + + if rf, ok := ret.Get(1).(func(uint64) error); ok { + r1 = rf(batchNumber) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetWitness provides a mock function with given fields: batchNumber, fullWitness +func (_m *RPCInterfaceMock) GetWitness(batchNumber uint64, fullWitness bool) ([]byte, error) { + ret := _m.Called(batchNumber, fullWitness) + + if len(ret) == 0 { + panic("no return value specified for GetWitness") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(uint64, bool) ([]byte, error)); ok { + return rf(batchNumber, fullWitness) + } + if rf, ok := ret.Get(0).(func(uint64, bool) []byte); ok { + r0 = rf(batchNumber, fullWitness) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(uint64, bool) error); ok { + r1 = rf(batchNumber, fullWitness) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewRPCInterfaceMock creates a new instance of RPCInterfaceMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewRPCInterfaceMock(t interface { + mock.TestingT + Cleanup(func()) +}) *RPCInterfaceMock { + mock := &RPCInterfaceMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/sequencesender/rpc_test.go b/sequencesender/rpc_test.go deleted file mode 100644 index 4774b237..00000000 --- a/sequencesender/rpc_test.go +++ /dev/null @@ -1,115 +0,0 @@ -package sequencesender - -import ( - "encoding/json" - "errors" - "net/http" - "net/http/httptest" - "testing" - - "github.com/0xPolygon/cdk-rpc/rpc" - "github.com/ethereum/go-ethereum/common" - "github.com/stretchr/testify/require" -) - -func Test_getBatchFromRPC(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - batch uint64 - getBatchByNumberResp string - getBlockByHasResp string - getBatchByNumberErr error - getBlockByHashErr error - expectBlocks int - expectData []byte - expectTimestamp uint64 - expectErr error - }{ - { - name: "successfully fetched", - getBatchByNumberResp: `{"jsonrpc":"2.0","id":1,"result":{"blocks":["1", "2", "3"],"batchL2Data":"0x1234567"}}`, - getBlockByHasResp: `{"jsonrpc":"2.0","id":1,"result":{"timestamp":"0x123456"}}`, - batch: 0, - expectBlocks: 3, - expectData: common.FromHex("0x1234567"), - expectTimestamp: 1193046, - expectErr: nil, - }, - { - name: "invalid json", - getBatchByNumberResp: `{"jsonrpc":"2.0","id":1,"result":{"blocks":invalid,"batchL2Data":"test"}}`, - batch: 0, - expectBlocks: 3, - expectData: nil, - expectErr: errors.New("invalid character 'i' looking for beginning of value"), - }, - { - name: "wrong json", - getBatchByNumberResp: `{"jsonrpc":"2.0","id":1,"result":{"blocks":"invalid","batchL2Data":"test"}}`, - batch: 0, - expectBlocks: 3, - expectData: nil, - expectErr: errors.New("error unmarshalling the batch from the response calling zkevm_getBatchByNumber: json: cannot unmarshal string into Go struct field zkEVMBatch.blocks of type []string"), - }, - { - name: "error in the response", - 
getBatchByNumberResp: `{"jsonrpc":"2.0","id":1,"result":null,"error":{"code":-32602,"message":"Invalid params"}}`, - batch: 0, - expectBlocks: 0, - expectData: nil, - expectErr: errors.New("error in the response calling zkevm_getBatchByNumber: &{-32602 Invalid params }"), - }, - { - name: "http failed", - getBatchByNumberErr: errors.New("failed to fetch"), - batch: 0, - expectBlocks: 0, - expectData: nil, - expectErr: errors.New("invalid status code, expected: 200, found: 500"), - }, - } - - for _, tt := range tests { - tt := tt - - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - var req rpc.Request - err := json.NewDecoder(r.Body).Decode(&req) - require.NoError(t, err) - - switch req.Method { - case "zkevm_getBatchByNumber": - if tt.getBatchByNumberErr != nil { - http.Error(w, tt.getBatchByNumberErr.Error(), http.StatusInternalServerError) - return - } - - _, _ = w.Write([]byte(tt.getBatchByNumberResp)) - case "eth_getBlockByHash": - if tt.getBlockByHashErr != nil { - http.Error(w, tt.getBlockByHashErr.Error(), http.StatusInternalServerError) - return - } - _, _ = w.Write([]byte(tt.getBlockByHasResp)) - default: - http.Error(w, "method not found", http.StatusNotFound) - } - })) - defer srv.Close() - - rpcBatch, err := getBatchFromRPC(srv.URL, tt.batch) - if tt.expectErr != nil { - require.Equal(t, tt.expectErr.Error(), err.Error()) - } else { - require.NoError(t, err) - require.Equal(t, tt.expectTimestamp, rpcBatch.LastL2BLockTimestamp()) - require.Equal(t, tt.expectData, rpcBatch.L2Data()) - } - }) - } -} diff --git a/sequencesender/sequencesender.go b/sequencesender/sequencesender.go index 0a044356..432b3777 100644 --- a/sequencesender/sequencesender.go +++ b/sequencesender/sequencesender.go @@ -12,15 +12,16 @@ import ( "github.com/0xPolygon/cdk/etherman" "github.com/0xPolygon/cdk/log" + "github.com/0xPolygon/cdk/rpc" + "github.com/0xPolygon/cdk/rpc/types" 
"github.com/0xPolygon/cdk/sequencesender/seqsendertypes" - "github.com/0xPolygon/cdk/sequencesender/seqsendertypes/rpcbatch" "github.com/0xPolygon/cdk/sequencesender/txbuilder" "github.com/0xPolygon/cdk/state" "github.com/0xPolygon/zkevm-ethtx-manager/ethtxmanager" ethtxlog "github.com/0xPolygon/zkevm-ethtx-manager/log" ethtxtypes "github.com/0xPolygon/zkevm-ethtx-manager/types" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" + ethtypes "github.com/ethereum/go-ethereum/core/types" ) const ten = 10 @@ -34,7 +35,7 @@ type EthTxManager interface { value *big.Int, data []byte, gasOffset uint64, - sidecar *types.BlobTxSidecar, + sidecar *ethtypes.BlobTxSidecar, gas uint64, ) (common.Hash, error) Remove(ctx context.Context, hash common.Hash) error @@ -45,11 +46,17 @@ type EthTxManager interface { // Etherman represents the etherman behaviour type Etherman interface { CurrentNonce(ctx context.Context, address common.Address) (uint64, error) - GetLatestBlockHeader(ctx context.Context) (*types.Header, error) + GetLatestBlockHeader(ctx context.Context) (*ethtypes.Header, error) EstimateGas(ctx context.Context, from common.Address, to *common.Address, value *big.Int, data []byte) (uint64, error) GetLatestBatchNumber() (uint64, error) } +// RPCInterface represents the RPC interface +type RPCInterface interface { + GetBatch(batchNumber uint64) (*types.RPCBatch, error) + GetWitness(batchNumber uint64, fullWitness bool) ([]byte, error) +} + // SequenceSender represents a sequence sender type SequenceSender struct { cfg Config @@ -69,6 +76,7 @@ type SequenceSender struct { validStream bool // Not valid while receiving data before the desired batch seqSendingStopped uint32 // If there is a critical error TxBuilder txbuilder.TxBuilder + rpcClient RPCInterface } type sequenceData struct { @@ -90,6 +98,7 @@ func New(cfg Config, logger *log.Logger, sequenceData: make(map[uint64]*sequenceData), validStream: false, TxBuilder: txBuilder, + 
rpcClient: rpc.NewBatchEndpoints(cfg.RPCURL), } logger.Infof("TxBuilder configuration: %s", txBuilder.String()) @@ -162,7 +171,7 @@ func (s *SequenceSender) batchRetrieval(ctx context.Context) error { return ctx.Err() default: // Try to retrieve batch from RPC - rpcBatch, err := getBatchFromRPC(s.cfg.RPCURL, currentBatchNumber) + rpcBatch, err := s.rpcClient.GetBatch(currentBatchNumber) if err != nil { if errors.Is(err, ethtxmanager.ErrNotFound) { s.logger.Infof("batch %d not found in RPC", currentBatchNumber) @@ -191,7 +200,7 @@ func (s *SequenceSender) batchRetrieval(ctx context.Context) error { } } -func (s *SequenceSender) populateSequenceData(rpcBatch *rpcbatch.RPCBatch, batchNumber uint64) error { +func (s *SequenceSender) populateSequenceData(rpcBatch *types.RPCBatch, batchNumber uint64) error { s.mutexSequence.Lock() defer s.mutexSequence.Unlock() diff --git a/sequencesender/sequencesender_test.go b/sequencesender/sequencesender_test.go index 3db4a803..e1d694e5 100644 --- a/sequencesender/sequencesender_test.go +++ b/sequencesender/sequencesender_test.go @@ -10,6 +10,7 @@ import ( types2 "github.com/0xPolygon/cdk/config/types" "github.com/0xPolygon/cdk/etherman" "github.com/0xPolygon/cdk/log" + rpctypes "github.com/0xPolygon/cdk/rpc/types" "github.com/0xPolygon/cdk/sequencesender/mocks" "github.com/0xPolygon/cdk/sequencesender/seqsendertypes" "github.com/0xPolygon/cdk/sequencesender/txbuilder" @@ -98,6 +99,7 @@ func Test_Start(t *testing.T) { name string getEthTxManager func(t *testing.T) *mocks.EthTxManagerMock getEtherman func(t *testing.T) *mocks.EthermanMock + getRPC func(t *testing.T) *mocks.RPCInterfaceMock batchWaitDuration types2.Duration expectNonce uint64 expectLastVirtualBatch uint64 @@ -122,6 +124,14 @@ func Test_Start(t *testing.T) { mngr.On("ResultsByStatus", mock.Anything, []ethtxtypes.MonitoredTxStatus(nil)).Return(nil, nil) return mngr }, + getRPC: func(t *testing.T) *mocks.RPCInterfaceMock { + t.Helper() + + mngr := 
mocks.NewRPCInterfaceMock(t) + mngr.On("GetBatch", mock.Anything).Return(&rpctypes.RPCBatch{}, nil) + return mngr + }, + batchWaitDuration: types2.NewDuration(time.Millisecond), expectNonce: 3, expectLastVirtualBatch: 1, @@ -149,7 +159,8 @@ func Test_Start(t *testing.T) { GetBatchWaitInterval: tt.batchWaitDuration, WaitPeriodSendSequence: types2.NewDuration(1 * time.Millisecond), }, - logger: log.GetDefaultLogger(), + logger: log.GetDefaultLogger(), + rpcClient: tt.getRPC(t), } ctx, cancel := context.WithCancel(context.Background()) diff --git a/state/datastream.go b/state/datastream.go deleted file mode 100644 index 7687dba7..00000000 --- a/state/datastream.go +++ /dev/null @@ -1,12 +0,0 @@ -package state - -import ( - "github.com/0xPolygonHermez/zkevm-data-streamer/datastreamer" -) - -const ( - // StreamTypeSequencer represents a Sequencer stream - StreamTypeSequencer datastreamer.StreamType = 1 - // EntryTypeBookMark represents a bookmark entry - EntryTypeBookMark datastreamer.EntryType = datastreamer.EtBookmark -) diff --git a/state/interfaces.go b/state/interfaces.go index ce825685..fc4eb495 100644 --- a/state/interfaces.go +++ b/state/interfaces.go @@ -23,8 +23,4 @@ type storage interface { CleanupGeneratedProofs(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) error CleanupLockedProofs(ctx context.Context, duration string, dbTx pgx.Tx) (int64, error) CheckProofExistsForBatch(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) (bool, error) - AddBatch(ctx context.Context, dbBatch *DBBatch, dbTx pgx.Tx) error - GetBatch(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) (*DBBatch, error) - DeleteBatchesOlderThanBatchNumber(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) error - DeleteBatchesNewerThanBatchNumber(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) error } diff --git a/state/pgstatestorage/batch.go b/state/pgstatestorage/batch.go deleted file mode 100644 index 6273f064..00000000 --- a/state/pgstatestorage/batch.go +++ /dev/null 
@@ -1,66 +0,0 @@ -package pgstatestorage - -import ( - "context" - "errors" - - "github.com/0xPolygon/cdk/state" - "github.com/ethereum/go-ethereum/common" - "github.com/jackc/pgx/v4" -) - -// AddBatch stores a batch -func (p *PostgresStorage) AddBatch(ctx context.Context, dbBatch *state.DBBatch, dbTx pgx.Tx) error { - const addInputHashSQL = ` - INSERT INTO aggregator.batch (batch_num, batch, datastream, witness) - VALUES ($1, $2, $3, $4) - ON CONFLICT (batch_num) DO UPDATE - SET batch = $2, datastream = $3, witness = $4 - ` - e := p.getExecQuerier(dbTx) - _, err := e.Exec( - ctx, addInputHashSQL, dbBatch.Batch.BatchNumber, &dbBatch.Batch, - common.Bytes2Hex(dbBatch.Datastream), common.Bytes2Hex(dbBatch.Witness), - ) - return err -} - -// GetBatch gets a batch by a given batch number -func (p *PostgresStorage) GetBatch(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) (*state.DBBatch, error) { - const getInputHashSQL = "SELECT batch, datastream, witness FROM aggregator.batch WHERE batch_num = $1" - e := p.getExecQuerier(dbTx) - var batch state.Batch - var streamStr string - var witnessStr string - err := e.QueryRow(ctx, getInputHashSQL, batchNumber).Scan(&batch, &streamStr, &witnessStr) - if errors.Is(err, pgx.ErrNoRows) { - return nil, state.ErrNotFound - } else if err != nil { - return nil, err - } - return &state.DBBatch{ - Batch: batch, - Datastream: common.Hex2Bytes(streamStr), - Witness: common.Hex2Bytes(witnessStr), - }, nil -} - -// DeleteBatchesOlderThanBatchNumber deletes batches previous to the given batch number -func (p *PostgresStorage) DeleteBatchesOlderThanBatchNumber( - ctx context.Context, batchNumber uint64, dbTx pgx.Tx, -) error { - const deleteBatchesSQL = "DELETE FROM aggregator.batch WHERE batch_num < $1" - e := p.getExecQuerier(dbTx) - _, err := e.Exec(ctx, deleteBatchesSQL, batchNumber) - return err -} - -// DeleteBatchesNewerThanBatchNumber deletes batches previous to the given batch number -func (p *PostgresStorage) 
DeleteBatchesNewerThanBatchNumber( - ctx context.Context, batchNumber uint64, dbTx pgx.Tx, -) error { - const deleteBatchesSQL = "DELETE FROM aggregator.batch WHERE batch_num > $1" - e := p.getExecQuerier(dbTx) - _, err := e.Exec(ctx, deleteBatchesSQL, batchNumber) - return err -} diff --git a/test/Makefile b/test/Makefile index a1b51bb1..d173c423 100644 --- a/test/Makefile +++ b/test/Makefile @@ -17,9 +17,9 @@ COMMON_MOCKERY_PARAMS=--disable-version-string --with-expecter --exported generate-mocks-sequencesender: ## Generates mocks for sequencesender, using mockery tool rm -Rf ../sequencesender/txbuilder/mocks_txbuilder export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --all --case snake --dir ../sequencesender/txbuilder --output ../sequencesender/txbuilder/mocks_txbuilder --outpkg mocks_txbuilder ${COMMON_MOCKERY_PARAMS} - export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=EthTxManager --dir=../sequencesender --output=../sequencesender/mocks --outpkg=mocks --structname=EthTxMngrMock --filename=mock_ethtxmanager.go + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=EthTxManager --dir=../sequencesender --output=../sequencesender/mocks --outpkg=mocks --structname=EthTxManagerMock --filename=mock_ethtxmanager.go export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=Etherman --dir=../sequencesender --output=../sequencesender/mocks --outpkg=mocks --structname=EthermanMock --filename=mock_etherman.go - export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=StreamClient --dir=../sequencesender --output=../sequencesender/mocks --outpkg=mocks --structname=StreamClientMock --filename=mock_streamclient.go + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=RPCInterface --dir=../sequencesender --output=../sequencesender/mocks --outpkg=mocks --structname=RPCInterfaceMock --filename=mock_rpc.go .PHONY: generate-mocks-da generate-mocks-da: ## Generates 
mocks for dataavailability, using mockery tool @@ -55,9 +55,10 @@ generate-mocks-aggregator: ## Generates mocks for aggregator, using mockery tool export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=StateInterface --dir=../aggregator --output=../aggregator/mocks --outpkg=mocks --structname=StateInterfaceMock --filename=mock_state.go export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=AgglayerClientInterface --dir=../aggregator/agglayer --output=../aggregator/mocks --outpkg=mocks --structname=AgglayerClientInterfaceMock --filename=mock_agglayer_client.go export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=Synchronizer --srcpkg=github.com/0xPolygonHermez/zkevm-synchronizer-l1/synchronizer --output=../aggregator/mocks --outpkg=mocks --structname=SynchronizerInterfaceMock --filename=mock_synchronizer.go - export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=StreamClient --dir=../aggregator --output=../aggregator/mocks --outpkg=mocks --structname=StreamClientMock --filename=mock_streamclient.go export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=EthTxManagerClient --dir=../aggregator --output=../aggregator/mocks --outpkg=mocks --structname=EthTxManagerClientMock --filename=mock_eth_tx_manager.go export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=Tx --srcpkg=github.com/jackc/pgx/v4 --output=../aggregator/mocks --outpkg=mocks --structname=DbTxMock --filename=mock_dbtx.go + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=RPCInterface --dir=../aggregator --output=../aggregator/mocks --outpkg=mocks --structname=RPCInterfaceMock --filename=mock_rpc.go + .PHONY: test-e2e-fork9-validium test-e2e-fork9-validium: stop diff --git a/test/config/kurtosis-cdk-node-config.toml.template b/test/config/kurtosis-cdk-node-config.toml.template index 8fd9e82b..fa01b528 100644 --- a/test/config/kurtosis-cdk-node-config.toml.template +++ 
b/test/config/kurtosis-cdk-node-config.toml.template @@ -22,10 +22,9 @@ SenderProofToL1Addr = "{{.zkevm_l2_agglayer_address}}" - +RPCURL = "http://{{.l2_rpc_name}}{{.deployment_suffix}}:{{.zkevm_rpc_http_port}}" WitnessURL = "http://{{.l2_rpc_name}}{{.deployment_suffix}}:{{.zkevm_rpc_http_port}}" AggLayerURL = "http://agglayer:{{.agglayer_port}}" -StreamServer = "{{.sequencer_name}}{{.deployment_suffix}}:{{.zkevm_data_streamer_port}}" diff --git a/test/config/test.config.toml b/test/config/test.config.toml index 61fd4401..94940469 100644 --- a/test/config/test.config.toml +++ b/test/config/test.config.toml @@ -15,8 +15,6 @@ WaitPeriodPurgeTxFile = "60m" MaxPendingTx = 1 RPCURL = "http://127.0.0.1:8123" GetBatchWaitInterval = "10s" - [SequenceSender.StreamClient] - Server = "127.0.0.1:6900" [SequenceSender.EthTxManager] FrequencyToMonitorTxs = "1s" WaitTxToBeMined = "2m" @@ -52,12 +50,11 @@ GeneratingProofCleanupThreshold = "10m" BatchProofSanityCheckEnabled = true ForkId = 9 GasOffset = 0 -WitnessURL = "http://zkevm-erigon-seq:8123" -UseL1BatchData = true +RPCURL = "http://127.0.0.1:8123" +WitnessURL = "http://127.0.0.1:8123" SettlementBackend = "l1" AggLayerTxTimeout = "5m" AggLayerURL = "" -MaxWitnessRetrievalWorkers = 2 SyncModeOnlyEnabled = false UseFullWitness = false SequencerPrivateKey = {} @@ -73,8 +70,6 @@ SequencerPrivateKey = {} Environment = "development" # "production" or "development" Level = "info" Outputs = ["stderr"] - [Aggregator.StreamClient] - Server = "zkevm-erigon-seq:6900" [Aggregator.EthTxManager] FrequencyToMonitorTxs = "1s" WaitTxToBeMined = "2m" From b43c062b530e0efd423583d0e19a1065453a6d36 Mon Sep 17 00:00:00 2001 From: Victor Castell Date: Thu, 31 Oct 2024 11:02:53 +0100 Subject: [PATCH 04/33] ci: remove commit linting (#143) --- .github/workflows/lint-pr.yml | 9 --------- 1 file changed, 9 deletions(-) diff --git a/.github/workflows/lint-pr.yml b/.github/workflows/lint-pr.yml index 780405d7..9a24eaf2 100644 --- 
a/.github/workflows/lint-pr.yml +++ b/.github/workflows/lint-pr.yml @@ -20,12 +20,3 @@ jobs: with: ignoreLabels: | release - - commits: - name: Validate PR commits - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - uses: wagoid/commitlint-github-action@v6 From 2a76deb0e333de6ab79a815565ee0daf8caf5d57 Mon Sep 17 00:00:00 2001 From: Goran Rojovic <100121253+goran-ethernal@users.noreply.github.com> Date: Thu, 31 Oct 2024 11:04:52 +0100 Subject: [PATCH 05/33] feat: agg-sender (#22) Co-authored-by: Goran Rojovic Co-authored-by: Victor Castell <0x@vcastellm.xyz> Co-authored-by: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Co-authored-by: Arnau Bennassar --- .../agglayer_client.go => agglayer/client.go | 42 + agglayer/mock_agglayer_client.go | 138 ++ .../agglayer/agglayer_tx.go => agglayer/tx.go | 0 agglayer/types.go | 387 +++++ agglayer/types_test.go | 66 + aggregator/aggregator.go | 5 +- aggregator/aggregator_test.go | 7 +- aggregator/config.go | 21 - aggregator/mocks/mock_agglayer_client.go | 79 - aggsender/aggsender.go | 502 ++++++ aggsender/aggsender_test.go | 1407 +++++++++++++++++ aggsender/config.go | 23 + aggsender/db/aggsender_db_storage.go | 215 +++ aggsender/db/aggsender_db_storage_test.go | 204 +++ aggsender/db/migrations/0001.sql | 12 + aggsender/db/migrations/migrations.go | 22 + aggsender/mocks/mock_aggsender_storage.go | 354 +++++ aggsender/mocks/mock_eth_client.go | 154 ++ aggsender/mocks/mock_l1infotree_syncer.go | 217 +++ aggsender/mocks/mock_l2bridge_syncer.go | 423 +++++ aggsender/mocks/mock_logger.go | 290 ++++ aggsender/types/types.go | 65 + bridgesync/bridgesync.go | 47 +- bridgesync/bridgesync_test.go | 81 + bridgesync/claimcalldata_test.go | 3 + bridgesync/config.go | 2 + bridgesync/downloader.go | 3 + bridgesync/e2e_test.go | 2 +- bridgesync/migrations/bridgesync0001.sql | 6 +- bridgesync/mocks/bridge_contractor.go | 93 ++ bridgesync/mocks/eth_clienter.go | 1136 +++++++++++++ 
bridgesync/mocks/reorg_detector.go | 147 ++ bridgesync/processor.go | 78 +- bridgesync/processor_test.go | 287 +++- claimsponsor/e2e_test.go | 2 +- cmd/main.go | 3 +- cmd/run.go | 55 +- common/common.go | 21 + common/components.go | 2 + config/config.go | 5 +- config/default.go | 29 +- l1infotree/tree.go | 12 +- l1infotree/tree_test.go | 54 + l1infotreesync/processor.go | 2 +- l1infotreesync/processor_test.go | 99 +- scripts/local_config | 55 +- sonar-project.properties | 4 +- test/Makefile | 23 +- .../kurtosis-cdk-node-config.toml.template | 15 +- test/helpers/lxly-bridge-test.bash | 1 - tree/tree.go | 24 +- 51 files changed, 6741 insertions(+), 183 deletions(-) rename aggregator/agglayer/agglayer_client.go => agglayer/client.go (63%) create mode 100644 agglayer/mock_agglayer_client.go rename aggregator/agglayer/agglayer_tx.go => agglayer/tx.go (100%) create mode 100644 agglayer/types.go create mode 100644 agglayer/types_test.go delete mode 100644 aggregator/mocks/mock_agglayer_client.go create mode 100644 aggsender/aggsender.go create mode 100644 aggsender/aggsender_test.go create mode 100644 aggsender/config.go create mode 100644 aggsender/db/aggsender_db_storage.go create mode 100644 aggsender/db/aggsender_db_storage_test.go create mode 100644 aggsender/db/migrations/0001.sql create mode 100644 aggsender/db/migrations/migrations.go create mode 100644 aggsender/mocks/mock_aggsender_storage.go create mode 100644 aggsender/mocks/mock_eth_client.go create mode 100644 aggsender/mocks/mock_l1infotree_syncer.go create mode 100644 aggsender/mocks/mock_l2bridge_syncer.go create mode 100644 aggsender/mocks/mock_logger.go create mode 100644 aggsender/types/types.go create mode 100644 bridgesync/bridgesync_test.go create mode 100644 bridgesync/mocks/bridge_contractor.go create mode 100644 bridgesync/mocks/eth_clienter.go create mode 100644 bridgesync/mocks/reorg_detector.go diff --git a/aggregator/agglayer/agglayer_client.go b/agglayer/client.go similarity index 63% rename 
from aggregator/agglayer/agglayer_client.go rename to agglayer/client.go index a5222571..132c2716 100644 --- a/aggregator/agglayer/agglayer_client.go +++ b/agglayer/client.go @@ -21,6 +21,8 @@ var ErrAgglayerRateLimitExceeded = fmt.Errorf("agglayer rate limit exceeded") type AgglayerClientInterface interface { SendTx(signedTx SignedTx) (common.Hash, error) WaitTxToBeMined(hash common.Hash, ctx context.Context) error + SendCertificate(certificate *SignedCertificate) (common.Hash, error) + GetCertificateHeader(certificateHash common.Hash) (*CertificateHeader, error) } // AggLayerClient is the client that will be used to interact with the AggLayer @@ -86,3 +88,43 @@ func (c *AggLayerClient) WaitTxToBeMined(hash common.Hash, ctx context.Context) } } } + +// SendCertificate sends a certificate to the AggLayer +func (c *AggLayerClient) SendCertificate(certificate *SignedCertificate) (common.Hash, error) { + response, err := rpc.JSONRPCCall(c.url, "interop_sendCertificate", certificate) + if err != nil { + return common.Hash{}, err + } + + if response.Error != nil { + return common.Hash{}, fmt.Errorf("%d %s", response.Error.Code, response.Error.Message) + } + + var result types.ArgHash + err = json.Unmarshal(response.Result, &result) + if err != nil { + return common.Hash{}, err + } + + return result.Hash(), nil +} + +// GetCertificateHeader returns the certificate header associated to the hash +func (c *AggLayerClient) GetCertificateHeader(certificateHash common.Hash) (*CertificateHeader, error) { + response, err := rpc.JSONRPCCall(c.url, "interop_getCertificateHeader", certificateHash) + if err != nil { + return nil, err + } + + if response.Error != nil { + return nil, fmt.Errorf("%d %s", response.Error.Code, response.Error.Message) + } + + var result *CertificateHeader + err = json.Unmarshal(response.Result, &result) + if err != nil { + return nil, err + } + + return result, nil +} diff --git a/agglayer/mock_agglayer_client.go b/agglayer/mock_agglayer_client.go new 
file mode 100644 index 00000000..43100a2e --- /dev/null +++ b/agglayer/mock_agglayer_client.go @@ -0,0 +1,138 @@ +// Code generated by mockery v2.45.0. DO NOT EDIT. + +package agglayer + +import ( + context "context" + + common "github.com/ethereum/go-ethereum/common" + + mock "github.com/stretchr/testify/mock" +) + +// AgglayerClientMock is an autogenerated mock type for the AgglayerClientInterface type +type AgglayerClientMock struct { + mock.Mock +} + +// GetCertificateHeader provides a mock function with given fields: certificateHash +func (_m *AgglayerClientMock) GetCertificateHeader(certificateHash common.Hash) (*CertificateHeader, error) { + ret := _m.Called(certificateHash) + + if len(ret) == 0 { + panic("no return value specified for GetCertificateHeader") + } + + var r0 *CertificateHeader + var r1 error + if rf, ok := ret.Get(0).(func(common.Hash) (*CertificateHeader, error)); ok { + return rf(certificateHash) + } + if rf, ok := ret.Get(0).(func(common.Hash) *CertificateHeader); ok { + r0 = rf(certificateHash) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*CertificateHeader) + } + } + + if rf, ok := ret.Get(1).(func(common.Hash) error); ok { + r1 = rf(certificateHash) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SendCertificate provides a mock function with given fields: certificate +func (_m *AgglayerClientMock) SendCertificate(certificate *SignedCertificate) (common.Hash, error) { + ret := _m.Called(certificate) + + if len(ret) == 0 { + panic("no return value specified for SendCertificate") + } + + var r0 common.Hash + var r1 error + if rf, ok := ret.Get(0).(func(*SignedCertificate) (common.Hash, error)); ok { + return rf(certificate) + } + if rf, ok := ret.Get(0).(func(*SignedCertificate) common.Hash); ok { + r0 = rf(certificate) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Hash) + } + } + + if rf, ok := ret.Get(1).(func(*SignedCertificate) error); ok { + r1 = rf(certificate) + } else { + r1 = ret.Error(1) + } 
+ + return r0, r1 +} + +// SendTx provides a mock function with given fields: signedTx +func (_m *AgglayerClientMock) SendTx(signedTx SignedTx) (common.Hash, error) { + ret := _m.Called(signedTx) + + if len(ret) == 0 { + panic("no return value specified for SendTx") + } + + var r0 common.Hash + var r1 error + if rf, ok := ret.Get(0).(func(SignedTx) (common.Hash, error)); ok { + return rf(signedTx) + } + if rf, ok := ret.Get(0).(func(SignedTx) common.Hash); ok { + r0 = rf(signedTx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Hash) + } + } + + if rf, ok := ret.Get(1).(func(SignedTx) error); ok { + r1 = rf(signedTx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WaitTxToBeMined provides a mock function with given fields: hash, ctx +func (_m *AgglayerClientMock) WaitTxToBeMined(hash common.Hash, ctx context.Context) error { + ret := _m.Called(hash, ctx) + + if len(ret) == 0 { + panic("no return value specified for WaitTxToBeMined") + } + + var r0 error + if rf, ok := ret.Get(0).(func(common.Hash, context.Context) error); ok { + r0 = rf(hash, ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewAgglayerClientMock creates a new instance of AgglayerClientMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewAgglayerClientMock(t interface { + mock.TestingT + Cleanup(func()) +}) *AgglayerClientMock { + mock := &AgglayerClientMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/aggregator/agglayer/agglayer_tx.go b/agglayer/tx.go similarity index 100% rename from aggregator/agglayer/agglayer_tx.go rename to agglayer/tx.go diff --git a/agglayer/types.go b/agglayer/types.go new file mode 100644 index 00000000..e8bdb254 --- /dev/null +++ b/agglayer/types.go @@ -0,0 +1,387 @@ +package agglayer + +import ( + "encoding/json" + "fmt" + "math/big" + "strings" + + "github.com/0xPolygon/cdk/bridgesync" + cdkcommon "github.com/0xPolygon/cdk/common" + "github.com/0xPolygon/cdk/tree/types" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" +) + +type CertificateStatus int + +const ( + Pending CertificateStatus = iota + Proven + Candidate + InError + Settled +) + +// String representation of the enum +func (c CertificateStatus) String() string { + return [...]string{"Pending", "Proven", "Candidate", "InError", "Settled"}[c] +} + +// UnmarshalJSON is the implementation of the json.Unmarshaler interface +func (c *CertificateStatus) UnmarshalJSON(data []byte) error { + dataStr := string(data) + + var status string + if strings.Contains(dataStr, "InError") { + status = "InError" + } else { + err := json.Unmarshal(data, &status) + if err != nil { + return err + } + } + + switch status { + case "Pending": + *c = Pending + case "InError": + *c = InError + case "Proven": + *c = Proven + case "Candidate": + *c = Candidate + case "Settled": + *c = Settled + default: + return fmt.Errorf("invalid status: %s", status) + } + + return nil +} + +type LeafType uint8 + +func (l LeafType) Uint8() uint8 { + return uint8(l) +} + +func (l LeafType) String() string { + return [...]string{"Transfer", "Message"}[l] +} + +const ( + LeafTypeAsset LeafType = iota + LeafTypeMessage +) + +// Certificate is the 
data structure that will be sent to the agglayer +type Certificate struct { + NetworkID uint32 `json:"network_id"` + Height uint64 `json:"height"` + PrevLocalExitRoot [32]byte `json:"prev_local_exit_root"` + NewLocalExitRoot [32]byte `json:"new_local_exit_root"` + BridgeExits []*BridgeExit `json:"bridge_exits"` + ImportedBridgeExits []*ImportedBridgeExit `json:"imported_bridge_exits"` +} + +// Hash returns a hash that uniquely identifies the certificate +func (c *Certificate) Hash() common.Hash { + bridgeExitsHashes := make([][]byte, len(c.BridgeExits)) + for i, bridgeExit := range c.BridgeExits { + bridgeExitsHashes[i] = bridgeExit.Hash().Bytes() + } + + importedBridgeExitsHashes := make([][]byte, len(c.ImportedBridgeExits)) + for i, importedBridgeExit := range c.ImportedBridgeExits { + importedBridgeExitsHashes[i] = importedBridgeExit.Hash().Bytes() + } + + bridgeExitsPart := crypto.Keccak256(bridgeExitsHashes...) + importedBridgeExitsPart := crypto.Keccak256(importedBridgeExitsHashes...) + + return crypto.Keccak256Hash( + cdkcommon.Uint32ToBytes(c.NetworkID), + cdkcommon.Uint64ToBytes(c.Height), + c.PrevLocalExitRoot[:], + c.NewLocalExitRoot[:], + bridgeExitsPart, + importedBridgeExitsPart, + ) +} + +// SignedCertificate is the struct that contains the certificate and the signature of the signer +type SignedCertificate struct { + *Certificate + Signature *Signature `json:"signature"` +} + +// Signature is the data structure that will hold the signature of the given certificate +type Signature struct { + R common.Hash `json:"r"` + S common.Hash `json:"s"` + OddParity bool `json:"odd_y_parity"` +} + +// TokenInfo encapsulates the information to uniquely identify a token on the origin network. 
+type TokenInfo struct { + OriginNetwork uint32 `json:"origin_network"` + OriginTokenAddress common.Address `json:"origin_token_address"` +} + +// GlobalIndex represents the global index of an imported bridge exit +type GlobalIndex struct { + MainnetFlag bool `json:"mainnet_flag"` + RollupIndex uint32 `json:"rollup_index"` + LeafIndex uint32 `json:"leaf_index"` +} + +func (g *GlobalIndex) Hash() common.Hash { + return crypto.Keccak256Hash( + bridgesync.GenerateGlobalIndex(g.MainnetFlag, g.RollupIndex, g.LeafIndex).Bytes()) +} + +// BridgeExit represents a token bridge exit +type BridgeExit struct { + LeafType LeafType `json:"leaf_type"` + TokenInfo *TokenInfo `json:"token_info"` + DestinationNetwork uint32 `json:"dest_network"` + DestinationAddress common.Address `json:"dest_address"` + Amount *big.Int `json:"amount"` + Metadata []byte `json:"metadata"` +} + +// Hash returns a hash that uniquely identifies the bridge exit +func (b *BridgeExit) Hash() common.Hash { + if b.Amount == nil { + b.Amount = big.NewInt(0) + } + + return crypto.Keccak256Hash( + []byte{b.LeafType.Uint8()}, + cdkcommon.Uint32ToBytes(b.TokenInfo.OriginNetwork), + b.TokenInfo.OriginTokenAddress.Bytes(), + cdkcommon.Uint32ToBytes(b.DestinationNetwork), + b.DestinationAddress.Bytes(), + b.Amount.Bytes(), + crypto.Keccak256(b.Metadata), + ) +} + +// MarshalJSON is the implementation of the json.Marshaler interface +func (b *BridgeExit) MarshalJSON() ([]byte, error) { + return json.Marshal(&struct { + LeafType string `json:"leaf_type"` + TokenInfo *TokenInfo `json:"token_info"` + DestinationNetwork uint32 `json:"dest_network"` + DestinationAddress common.Address `json:"dest_address"` + Amount string `json:"amount"` + Metadata []uint `json:"metadata"` + }{ + LeafType: b.LeafType.String(), + TokenInfo: b.TokenInfo, + DestinationNetwork: b.DestinationNetwork, + DestinationAddress: b.DestinationAddress, + Amount: b.Amount.String(), + Metadata: bytesToUints(b.Metadata), + }) +} + +// bytesToUints 
converts a byte slice to a slice of uints +func bytesToUints(data []byte) []uint { + uints := make([]uint, len(data)) + for i, b := range data { + uints[i] = uint(b) + } + return uints +} + +// MerkleProof represents an inclusion proof of a leaf in a Merkle tree +type MerkleProof struct { + Root common.Hash `json:"root"` + Proof [types.DefaultHeight]common.Hash `json:"proof"` +} + +// MarshalJSON is the implementation of the json.Marshaler interface +func (m *MerkleProof) MarshalJSON() ([]byte, error) { + proofsAsBytes := [types.DefaultHeight][types.DefaultHeight]byte{} + for i, proof := range m.Proof { + proofsAsBytes[i] = proof + } + + return json.Marshal(&struct { + Root [types.DefaultHeight]byte `json:"root"` + Proof map[string][types.DefaultHeight][types.DefaultHeight]byte `json:"proof"` + }{ + Root: m.Root, + Proof: map[string][types.DefaultHeight][types.DefaultHeight]byte{ + "siblings": proofsAsBytes, + }, + }) +} + +// Hash returns the hash of the Merkle proof struct +func (m *MerkleProof) Hash() common.Hash { + proofsAsSingleSlice := make([]byte, 0) + + for _, proof := range m.Proof { + proofsAsSingleSlice = append(proofsAsSingleSlice, proof.Bytes()...) 
+ } + + return crypto.Keccak256Hash( + m.Root.Bytes(), + proofsAsSingleSlice, + ) +} + +// L1InfoTreeLeafInner represents the inner part of the L1 info tree leaf +type L1InfoTreeLeafInner struct { + GlobalExitRoot common.Hash `json:"global_exit_root"` + BlockHash common.Hash `json:"block_hash"` + Timestamp uint64 `json:"timestamp"` +} + +// Hash returns the hash of the L1InfoTreeLeafInner struct +func (l *L1InfoTreeLeafInner) Hash() common.Hash { + return crypto.Keccak256Hash( + l.GlobalExitRoot.Bytes(), + l.BlockHash.Bytes(), + cdkcommon.Uint64ToBytes(l.Timestamp), + ) +} + +// MarshalJSON is the implementation of the json.Marshaler interface +func (l *L1InfoTreeLeafInner) MarshalJSON() ([]byte, error) { + return json.Marshal(&struct { + GlobalExitRoot [types.DefaultHeight]byte `json:"global_exit_root"` + BlockHash [types.DefaultHeight]byte `json:"block_hash"` + Timestamp uint64 `json:"timestamp"` + }{ + GlobalExitRoot: l.GlobalExitRoot, + BlockHash: l.BlockHash, + Timestamp: l.Timestamp, + }) +} + +// L1InfoTreeLeaf represents the leaf of the L1 info tree +type L1InfoTreeLeaf struct { + L1InfoTreeIndex uint32 `json:"l1_info_tree_index"` + RollupExitRoot [32]byte `json:"rer"` + MainnetExitRoot [32]byte `json:"mer"` + Inner *L1InfoTreeLeafInner `json:"inner"` +} + +// Hash returns the hash of the L1InfoTreeLeaf struct +func (l *L1InfoTreeLeaf) Hash() common.Hash { + return l.Inner.Hash() +} + +// Claim is the interface that will be implemented by the different types of claims +type Claim interface { + Type() string + Hash() common.Hash + MarshalJSON() ([]byte, error) +} + +// ClaimFromMainnnet represents a claim originating from the mainnet +type ClaimFromMainnnet struct { + ProofLeafMER *MerkleProof `json:"proof_leaf_mer"` + ProofGERToL1Root *MerkleProof `json:"proof_ger_l1root"` + L1Leaf *L1InfoTreeLeaf `json:"l1_leaf"` +} + +// Type is the implementation of Claim interface +func (c ClaimFromMainnnet) Type() string { + return "Mainnet" +} + +// MarshalJSON is the 
implementation of Claim interface +func (c *ClaimFromMainnnet) MarshalJSON() ([]byte, error) { + return json.Marshal(&struct { + Child map[string]interface{} `json:"Mainnet"` + }{ + Child: map[string]interface{}{ + "proof_leaf_mer": c.ProofLeafMER, + "proof_ger_l1root": c.ProofGERToL1Root, + "l1_leaf": c.L1Leaf, + }, + }) +} + +// Hash is the implementation of Claim interface +func (c *ClaimFromMainnnet) Hash() common.Hash { + return crypto.Keccak256Hash( + c.ProofLeafMER.Hash().Bytes(), + c.ProofGERToL1Root.Hash().Bytes(), + c.L1Leaf.Hash().Bytes(), + ) +} + +// ClaimFromRollup represents a claim originating from a rollup +type ClaimFromRollup struct { + ProofLeafLER *MerkleProof `json:"proof_leaf_ler"` + ProofLERToRER *MerkleProof `json:"proof_ler_rer"` + ProofGERToL1Root *MerkleProof `json:"proof_ger_l1root"` + L1Leaf *L1InfoTreeLeaf `json:"l1_leaf"` +} + +// Type is the implementation of Claim interface +func (c ClaimFromRollup) Type() string { + return "Rollup" +} + +// MarshalJSON is the implementation of Claim interface +func (c *ClaimFromRollup) MarshalJSON() ([]byte, error) { + return json.Marshal(&struct { + Child map[string]interface{} `json:"Rollup"` + }{ + Child: map[string]interface{}{ + "proof_leaf_ler": c.ProofLeafLER, + "proof_ler_rer": c.ProofLERToRER, + "proof_ger_l1root": c.ProofGERToL1Root, + "l1_leaf": c.L1Leaf, + }, + }) +} + +// Hash is the implementation of Claim interface +func (c *ClaimFromRollup) Hash() common.Hash { + return crypto.Keccak256Hash( + c.ProofLeafLER.Hash().Bytes(), + c.ProofLERToRER.Hash().Bytes(), + c.ProofGERToL1Root.Hash().Bytes(), + c.L1Leaf.Hash().Bytes(), + ) +} + +// ImportedBridgeExit represents a token bridge exit originating on another network but claimed on the current network. 
+type ImportedBridgeExit struct { + BridgeExit *BridgeExit `json:"bridge_exit"` + ClaimData Claim `json:"claim_data"` + GlobalIndex *GlobalIndex `json:"global_index"` +} + +// Hash returns a hash that uniquely identifies the imported bridge exit +func (c *ImportedBridgeExit) Hash() common.Hash { + return crypto.Keccak256Hash( + c.BridgeExit.Hash().Bytes(), + c.ClaimData.Hash().Bytes(), + c.GlobalIndex.Hash().Bytes(), + ) +} + +// CertificateHeader is the structure returned by the interop_getCertificateHeader RPC call +type CertificateHeader struct { + NetworkID uint32 `json:"network_id"` + Height uint64 `json:"height"` + EpochNumber *uint64 `json:"epoch_number"` + CertificateIndex *uint64 `json:"certificate_index"` + CertificateID common.Hash `json:"certificate_id"` + NewLocalExitRoot common.Hash `json:"new_local_exit_root"` + Status CertificateStatus `json:"status"` +} + +func (c CertificateHeader) String() string { + return fmt.Sprintf("Height: %d, CertificateID: %s, NewLocalExitRoot: %s", + c.Height, c.CertificateID.String(), c.NewLocalExitRoot.String()) +} diff --git a/agglayer/types_test.go b/agglayer/types_test.go new file mode 100644 index 00000000..1df1f20f --- /dev/null +++ b/agglayer/types_test.go @@ -0,0 +1,66 @@ +package agglayer + +import ( + "encoding/json" + "math/big" + "testing" + + "github.com/0xPolygon/cdk/log" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +const ( + expectedSignedCertificateEmptyMetadataJSON = 
`{"network_id":1,"height":1,"prev_local_exit_root":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"new_local_exit_root":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"bridge_exits":[{"leaf_type":"Transfer","token_info":null,"dest_network":0,"dest_address":"0x0000000000000000000000000000000000000000","amount":"1","metadata":[]}],"imported_bridge_exits":[{"bridge_exit":{"leaf_type":"Transfer","token_info":null,"dest_network":0,"dest_address":"0x0000000000000000000000000000000000000000","amount":"1","metadata":[]},"claim_data":null,"global_index":{"mainnet_flag":false,"rollup_index":1,"leaf_index":1}}],"signature":{"r":"0x0000000000000000000000000000000000000000000000000000000000000000","s":"0x0000000000000000000000000000000000000000000000000000000000000000","odd_y_parity":false}}` + expectedSignedCertificateyMetadataJSON = `{"network_id":1,"height":1,"prev_local_exit_root":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"new_local_exit_root":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"bridge_exits":[{"leaf_type":"Transfer","token_info":null,"dest_network":0,"dest_address":"0x0000000000000000000000000000000000000000","amount":"1","metadata":[1,2,3]}],"imported_bridge_exits":[{"bridge_exit":{"leaf_type":"Transfer","token_info":null,"dest_network":0,"dest_address":"0x0000000000000000000000000000000000000000","amount":"1","metadata":[]},"claim_data":null,"global_index":{"mainnet_flag":false,"rollup_index":1,"leaf_index":1}}],"signature":{"r":"0x0000000000000000000000000000000000000000000000000000000000000000","s":"0x0000000000000000000000000000000000000000000000000000000000000000","odd_y_parity":false}}` +) + +func TestMarshalJSON(t *testing.T) { + cert := SignedCertificate{ + Certificate: &Certificate{ + NetworkID: 1, + Height: 1, + PrevLocalExitRoot: common.Hash{}, + NewLocalExitRoot: common.Hash{}, + BridgeExits: []*BridgeExit{ + { + LeafType: LeafTypeAsset, + DestinationAddress: 
common.Address{}, + Amount: big.NewInt(1), + }, + }, + ImportedBridgeExits: []*ImportedBridgeExit{ + { + BridgeExit: &BridgeExit{ + LeafType: LeafTypeAsset, + DestinationAddress: common.Address{}, + Amount: big.NewInt(1), + Metadata: []byte{}, + }, + ClaimData: nil, + GlobalIndex: &GlobalIndex{ + MainnetFlag: false, + RollupIndex: 1, + LeafIndex: 1, + }, + }, + }, + }, + + Signature: &Signature{ + R: common.Hash{}, + S: common.Hash{}, + OddParity: false, + }, + } + data, err := json.Marshal(cert) + require.NoError(t, err) + log.Info(string(data)) + require.Equal(t, expectedSignedCertificateEmptyMetadataJSON, string(data)) + + cert.BridgeExits[0].Metadata = []byte{1, 2, 3} + data, err = json.Marshal(cert) + require.NoError(t, err) + log.Info(string(data)) + require.Equal(t, expectedSignedCertificateyMetadataJSON, string(data)) +} diff --git a/aggregator/aggregator.go b/aggregator/aggregator.go index 1998e842..8aa78011 100644 --- a/aggregator/aggregator.go +++ b/aggregator/aggregator.go @@ -15,7 +15,7 @@ import ( "unicode" cdkTypes "github.com/0xPolygon/cdk-rpc/types" - "github.com/0xPolygon/cdk/aggregator/agglayer" + "github.com/0xPolygon/cdk/agglayer" ethmanTypes "github.com/0xPolygon/cdk/aggregator/ethmantypes" "github.com/0xPolygon/cdk/aggregator/prover" cdkcommon "github.com/0xPolygon/cdk/common" @@ -141,7 +141,7 @@ func New( if !cfg.SyncModeOnlyEnabled && cfg.SettlementBackend == AggLayer { aggLayerClient = agglayer.NewAggLayerClient(cfg.AggLayerURL) - sequencerPrivateKey, err = newKeyFromKeystore(cfg.SequencerPrivateKey) + sequencerPrivateKey, err = cdkcommon.NewKeyFromKeystore(cfg.SequencerPrivateKey) if err != nil { return nil, err } @@ -476,7 +476,6 @@ func (a *Aggregator) settleWithAggLayer( inputs ethmanTypes.FinalProofInputs) bool { proofStrNo0x := strings.TrimPrefix(inputs.FinalProof.Proof, "0x") proofBytes := common.Hex2Bytes(proofStrNo0x) - tx := agglayer.Tx{ LastVerifiedBatch: cdkTypes.ArgUint64(proof.BatchNumber - 1), NewVerifiedBatch: 
cdkTypes.ArgUint64(proof.BatchNumberFinal), diff --git a/aggregator/aggregator_test.go b/aggregator/aggregator_test.go index f6e27b0f..fd03315f 100644 --- a/aggregator/aggregator_test.go +++ b/aggregator/aggregator_test.go @@ -16,6 +16,7 @@ import ( "testing" "time" + "github.com/0xPolygon/cdk/agglayer" mocks "github.com/0xPolygon/cdk/aggregator/mocks" "github.com/0xPolygon/cdk/aggregator/prover" "github.com/0xPolygon/cdk/config/types" @@ -53,7 +54,7 @@ type mox struct { ethTxManager *mocks.EthTxManagerClientMock etherman *mocks.EthermanMock proverMock *mocks.ProverInterfaceMock - aggLayerClientMock *mocks.AgglayerClientInterfaceMock + aggLayerClientMock *agglayer.AgglayerClientMock synchronizerMock *mocks.SynchronizerInterfaceMock rpcMock *mocks.RPCInterfaceMock } @@ -300,7 +301,7 @@ func Test_sendFinalProofSuccess(t *testing.T) { stateMock := mocks.NewStateInterfaceMock(t) ethTxManager := mocks.NewEthTxManagerClientMock(t) etherman := mocks.NewEthermanMock(t) - aggLayerClient := mocks.NewAgglayerClientInterfaceMock(t) + aggLayerClient := agglayer.NewAgglayerClientMock(t) rpcMock := mocks.NewRPCInterfaceMock(t) curve := elliptic.P256() @@ -489,7 +490,7 @@ func Test_sendFinalProofError(t *testing.T) { stateMock := mocks.NewStateInterfaceMock(t) ethTxManager := mocks.NewEthTxManagerClientMock(t) etherman := mocks.NewEthermanMock(t) - aggLayerClient := mocks.NewAgglayerClientInterfaceMock(t) + aggLayerClient := agglayer.NewAgglayerClientMock(t) rpcMock := mocks.NewRPCInterfaceMock(t) curve := elliptic.P256() diff --git a/aggregator/config.go b/aggregator/config.go index cdef80fd..2d7178f7 100644 --- a/aggregator/config.go +++ b/aggregator/config.go @@ -1,18 +1,14 @@ package aggregator import ( - "crypto/ecdsa" "fmt" "math/big" - "os" - "path/filepath" "github.com/0xPolygon/cdk/aggregator/db" "github.com/0xPolygon/cdk/config/types" "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/zkevm-ethtx-manager/ethtxmanager" syncronizerConfig 
"github.com/0xPolygonHermez/zkevm-synchronizer-l1/config" - "github.com/ethereum/go-ethereum/accounts/keystore" ) // SettlementBackend is the type of the settlement backend @@ -150,20 +146,3 @@ type Config struct { // When enabled, the aggregator will sync data only from L1 and will not generate or read the data stream. SyncModeOnlyEnabled bool `mapstructure:"SyncModeOnlyEnabled"` } - -// newKeyFromKeystore creates a private key from a keystore file -func newKeyFromKeystore(cfg types.KeystoreFileConfig) (*ecdsa.PrivateKey, error) { - if cfg.Path == "" && cfg.Password == "" { - return nil, nil - } - keystoreEncrypted, err := os.ReadFile(filepath.Clean(cfg.Path)) - if err != nil { - return nil, err - } - key, err := keystore.DecryptKey(keystoreEncrypted, cfg.Password) - if err != nil { - return nil, err - } - - return key.PrivateKey, nil -} diff --git a/aggregator/mocks/mock_agglayer_client.go b/aggregator/mocks/mock_agglayer_client.go deleted file mode 100644 index 2923ebe0..00000000 --- a/aggregator/mocks/mock_agglayer_client.go +++ /dev/null @@ -1,79 +0,0 @@ -// Code generated by mockery v2.39.0. DO NOT EDIT. 
- -package mocks - -import ( - agglayer "github.com/0xPolygon/cdk/aggregator/agglayer" - common "github.com/ethereum/go-ethereum/common" - - context "context" - - mock "github.com/stretchr/testify/mock" -) - -// AgglayerClientInterfaceMock is an autogenerated mock type for the AgglayerClientInterface type -type AgglayerClientInterfaceMock struct { - mock.Mock -} - -// SendTx provides a mock function with given fields: signedTx -func (_m *AgglayerClientInterfaceMock) SendTx(signedTx agglayer.SignedTx) (common.Hash, error) { - ret := _m.Called(signedTx) - - if len(ret) == 0 { - panic("no return value specified for SendTx") - } - - var r0 common.Hash - var r1 error - if rf, ok := ret.Get(0).(func(agglayer.SignedTx) (common.Hash, error)); ok { - return rf(signedTx) - } - if rf, ok := ret.Get(0).(func(agglayer.SignedTx) common.Hash); ok { - r0 = rf(signedTx) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(common.Hash) - } - } - - if rf, ok := ret.Get(1).(func(agglayer.SignedTx) error); ok { - r1 = rf(signedTx) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// WaitTxToBeMined provides a mock function with given fields: hash, ctx -func (_m *AgglayerClientInterfaceMock) WaitTxToBeMined(hash common.Hash, ctx context.Context) error { - ret := _m.Called(hash, ctx) - - if len(ret) == 0 { - panic("no return value specified for WaitTxToBeMined") - } - - var r0 error - if rf, ok := ret.Get(0).(func(common.Hash, context.Context) error); ok { - r0 = rf(hash, ctx) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// NewAgglayerClientInterfaceMock creates a new instance of AgglayerClientInterfaceMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. 
-func NewAgglayerClientInterfaceMock(t interface { - mock.TestingT - Cleanup(func()) -}) *AgglayerClientInterfaceMock { - mock := &AgglayerClientInterfaceMock{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/aggsender/aggsender.go b/aggsender/aggsender.go new file mode 100644 index 00000000..a228e1a9 --- /dev/null +++ b/aggsender/aggsender.go @@ -0,0 +1,502 @@ +package aggsender + +import ( + "context" + "crypto/ecdsa" + "encoding/json" + "errors" + "fmt" + "os" + "time" + + "github.com/0xPolygon/cdk/agglayer" + "github.com/0xPolygon/cdk/aggsender/db" + aggsendertypes "github.com/0xPolygon/cdk/aggsender/types" + "github.com/0xPolygon/cdk/bridgesync" + cdkcommon "github.com/0xPolygon/cdk/common" + "github.com/0xPolygon/cdk/l1infotreesync" + "github.com/0xPolygon/cdk/log" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" +) + +const signatureSize = 65 + +var ( + errNoBridgesAndClaims = errors.New("no bridges and claims to build certificate") + errInvalidSignatureSize = errors.New("invalid signature size") + + zeroLER = common.HexToHash("0x27ae5ba08d7291c96c8cbddcc148bf48a6d68c7974b94356f53754ef6171d757") +) + +// AggSender is a component that will send certificates to the aggLayer +type AggSender struct { + log aggsendertypes.Logger + + l2Syncer aggsendertypes.L2BridgeSyncer + l1infoTreeSyncer aggsendertypes.L1InfoTreeSyncer + + storage db.AggSenderStorage + aggLayerClient agglayer.AgglayerClientInterface + + cfg Config + + sequencerKey *ecdsa.PrivateKey +} + +// New returns a new AggSender +func New( + ctx context.Context, + logger *log.Logger, + cfg Config, + aggLayerClient agglayer.AgglayerClientInterface, + l1InfoTreeSyncer *l1infotreesync.L1InfoTreeSync, + l2Syncer *bridgesync.BridgeSync) (*AggSender, error) { + storage, err := db.NewAggSenderSQLStorage(logger, cfg.StoragePath) + if err != nil { + return nil, err + } + + sequencerPrivateKey, err := 
cdkcommon.NewKeyFromKeystore(cfg.AggsenderPrivateKey) + if err != nil { + return nil, err + } + + return &AggSender{ + cfg: cfg, + log: logger, + storage: storage, + l2Syncer: l2Syncer, + aggLayerClient: aggLayerClient, + l1infoTreeSyncer: l1InfoTreeSyncer, + sequencerKey: sequencerPrivateKey, + }, nil +} + +// Start starts the AggSender +func (a *AggSender) Start(ctx context.Context) { + go a.sendCertificates(ctx) + go a.checkIfCertificatesAreSettled(ctx) +} + +// sendCertificates sends certificates to the aggLayer +func (a *AggSender) sendCertificates(ctx context.Context) { + ticker := time.NewTicker(a.cfg.BlockGetInterval.Duration) + + for { + select { + case <-ticker.C: + if err := a.sendCertificate(ctx); err != nil { + log.Error(err) + } + case <-ctx.Done(): + a.log.Info("AggSender stopped") + return + } + } +} + +// sendCertificate sends certificate for a network +func (a *AggSender) sendCertificate(ctx context.Context) error { + a.log.Infof("trying to send a new certificate...") + + shouldSend, err := a.shouldSendCertificate(ctx) + if err != nil { + return err + } + + if !shouldSend { + a.log.Infof("waiting for pending certificates to be settled") + return nil + } + + lasL2BlockSynced, err := a.l2Syncer.GetLastProcessedBlock(ctx) + if err != nil { + return fmt.Errorf("error getting last processed block from l2: %w", err) + } + + lastSentCertificateInfo, err := a.storage.GetLastSentCertificate(ctx) + if err != nil { + return err + } + + previousToBlock := lastSentCertificateInfo.ToBlock + if lastSentCertificateInfo.Status == agglayer.InError { + // if the last certificate was in error, we need to resend it + // from the block before the error + previousToBlock = lastSentCertificateInfo.FromBlock - 1 + } + + if previousToBlock >= lasL2BlockSynced { + a.log.Infof("no new blocks to send a certificate, last certificate block: %d, last L2 block: %d", + previousToBlock, lasL2BlockSynced) + return nil + } + + fromBlock := previousToBlock + 1 + toBlock := 
lasL2BlockSynced + + bridges, err := a.l2Syncer.GetBridgesPublished(ctx, fromBlock, toBlock) + if err != nil { + return fmt.Errorf("error getting bridges: %w", err) + } + + if len(bridges) == 0 { + a.log.Infof("no bridges consumed, no need to send a certificate from block: %d to block: %d", fromBlock, toBlock) + return nil + } + + claims, err := a.l2Syncer.GetClaims(ctx, fromBlock, toBlock) + if err != nil { + return fmt.Errorf("error getting claims: %w", err) + } + + a.log.Infof("building certificate for block: %d to block: %d", fromBlock, toBlock) + + certificate, err := a.buildCertificate(ctx, bridges, claims, lastSentCertificateInfo) + if err != nil { + return fmt.Errorf("error building certificate: %w", err) + } + + signedCertificate, err := a.signCertificate(certificate) + if err != nil { + return fmt.Errorf("error signing certificate: %w", err) + } + + a.saveCertificateToFile(signedCertificate) + + certificateHash, err := a.aggLayerClient.SendCertificate(signedCertificate) + if err != nil { + return fmt.Errorf("error sending certificate: %w", err) + } + log.Infof("certificate send: Height: %d hash: %s", signedCertificate.Height, certificateHash.String()) + + if err := a.storage.SaveLastSentCertificate(ctx, aggsendertypes.CertificateInfo{ + Height: certificate.Height, + CertificateID: certificateHash, + NewLocalExitRoot: certificate.NewLocalExitRoot, + FromBlock: fromBlock, + ToBlock: toBlock, + }); err != nil { + return fmt.Errorf("error saving last sent certificate in db: %w", err) + } + + a.log.Infof("certificate: %s sent successfully for range of l2 blocks (from block: %d, to block: %d)", + certificateHash, fromBlock, toBlock) + + return nil +} + +// saveCertificate saves the certificate to a tmp file +func (a *AggSender) saveCertificateToFile(signedCertificate *agglayer.SignedCertificate) { + if signedCertificate == nil || !a.cfg.SaveCertificatesToFiles { + return + } + + fn := fmt.Sprintf("/tmp/certificate_%04d.json", signedCertificate.Height) + 
a.log.Infof("saving certificate to file: %s", fn) + jsonData, err := json.Marshal(signedCertificate) + if err != nil { + a.log.Errorf("error marshalling certificate: %w", err) + } + + if err = os.WriteFile(fn, jsonData, 0644); err != nil { //nolint:gosec,mnd // we are writing to a tmp file + a.log.Errorf("error writing certificate to file: %w", err) + } +} + +// buildCertificate builds a certificate from the bridge events +func (a *AggSender) buildCertificate(ctx context.Context, + bridges []bridgesync.Bridge, + claims []bridgesync.Claim, + lastSentCertificateInfo aggsendertypes.CertificateInfo) (*agglayer.Certificate, error) { + if len(bridges) == 0 && len(claims) == 0 { + return nil, errNoBridgesAndClaims + } + + bridgeExits := a.getBridgeExits(bridges) + importedBridgeExits, err := a.getImportedBridgeExits(ctx, claims) + if err != nil { + return nil, fmt.Errorf("error getting imported bridge exits: %w", err) + } + + var depositCount uint32 + if len(bridges) > 0 { + depositCount = bridges[len(bridges)-1].DepositCount + } + + exitRoot, err := a.l2Syncer.GetExitRootByIndex(ctx, depositCount) + if err != nil { + return nil, fmt.Errorf("error getting exit root by index: %d. 
Error: %w", depositCount, err) + } + + height := lastSentCertificateInfo.Height + 1 + previousLER := lastSentCertificateInfo.NewLocalExitRoot + if lastSentCertificateInfo.NewLocalExitRoot == (common.Hash{}) { + // meaning this is the first certificate + height = 0 + previousLER = zeroLER + } + + return &agglayer.Certificate{ + NetworkID: a.l2Syncer.OriginNetwork(), + PrevLocalExitRoot: previousLER, + NewLocalExitRoot: exitRoot.Hash, + BridgeExits: bridgeExits, + ImportedBridgeExits: importedBridgeExits, + Height: height, + }, nil +} + +// convertClaimToImportedBridgeExit converts a claim to an ImportedBridgeExit object +func (a *AggSender) convertClaimToImportedBridgeExit(claim bridgesync.Claim) (*agglayer.ImportedBridgeExit, error) { + leafType := agglayer.LeafTypeAsset + if claim.IsMessage { + leafType = agglayer.LeafTypeMessage + } + + bridgeExit := &agglayer.BridgeExit{ + LeafType: leafType, + TokenInfo: &agglayer.TokenInfo{ + OriginNetwork: claim.OriginNetwork, + OriginTokenAddress: claim.OriginAddress, + }, + DestinationNetwork: claim.DestinationNetwork, + DestinationAddress: claim.DestinationAddress, + Amount: claim.Amount, + Metadata: claim.Metadata, + } + + mainnetFlag, rollupIndex, leafIndex, err := bridgesync.DecodeGlobalIndex(claim.GlobalIndex) + if err != nil { + return nil, fmt.Errorf("error decoding global index: %w", err) + } + + return &agglayer.ImportedBridgeExit{ + BridgeExit: bridgeExit, + GlobalIndex: &agglayer.GlobalIndex{ + MainnetFlag: mainnetFlag, + RollupIndex: rollupIndex, + LeafIndex: leafIndex, + }, + }, nil +} + +// getBridgeExits converts bridges to agglayer.BridgeExit objects +func (a *AggSender) getBridgeExits(bridges []bridgesync.Bridge) []*agglayer.BridgeExit { + bridgeExits := make([]*agglayer.BridgeExit, 0, len(bridges)) + + for _, bridge := range bridges { + bridgeExits = append(bridgeExits, &agglayer.BridgeExit{ + LeafType: agglayer.LeafType(bridge.LeafType), + TokenInfo: &agglayer.TokenInfo{ + OriginNetwork: 
bridge.OriginNetwork, + OriginTokenAddress: bridge.OriginAddress, + }, + DestinationNetwork: bridge.DestinationNetwork, + DestinationAddress: bridge.DestinationAddress, + Amount: bridge.Amount, + Metadata: bridge.Metadata, + }) + } + + return bridgeExits +} + +// getImportedBridgeExits converts claims to agglayer.ImportedBridgeExit objects and calculates necessary proofs +func (a *AggSender) getImportedBridgeExits( + ctx context.Context, claims []bridgesync.Claim, +) ([]*agglayer.ImportedBridgeExit, error) { + if len(claims) == 0 { + // no claims to convert + return nil, nil + } + + var ( + greatestL1InfoTreeIndexUsed uint32 + importedBridgeExits = make([]*agglayer.ImportedBridgeExit, 0, len(claims)) + claimL1Info = make([]*l1infotreesync.L1InfoTreeLeaf, 0, len(claims)) + ) + + for _, claim := range claims { + info, err := a.l1infoTreeSyncer.GetInfoByGlobalExitRoot(claim.GlobalExitRoot) + if err != nil { + return nil, fmt.Errorf("error getting info by global exit root: %w", err) + } + + claimL1Info = append(claimL1Info, info) + + if info.L1InfoTreeIndex > greatestL1InfoTreeIndexUsed { + greatestL1InfoTreeIndexUsed = info.L1InfoTreeIndex + } + } + + rootToProve, err := a.l1infoTreeSyncer.GetL1InfoTreeRootByIndex(ctx, greatestL1InfoTreeIndexUsed) + if err != nil { + return nil, fmt.Errorf("error getting L1 Info tree root by index: %d. 
Error: %w", greatestL1InfoTreeIndexUsed, err) + } + + for i, claim := range claims { + l1Info := claimL1Info[i] + + a.log.Debugf("claim[%d]: destAddr: %s GER:%s", i, claim.DestinationAddress.String(), claim.GlobalExitRoot.String()) + ibe, err := a.convertClaimToImportedBridgeExit(claim) + if err != nil { + return nil, fmt.Errorf("error converting claim to imported bridge exit: %w", err) + } + + importedBridgeExits = append(importedBridgeExits, ibe) + + gerToL1Proof, err := a.l1infoTreeSyncer.GetL1InfoTreeMerkleProofFromIndexToRoot( + ctx, l1Info.L1InfoTreeIndex, rootToProve.Hash, + ) + if err != nil { + return nil, fmt.Errorf( + "error getting L1 Info tree merkle proof for leaf index: %d and root: %s. Error: %w", + l1Info.L1InfoTreeIndex, rootToProve.Hash, err, + ) + } + + claim := claims[i] + if ibe.GlobalIndex.MainnetFlag { + ibe.ClaimData = &agglayer.ClaimFromMainnnet{ + L1Leaf: &agglayer.L1InfoTreeLeaf{ + L1InfoTreeIndex: l1Info.L1InfoTreeIndex, + RollupExitRoot: claim.RollupExitRoot, + MainnetExitRoot: claim.MainnetExitRoot, + Inner: &agglayer.L1InfoTreeLeafInner{ + GlobalExitRoot: l1Info.GlobalExitRoot, + Timestamp: l1Info.Timestamp, + BlockHash: l1Info.PreviousBlockHash, + }, + }, + ProofLeafMER: &agglayer.MerkleProof{ + Root: claim.MainnetExitRoot, + Proof: claim.ProofLocalExitRoot, + }, + ProofGERToL1Root: &agglayer.MerkleProof{ + Root: rootToProve.Hash, + Proof: gerToL1Proof, + }, + } + } else { + ibe.ClaimData = &agglayer.ClaimFromRollup{ + L1Leaf: &agglayer.L1InfoTreeLeaf{ + L1InfoTreeIndex: l1Info.L1InfoTreeIndex, + RollupExitRoot: claim.RollupExitRoot, + MainnetExitRoot: claim.MainnetExitRoot, + Inner: &agglayer.L1InfoTreeLeafInner{ + GlobalExitRoot: l1Info.GlobalExitRoot, + Timestamp: l1Info.Timestamp, + BlockHash: l1Info.PreviousBlockHash, + }, + }, + ProofLeafLER: &agglayer.MerkleProof{ + Root: claim.MainnetExitRoot, + Proof: claim.ProofLocalExitRoot, + }, + ProofLERToRER: &agglayer.MerkleProof{ + Root: claim.RollupExitRoot, + Proof: 
claim.ProofRollupExitRoot, + }, + ProofGERToL1Root: &agglayer.MerkleProof{ + Root: rootToProve.Hash, + Proof: gerToL1Proof, + }, + } + } + } + + return importedBridgeExits, nil +} + +// signCertificate signs a certificate with the sequencer key +func (a *AggSender) signCertificate(certificate *agglayer.Certificate) (*agglayer.SignedCertificate, error) { + hashToSign := certificate.Hash() + + sig, err := crypto.Sign(hashToSign.Bytes(), a.sequencerKey) + if err != nil { + return nil, err + } + + r, s, isOddParity, err := extractSignatureData(sig) + if err != nil { + return nil, err + } + + return &agglayer.SignedCertificate{ + Certificate: certificate, + Signature: &agglayer.Signature{ + R: r, + S: s, + OddParity: isOddParity, + }, + }, nil +} + +// checkIfCertificatesAreSettled checks if certificates are settled +func (a *AggSender) checkIfCertificatesAreSettled(ctx context.Context) { + ticker := time.NewTicker(a.cfg.CheckSettledInterval.Duration) + for { + select { + case <-ticker.C: + a.checkPendingCertificatesStatus(ctx) + case <-ctx.Done(): + return + } + } +} + +// checkPendingCertificatesStatus checks the status of pending certificates +// and updates in the storage if it changed on agglayer +func (a *AggSender) checkPendingCertificatesStatus(ctx context.Context) { + pendingCertificates, err := a.storage.GetCertificatesByStatus(ctx, []agglayer.CertificateStatus{agglayer.Pending}) + if err != nil { + a.log.Errorf("error getting pending certificates: %w", err) + } + + for _, certificate := range pendingCertificates { + certificateHeader, err := a.aggLayerClient.GetCertificateHeader(certificate.CertificateID) + if err != nil { + a.log.Errorf("error getting header of certificate %s with height: %d from agglayer: %w", + certificate.CertificateID, certificate.Height, err) + continue + } + + if certificateHeader.Status != agglayer.Pending { + certificate.Status = certificateHeader.Status + + a.log.Infof("certificate %s changed status to %s", 
certificateHeader.String(), certificate.Status) + + if err := a.storage.UpdateCertificateStatus(ctx, *certificate); err != nil { + a.log.Errorf("error updating certificate status in storage: %w", err) + continue + } + } + } +} + +// shouldSendCertificate checks if a certificate should be sent at given time +// if we have pending certificates, then we wait until they are settled +func (a *AggSender) shouldSendCertificate(ctx context.Context) (bool, error) { + pendingCertificates, err := a.storage.GetCertificatesByStatus(ctx, []agglayer.CertificateStatus{agglayer.Pending}) + if err != nil { + return false, fmt.Errorf("error getting pending certificates: %w", err) + } + + return len(pendingCertificates) == 0, nil +} + +// extractSignatureData extracts the R, S, and V from a 65-byte signature +func extractSignatureData(signature []byte) (r, s common.Hash, isOddParity bool, err error) { + if len(signature) != signatureSize { + err = errInvalidSignatureSize + return + } + + r = common.BytesToHash(signature[:32]) // First 32 bytes are R + s = common.BytesToHash(signature[32:64]) // Next 32 bytes are S + isOddParity = signature[64]%2 == 1 //nolint:mnd // Last byte is V + + return +} diff --git a/aggsender/aggsender_test.go b/aggsender/aggsender_test.go new file mode 100644 index 00000000..69dc6ed1 --- /dev/null +++ b/aggsender/aggsender_test.go @@ -0,0 +1,1407 @@ +package aggsender + +import ( + "context" + "crypto/ecdsa" + "encoding/json" + "errors" + "fmt" + "math/big" + "os" + "testing" + "time" + + "github.com/0xPolygon/cdk/agglayer" + "github.com/0xPolygon/cdk/aggsender/mocks" + aggsendertypes "github.com/0xPolygon/cdk/aggsender/types" + "github.com/0xPolygon/cdk/bridgesync" + "github.com/0xPolygon/cdk/config/types" + "github.com/0xPolygon/cdk/l1infotreesync" + "github.com/0xPolygon/cdk/log" + treeTypes "github.com/0xPolygon/cdk/tree/types" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/stretchr/testify/mock" + 
"github.com/stretchr/testify/require" +) + +func TestExploratoryGetCertificateHeader(t *testing.T) { + t.Skip("This test is exploratory and should be skipped") + aggLayerClient := agglayer.NewAggLayerClient("http://localhost:32795") + certificateID := common.HexToHash("0xf153e75e24591432ac5deafaeaafba3fec0fd851261c86051b9c0d540b38c369") + certificateHeader, err := aggLayerClient.GetCertificateHeader(certificateID) + require.NoError(t, err) + fmt.Print(certificateHeader) +} + +func TestConvertClaimToImportedBridgeExit(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + claim bridgesync.Claim + expectedError bool + expectedExit *agglayer.ImportedBridgeExit + }{ + { + name: "Asset claim", + claim: bridgesync.Claim{ + IsMessage: false, + OriginNetwork: 1, + OriginAddress: common.HexToAddress("0x123"), + DestinationNetwork: 2, + DestinationAddress: common.HexToAddress("0x456"), + Amount: big.NewInt(100), + Metadata: []byte("metadata"), + GlobalIndex: big.NewInt(1), + }, + expectedError: false, + expectedExit: &agglayer.ImportedBridgeExit{ + BridgeExit: &agglayer.BridgeExit{ + LeafType: agglayer.LeafTypeAsset, + TokenInfo: &agglayer.TokenInfo{ + OriginNetwork: 1, + OriginTokenAddress: common.HexToAddress("0x123"), + }, + DestinationNetwork: 2, + DestinationAddress: common.HexToAddress("0x456"), + Amount: big.NewInt(100), + Metadata: []byte("metadata"), + }, + GlobalIndex: &agglayer.GlobalIndex{ + MainnetFlag: false, + RollupIndex: 0, + LeafIndex: 1, + }, + }, + }, + { + name: "Message claim", + claim: bridgesync.Claim{ + IsMessage: true, + OriginNetwork: 1, + OriginAddress: common.HexToAddress("0x123"), + DestinationNetwork: 2, + DestinationAddress: common.HexToAddress("0x456"), + Amount: big.NewInt(100), + Metadata: []byte("metadata"), + GlobalIndex: big.NewInt(2), + }, + expectedError: false, + expectedExit: &agglayer.ImportedBridgeExit{ + BridgeExit: &agglayer.BridgeExit{ + LeafType: agglayer.LeafTypeMessage, + TokenInfo: &agglayer.TokenInfo{ + 
OriginNetwork: 1, + OriginTokenAddress: common.HexToAddress("0x123"), + }, + DestinationNetwork: 2, + DestinationAddress: common.HexToAddress("0x456"), + Amount: big.NewInt(100), + Metadata: []byte("metadata"), + }, + GlobalIndex: &agglayer.GlobalIndex{ + MainnetFlag: false, + RollupIndex: 0, + LeafIndex: 2, + }, + }, + }, + { + name: "Invalid global index", + claim: bridgesync.Claim{ + IsMessage: false, + OriginNetwork: 1, + OriginAddress: common.HexToAddress("0x123"), + DestinationNetwork: 2, + DestinationAddress: common.HexToAddress("0x456"), + Amount: big.NewInt(100), + Metadata: []byte("metadata"), + GlobalIndex: new(big.Int).SetBytes([]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}), + }, + expectedError: true, + expectedExit: nil, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + aggSender := &AggSender{} + exit, err := aggSender.convertClaimToImportedBridgeExit(tt.claim) + + if tt.expectedError { + require.Error(t, err) + } else { + require.NoError(t, err) + require.Equal(t, tt.expectedExit, exit) + } + }) + } +} + +func TestGetBridgeExits(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + bridges []bridgesync.Bridge + expectedExits []*agglayer.BridgeExit + }{ + { + name: "Single bridge", + bridges: []bridgesync.Bridge{ + { + LeafType: agglayer.LeafTypeAsset.Uint8(), + OriginNetwork: 1, + OriginAddress: common.HexToAddress("0x123"), + DestinationNetwork: 2, + DestinationAddress: common.HexToAddress("0x456"), + Amount: big.NewInt(100), + Metadata: []byte("metadata"), + }, + }, + expectedExits: []*agglayer.BridgeExit{ + { + LeafType: agglayer.LeafTypeAsset, + TokenInfo: &agglayer.TokenInfo{ + OriginNetwork: 1, + OriginTokenAddress: common.HexToAddress("0x123"), + }, + DestinationNetwork: 2, + DestinationAddress: common.HexToAddress("0x456"), + Amount: big.NewInt(100), + Metadata: []byte("metadata"), + }, + }, + }, + { + name: "Multiple bridges", + bridges: []bridgesync.Bridge{ + { + 
LeafType: agglayer.LeafTypeAsset.Uint8(), + OriginNetwork: 1, + OriginAddress: common.HexToAddress("0x123"), + DestinationNetwork: 2, + DestinationAddress: common.HexToAddress("0x456"), + Amount: big.NewInt(100), + Metadata: []byte("metadata"), + }, + { + LeafType: agglayer.LeafTypeMessage.Uint8(), + OriginNetwork: 3, + OriginAddress: common.HexToAddress("0x789"), + DestinationNetwork: 4, + DestinationAddress: common.HexToAddress("0xabc"), + Amount: big.NewInt(200), + Metadata: []byte("data"), + }, + }, + expectedExits: []*agglayer.BridgeExit{ + { + LeafType: agglayer.LeafTypeAsset, + TokenInfo: &agglayer.TokenInfo{ + OriginNetwork: 1, + OriginTokenAddress: common.HexToAddress("0x123"), + }, + DestinationNetwork: 2, + DestinationAddress: common.HexToAddress("0x456"), + Amount: big.NewInt(100), + Metadata: []byte("metadata"), + }, + { + LeafType: agglayer.LeafTypeMessage, + TokenInfo: &agglayer.TokenInfo{ + OriginNetwork: 3, + OriginTokenAddress: common.HexToAddress("0x789"), + }, + DestinationNetwork: 4, + DestinationAddress: common.HexToAddress("0xabc"), + Amount: big.NewInt(200), + Metadata: []byte("data"), + }, + }, + }, + { + name: "No bridges", + bridges: []bridgesync.Bridge{}, + expectedExits: []*agglayer.BridgeExit{}, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + aggSender := &AggSender{} + exits := aggSender.getBridgeExits(tt.bridges) + + require.Equal(t, tt.expectedExits, exits) + }) + } +} + +//nolint:dupl +func TestGetImportedBridgeExits(t *testing.T) { + t.Parallel() + + mockProof := generateTestProof(t) + mockL1InfoTreeSyncer := mocks.NewL1InfoTreeSyncerMock(t) + mockL1InfoTreeSyncer.On("GetInfoByGlobalExitRoot", mock.Anything).Return(&l1infotreesync.L1InfoTreeLeaf{ + L1InfoTreeIndex: 1, + Timestamp: 123456789, + PreviousBlockHash: common.HexToHash("0xabc"), + GlobalExitRoot: common.HexToHash("0x7891"), + }, nil) + mockL1InfoTreeSyncer.On("GetL1InfoTreeRootByIndex", mock.Anything, 
mock.Anything).Return( + treeTypes.Root{Hash: common.HexToHash("0x7891")}, nil) + mockL1InfoTreeSyncer.On("GetL1InfoTreeMerkleProofFromIndexToRoot", mock.Anything, + mock.Anything, mock.Anything).Return(mockProof, nil) + + tests := []struct { + name string + claims []bridgesync.Claim + expectedError bool + expectedExits []*agglayer.ImportedBridgeExit + }{ + { + name: "Single claim", + claims: []bridgesync.Claim{ + { + IsMessage: false, + OriginNetwork: 1, + OriginAddress: common.HexToAddress("0x1234"), + DestinationNetwork: 2, + DestinationAddress: common.HexToAddress("0x4567"), + Amount: big.NewInt(111), + Metadata: []byte("metadata1"), + GlobalIndex: bridgesync.GenerateGlobalIndex(false, 1, 1), + GlobalExitRoot: common.HexToHash("0x7891"), + RollupExitRoot: common.HexToHash("0xaaab"), + MainnetExitRoot: common.HexToHash("0xbbba"), + ProofLocalExitRoot: mockProof, + ProofRollupExitRoot: mockProof, + }, + }, + expectedError: false, + expectedExits: []*agglayer.ImportedBridgeExit{ + { + BridgeExit: &agglayer.BridgeExit{ + LeafType: agglayer.LeafTypeAsset, + TokenInfo: &agglayer.TokenInfo{ + OriginNetwork: 1, + OriginTokenAddress: common.HexToAddress("0x1234"), + }, + DestinationNetwork: 2, + DestinationAddress: common.HexToAddress("0x4567"), + Amount: big.NewInt(111), + Metadata: []byte("metadata1"), + }, + GlobalIndex: &agglayer.GlobalIndex{ + MainnetFlag: false, + RollupIndex: 1, + LeafIndex: 1, + }, + ClaimData: &agglayer.ClaimFromRollup{ + L1Leaf: &agglayer.L1InfoTreeLeaf{ + L1InfoTreeIndex: 1, + RollupExitRoot: common.HexToHash("0xaaab"), + MainnetExitRoot: common.HexToHash("0xbbba"), + Inner: &agglayer.L1InfoTreeLeafInner{ + GlobalExitRoot: common.HexToHash("0x7891"), + Timestamp: 123456789, + BlockHash: common.HexToHash("0xabc"), + }, + }, + ProofLeafLER: &agglayer.MerkleProof{ + Root: common.HexToHash("0xbbba"), + Proof: mockProof, + }, + ProofLERToRER: &agglayer.MerkleProof{ + Root: common.HexToHash("0xaaab"), + Proof: mockProof, + }, + ProofGERToL1Root: 
&agglayer.MerkleProof{ + Root: common.HexToHash("0x7891"), + Proof: mockProof, + }, + }, + }, + }, + }, + { + name: "Multiple claims", + claims: []bridgesync.Claim{ + { + IsMessage: false, + OriginNetwork: 1, + OriginAddress: common.HexToAddress("0x123"), + DestinationNetwork: 2, + DestinationAddress: common.HexToAddress("0x456"), + Amount: big.NewInt(100), + Metadata: []byte("metadata"), + GlobalIndex: big.NewInt(1), + GlobalExitRoot: common.HexToHash("0x7891"), + RollupExitRoot: common.HexToHash("0xaaa"), + MainnetExitRoot: common.HexToHash("0xbbb"), + ProofLocalExitRoot: mockProof, + ProofRollupExitRoot: mockProof, + }, + { + IsMessage: true, + OriginNetwork: 3, + OriginAddress: common.HexToAddress("0x789"), + DestinationNetwork: 4, + DestinationAddress: common.HexToAddress("0xabc"), + Amount: big.NewInt(200), + Metadata: []byte("data"), + GlobalIndex: bridgesync.GenerateGlobalIndex(true, 0, 2), + GlobalExitRoot: common.HexToHash("0x7891"), + RollupExitRoot: common.HexToHash("0xbbb"), + MainnetExitRoot: common.HexToHash("0xccc"), + ProofLocalExitRoot: mockProof, + ProofRollupExitRoot: mockProof, + }, + }, + expectedError: false, + expectedExits: []*agglayer.ImportedBridgeExit{ + { + BridgeExit: &agglayer.BridgeExit{ + LeafType: agglayer.LeafTypeAsset, + TokenInfo: &agglayer.TokenInfo{ + OriginNetwork: 1, + OriginTokenAddress: common.HexToAddress("0x123"), + }, + DestinationNetwork: 2, + DestinationAddress: common.HexToAddress("0x456"), + Amount: big.NewInt(100), + Metadata: []byte("metadata"), + }, + GlobalIndex: &agglayer.GlobalIndex{ + MainnetFlag: false, + RollupIndex: 0, + LeafIndex: 1, + }, + ClaimData: &agglayer.ClaimFromRollup{ + L1Leaf: &agglayer.L1InfoTreeLeaf{ + L1InfoTreeIndex: 1, + RollupExitRoot: common.HexToHash("0xaaa"), + MainnetExitRoot: common.HexToHash("0xbbb"), + Inner: &agglayer.L1InfoTreeLeafInner{ + GlobalExitRoot: common.HexToHash("0x7891"), + Timestamp: 123456789, + BlockHash: common.HexToHash("0xabc"), + }, + }, + ProofLeafLER: 
&agglayer.MerkleProof{ + Root: common.HexToHash("0xbbb"), + Proof: mockProof, + }, + ProofLERToRER: &agglayer.MerkleProof{ + Root: common.HexToHash("0xaaa"), + Proof: mockProof, + }, + ProofGERToL1Root: &agglayer.MerkleProof{ + Root: common.HexToHash("0x7891"), + Proof: mockProof, + }, + }, + }, + { + BridgeExit: &agglayer.BridgeExit{ + LeafType: agglayer.LeafTypeMessage, + TokenInfo: &agglayer.TokenInfo{ + OriginNetwork: 3, + OriginTokenAddress: common.HexToAddress("0x789"), + }, + DestinationNetwork: 4, + DestinationAddress: common.HexToAddress("0xabc"), + Amount: big.NewInt(200), + Metadata: []byte("data"), + }, + GlobalIndex: &agglayer.GlobalIndex{ + MainnetFlag: true, + RollupIndex: 0, + LeafIndex: 2, + }, + ClaimData: &agglayer.ClaimFromMainnnet{ + L1Leaf: &agglayer.L1InfoTreeLeaf{ + L1InfoTreeIndex: 1, + RollupExitRoot: common.HexToHash("0xbbb"), + MainnetExitRoot: common.HexToHash("0xccc"), + Inner: &agglayer.L1InfoTreeLeafInner{ + GlobalExitRoot: common.HexToHash("0x7891"), + Timestamp: 123456789, + BlockHash: common.HexToHash("0xabc"), + }, + }, + ProofLeafMER: &agglayer.MerkleProof{ + Root: common.HexToHash("0xccc"), + Proof: mockProof, + }, + ProofGERToL1Root: &agglayer.MerkleProof{ + Root: common.HexToHash("0x7891"), + Proof: mockProof, + }, + }, + }, + }, + }, + { + name: "No claims", + claims: []bridgesync.Claim{}, + expectedError: false, + expectedExits: nil, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + aggSender := &AggSender{ + l1infoTreeSyncer: mockL1InfoTreeSyncer, + log: log.WithFields("test", "unittest"), + } + exits, err := aggSender.getImportedBridgeExits(context.Background(), tt.claims) + + if tt.expectedError { + require.Error(t, err) + require.Nil(t, exits) + } else { + require.NoError(t, err) + require.Equal(t, tt.expectedExits, exits) + } + }) + } +} + +func TestBuildCertificate(t *testing.T) { + mockL2BridgeSyncer := mocks.NewL2BridgeSyncerMock(t) + mockL1InfoTreeSyncer 
:= mocks.NewL1InfoTreeSyncerMock(t) + mockProof := generateTestProof(t) + + tests := []struct { + name string + bridges []bridgesync.Bridge + claims []bridgesync.Claim + lastSentCertificateInfo aggsendertypes.CertificateInfo + mockFn func() + expectedCert *agglayer.Certificate + expectedError bool + }{ + { + name: "Valid certificate with bridges and claims", + bridges: []bridgesync.Bridge{ + { + LeafType: agglayer.LeafTypeAsset.Uint8(), + OriginNetwork: 1, + OriginAddress: common.HexToAddress("0x123"), + DestinationNetwork: 2, + DestinationAddress: common.HexToAddress("0x456"), + Amount: big.NewInt(100), + Metadata: []byte("metadata"), + DepositCount: 1, + }, + }, + claims: []bridgesync.Claim{ + { + IsMessage: false, + OriginNetwork: 1, + OriginAddress: common.HexToAddress("0x1234"), + DestinationNetwork: 2, + DestinationAddress: common.HexToAddress("0x4567"), + Amount: big.NewInt(111), + Metadata: []byte("metadata1"), + GlobalIndex: big.NewInt(1), + GlobalExitRoot: common.HexToHash("0x7891"), + RollupExitRoot: common.HexToHash("0xaaab"), + MainnetExitRoot: common.HexToHash("0xbbba"), + ProofLocalExitRoot: mockProof, + ProofRollupExitRoot: mockProof, + }, + }, + lastSentCertificateInfo: aggsendertypes.CertificateInfo{ + NewLocalExitRoot: common.HexToHash("0x123"), + Height: 1, + }, + expectedCert: &agglayer.Certificate{ + NetworkID: 1, + PrevLocalExitRoot: common.HexToHash("0x123"), + NewLocalExitRoot: common.HexToHash("0x789"), + BridgeExits: []*agglayer.BridgeExit{ + { + LeafType: agglayer.LeafTypeAsset, + TokenInfo: &agglayer.TokenInfo{ + OriginNetwork: 1, + OriginTokenAddress: common.HexToAddress("0x123"), + }, + DestinationNetwork: 2, + DestinationAddress: common.HexToAddress("0x456"), + Amount: big.NewInt(100), + Metadata: []byte("metadata"), + }, + }, + ImportedBridgeExits: []*agglayer.ImportedBridgeExit{ + { + BridgeExit: &agglayer.BridgeExit{ + LeafType: agglayer.LeafTypeAsset, + TokenInfo: &agglayer.TokenInfo{ + OriginNetwork: 1, + OriginTokenAddress: 
common.HexToAddress("0x1234"), + }, + DestinationNetwork: 2, + DestinationAddress: common.HexToAddress("0x4567"), + Amount: big.NewInt(111), + Metadata: []byte("metadata1"), + }, + GlobalIndex: &agglayer.GlobalIndex{ + MainnetFlag: false, + RollupIndex: 0, + LeafIndex: 1, + }, + ClaimData: &agglayer.ClaimFromRollup{ + L1Leaf: &agglayer.L1InfoTreeLeaf{ + L1InfoTreeIndex: 1, + RollupExitRoot: common.HexToHash("0xaaab"), + MainnetExitRoot: common.HexToHash("0xbbba"), + Inner: &agglayer.L1InfoTreeLeafInner{ + GlobalExitRoot: common.HexToHash("0x7891"), + Timestamp: 123456789, + BlockHash: common.HexToHash("0xabc"), + }, + }, + ProofLeafLER: &agglayer.MerkleProof{ + Root: common.HexToHash("0xbbba"), + Proof: mockProof, + }, + ProofLERToRER: &agglayer.MerkleProof{ + Root: common.HexToHash("0xaaab"), + Proof: mockProof, + }, + ProofGERToL1Root: &agglayer.MerkleProof{ + Root: common.HexToHash("0x7891"), + Proof: mockProof, + }, + }, + }, + }, + Height: 2, + }, + mockFn: func() { + mockL2BridgeSyncer.On("OriginNetwork").Return(uint32(1)) + mockL2BridgeSyncer.On("GetExitRootByIndex", mock.Anything, mock.Anything).Return(treeTypes.Root{Hash: common.HexToHash("0x789")}, nil) + + mockL1InfoTreeSyncer.On("GetInfoByGlobalExitRoot", mock.Anything).Return(&l1infotreesync.L1InfoTreeLeaf{ + L1InfoTreeIndex: 1, + Timestamp: 123456789, + PreviousBlockHash: common.HexToHash("0xabc"), + GlobalExitRoot: common.HexToHash("0x7891"), + }, nil) + mockL1InfoTreeSyncer.On("GetL1InfoTreeRootByIndex", mock.Anything, mock.Anything).Return(treeTypes.Root{Hash: common.HexToHash("0x7891")}, nil) + mockL1InfoTreeSyncer.On("GetL1InfoTreeMerkleProofFromIndexToRoot", mock.Anything, mock.Anything, mock.Anything).Return(mockProof, nil) + }, + expectedError: false, + }, + { + name: "No bridges or claims", + bridges: []bridgesync.Bridge{}, + claims: []bridgesync.Claim{}, + lastSentCertificateInfo: aggsendertypes.CertificateInfo{ + NewLocalExitRoot: common.HexToHash("0x123"), + Height: 1, + }, + expectedCert: 
nil, + expectedError: true, + }, + { + name: "Error getting imported bridge exits", + bridges: []bridgesync.Bridge{ + { + LeafType: agglayer.LeafTypeAsset.Uint8(), + OriginNetwork: 1, + OriginAddress: common.HexToAddress("0x123"), + DestinationNetwork: 2, + DestinationAddress: common.HexToAddress("0x456"), + Amount: big.NewInt(100), + Metadata: []byte("metadata"), + DepositCount: 1, + }, + }, + claims: []bridgesync.Claim{ + { + IsMessage: false, + OriginNetwork: 1, + OriginAddress: common.HexToAddress("0x1234"), + DestinationNetwork: 2, + DestinationAddress: common.HexToAddress("0x4567"), + Amount: big.NewInt(111), + Metadata: []byte("metadata1"), + GlobalIndex: new(big.Int).SetBytes([]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}), + GlobalExitRoot: common.HexToHash("0x7891"), + RollupExitRoot: common.HexToHash("0xaaab"), + MainnetExitRoot: common.HexToHash("0xbbba"), + ProofLocalExitRoot: mockProof, + }, + }, + lastSentCertificateInfo: aggsendertypes.CertificateInfo{ + NewLocalExitRoot: common.HexToHash("0x123"), + Height: 1, + }, + mockFn: func() { + mockL1InfoTreeSyncer.On("GetInfoByGlobalExitRoot", mock.Anything).Return(&l1infotreesync.L1InfoTreeLeaf{ + L1InfoTreeIndex: 1, + Timestamp: 123456789, + PreviousBlockHash: common.HexToHash("0xabc"), + GlobalExitRoot: common.HexToHash("0x7891"), + }, nil) + mockL1InfoTreeSyncer.On("GetL1InfoTreeRootByIndex", mock.Anything, mock.Anything).Return( + treeTypes.Root{Hash: common.HexToHash("0x7891")}, nil) + }, + expectedCert: nil, + expectedError: true, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + mockL1InfoTreeSyncer.ExpectedCalls = nil + mockL2BridgeSyncer.ExpectedCalls = nil + + if tt.mockFn != nil { + tt.mockFn() + } + + aggSender := &AggSender{ + l2Syncer: mockL2BridgeSyncer, + l1infoTreeSyncer: mockL1InfoTreeSyncer, + log: log.WithFields("test", "unittest"), + } + cert, err := aggSender.buildCertificate(context.Background(), tt.bridges, tt.claims, tt.lastSentCertificateInfo) + 
+ if tt.expectedError { + require.Error(t, err) + require.Nil(t, cert) + } else { + require.NoError(t, err) + require.Equal(t, tt.expectedCert, cert) + } + }) + } +} + +func generateTestProof(t *testing.T) treeTypes.Proof { + t.Helper() + + proof := treeTypes.Proof{} + + for i := 0; i < int(treeTypes.DefaultHeight) && i < 10; i++ { + proof[i] = common.HexToHash(fmt.Sprintf("0x%d", i)) + } + + return proof +} + +func TestCheckIfCertificatesAreSettled(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + pendingCertificates []*aggsendertypes.CertificateInfo + certificateHeaders map[common.Hash]*agglayer.CertificateHeader + getFromDBError error + clientError error + updateDBError error + expectedErrorLogMessages []string + expectedInfoMessages []string + }{ + { + name: "All certificates settled - update successful", + pendingCertificates: []*aggsendertypes.CertificateInfo{ + {CertificateID: common.HexToHash("0x1"), Height: 1}, + {CertificateID: common.HexToHash("0x2"), Height: 2}, + }, + certificateHeaders: map[common.Hash]*agglayer.CertificateHeader{ + common.HexToHash("0x1"): {Status: agglayer.Settled}, + common.HexToHash("0x2"): {Status: agglayer.Settled}, + }, + expectedInfoMessages: []string{ + "certificate %s changed status to %s", + }, + }, + { + name: "Some certificates in error - update successful", + pendingCertificates: []*aggsendertypes.CertificateInfo{ + {CertificateID: common.HexToHash("0x1"), Height: 1}, + {CertificateID: common.HexToHash("0x2"), Height: 2}, + }, + certificateHeaders: map[common.Hash]*agglayer.CertificateHeader{ + common.HexToHash("0x1"): {Status: agglayer.InError}, + common.HexToHash("0x2"): {Status: agglayer.Settled}, + }, + expectedInfoMessages: []string{ + "certificate %s changed status to %s", + }, + }, + { + name: "Error getting pending certificates", + getFromDBError: fmt.Errorf("storage error"), + expectedErrorLogMessages: []string{ + "error getting pending certificates: %w", + }, + }, + { + name: "Error getting 
certificate header", + pendingCertificates: []*aggsendertypes.CertificateInfo{ + {CertificateID: common.HexToHash("0x1"), Height: 1}, + }, + certificateHeaders: map[common.Hash]*agglayer.CertificateHeader{ + common.HexToHash("0x1"): {Status: agglayer.InError}, + }, + clientError: fmt.Errorf("client error"), + expectedErrorLogMessages: []string{ + "error getting header of certificate %s with height: %d from agglayer: %w", + }, + }, + { + name: "Error updating certificate status", + pendingCertificates: []*aggsendertypes.CertificateInfo{ + {CertificateID: common.HexToHash("0x1"), Height: 1}, + }, + certificateHeaders: map[common.Hash]*agglayer.CertificateHeader{ + common.HexToHash("0x1"): {Status: agglayer.Settled}, + }, + updateDBError: fmt.Errorf("update error"), + expectedErrorLogMessages: []string{ + "error updating certificate status in storage: %w", + }, + expectedInfoMessages: []string{ + "certificate %s changed status to %s", + }, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + mockStorage := mocks.NewAggSenderStorageMock(t) + mockAggLayerClient := agglayer.NewAgglayerClientMock(t) + mockLogger := mocks.NewLoggerMock(t) + + mockStorage.On("GetCertificatesByStatus", mock.Anything, []agglayer.CertificateStatus{agglayer.Pending}).Return(tt.pendingCertificates, tt.getFromDBError) + for certID, header := range tt.certificateHeaders { + mockAggLayerClient.On("GetCertificateHeader", certID).Return(header, tt.clientError) + } + if tt.updateDBError != nil { + mockStorage.On("UpdateCertificateStatus", mock.Anything, mock.Anything).Return(tt.updateDBError) + } else if tt.clientError == nil && tt.getFromDBError == nil { + mockStorage.On("UpdateCertificateStatus", mock.Anything, mock.Anything).Return(nil) + } + + if tt.clientError != nil { + for _, msg := range tt.expectedErrorLogMessages { + mockLogger.On("Errorf", msg, mock.Anything, mock.Anything, mock.Anything).Return() + } + } else { + for _, msg := range 
tt.expectedErrorLogMessages { + mockLogger.On("Errorf", msg, mock.Anything).Return() + } + + for _, msg := range tt.expectedInfoMessages { + mockLogger.On("Infof", msg, mock.Anything, mock.Anything).Return() + } + } + + aggSender := &AggSender{ + log: mockLogger, + storage: mockStorage, + aggLayerClient: mockAggLayerClient, + cfg: Config{ + BlockGetInterval: types.Duration{Duration: time.Second}, + CheckSettledInterval: types.Duration{Duration: time.Second}, + }, + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + go aggSender.checkIfCertificatesAreSettled(ctx) + + time.Sleep(2 * time.Second) + cancel() + + mockLogger.AssertExpectations(t) + mockAggLayerClient.AssertExpectations(t) + mockStorage.AssertExpectations(t) + }) + } +} + +func TestSendCertificate(t *testing.T) { + t.Parallel() + + privateKey, err := crypto.GenerateKey() + require.NoError(t, err) + + type testCfg struct { + name string + sequencerKey *ecdsa.PrivateKey + shouldSendCertificate []interface{} + getLastSentCertificate []interface{} + lastL2BlockProcessed []interface{} + getBridges []interface{} + getClaims []interface{} + getInfoByGlobalExitRoot []interface{} + getL1InfoTreeRootByIndex []interface{} + getL1InfoTreeMerkleProofFromIndexToRoot []interface{} + getExitRootByIndex []interface{} + originNetwork []interface{} + sendCertificate []interface{} + saveLastSentCertificate []interface{} + expectedError string + } + + setupTest := func(cfg testCfg) (*AggSender, *mocks.AggSenderStorageMock, *mocks.L2BridgeSyncerMock, + *agglayer.AgglayerClientMock, *mocks.L1InfoTreeSyncerMock) { + var ( + aggsender = &AggSender{ + log: log.WithFields("aggsender", 1), + cfg: Config{}, + sequencerKey: cfg.sequencerKey, + } + mockStorage *mocks.AggSenderStorageMock + mockL2Syncer *mocks.L2BridgeSyncerMock + mockAggLayerClient *agglayer.AgglayerClientMock + mockL1InfoTreeSyncer *mocks.L1InfoTreeSyncerMock + ) + + if cfg.shouldSendCertificate != nil || cfg.getLastSentCertificate != 
nil || + cfg.saveLastSentCertificate != nil { + mockStorage = mocks.NewAggSenderStorageMock(t) + mockStorage.On("GetCertificatesByStatus", mock.Anything, []agglayer.CertificateStatus{agglayer.Pending}). + Return(cfg.shouldSendCertificate...).Once() + + aggsender.storage = mockStorage + + if cfg.getLastSentCertificate != nil { + mockStorage.On("GetLastSentCertificate", mock.Anything).Return(cfg.getLastSentCertificate...).Once() + } + + if cfg.saveLastSentCertificate != nil { + mockStorage.On("SaveLastSentCertificate", mock.Anything, mock.Anything).Return(cfg.saveLastSentCertificate...).Once() + } + } + + if cfg.lastL2BlockProcessed != nil || cfg.originNetwork != nil || + cfg.getBridges != nil || cfg.getClaims != nil || cfg.getInfoByGlobalExitRoot != nil { + mockL2Syncer = mocks.NewL2BridgeSyncerMock(t) + + mockL2Syncer.On("GetLastProcessedBlock", mock.Anything).Return(cfg.lastL2BlockProcessed...).Once() + + if cfg.getBridges != nil { + mockL2Syncer.On("GetBridgesPublished", mock.Anything, mock.Anything, mock.Anything).Return(cfg.getBridges...).Once() + } + + if cfg.getClaims != nil { + mockL2Syncer.On("GetClaims", mock.Anything, mock.Anything, mock.Anything).Return(cfg.getClaims...).Once() + } + + if cfg.getExitRootByIndex != nil { + mockL2Syncer.On("GetExitRootByIndex", mock.Anything, mock.Anything).Return(cfg.getExitRootByIndex...).Once() + } + + if cfg.originNetwork != nil { + mockL2Syncer.On("OriginNetwork").Return(cfg.originNetwork...).Once() + } + + aggsender.l2Syncer = mockL2Syncer + } + + if cfg.sendCertificate != nil { + mockAggLayerClient = agglayer.NewAgglayerClientMock(t) + mockAggLayerClient.On("SendCertificate", mock.Anything).Return(cfg.sendCertificate...).Once() + + aggsender.aggLayerClient = mockAggLayerClient + } + + if cfg.getInfoByGlobalExitRoot != nil || + cfg.getL1InfoTreeRootByIndex != nil || cfg.getL1InfoTreeMerkleProofFromIndexToRoot != nil { + mockL1InfoTreeSyncer = mocks.NewL1InfoTreeSyncerMock(t) + 
mockL1InfoTreeSyncer.On("GetInfoByGlobalExitRoot", mock.Anything).Return(cfg.getInfoByGlobalExitRoot...).Once() + + if cfg.getL1InfoTreeRootByIndex != nil { + mockL1InfoTreeSyncer.On("GetL1InfoTreeRootByIndex", mock.Anything, mock.Anything).Return(cfg.getL1InfoTreeRootByIndex...).Once() + } + + if cfg.getL1InfoTreeMerkleProofFromIndexToRoot != nil { + mockL1InfoTreeSyncer.On("GetL1InfoTreeMerkleProofFromIndexToRoot", mock.Anything, mock.Anything, mock.Anything). + Return(cfg.getL1InfoTreeMerkleProofFromIndexToRoot...).Once() + } + + aggsender.l1infoTreeSyncer = mockL1InfoTreeSyncer + } + + return aggsender, mockStorage, mockL2Syncer, mockAggLayerClient, mockL1InfoTreeSyncer + } + + tests := []testCfg{ + { + name: "error getting pending certificates", + shouldSendCertificate: []interface{}{nil, errors.New("error getting pending")}, + expectedError: "error getting pending", + }, + { + name: "should not send certificate", + shouldSendCertificate: []interface{}{[]*aggsendertypes.CertificateInfo{ + {Status: agglayer.Pending}, + }, nil}, + }, + { + name: "error getting last sent certificate", + shouldSendCertificate: []interface{}{[]*aggsendertypes.CertificateInfo{}, nil}, + lastL2BlockProcessed: []interface{}{uint64(8), nil}, + getLastSentCertificate: []interface{}{aggsendertypes.CertificateInfo{}, errors.New("error getting last sent certificate")}, + expectedError: "error getting last sent certificate", + }, + { + name: "no new blocks to send certificate", + shouldSendCertificate: []interface{}{[]*aggsendertypes.CertificateInfo{}, nil}, + lastL2BlockProcessed: []interface{}{uint64(41), nil}, + getLastSentCertificate: []interface{}{aggsendertypes.CertificateInfo{ + Height: 41, + CertificateID: common.HexToHash("0x111"), + NewLocalExitRoot: common.HexToHash("0x13223"), + FromBlock: 31, + ToBlock: 41, + }, nil}, + }, + { + name: "get bridges error", + shouldSendCertificate: []interface{}{[]*aggsendertypes.CertificateInfo{}, nil}, + lastL2BlockProcessed: 
[]interface{}{uint64(59), nil}, + getLastSentCertificate: []interface{}{aggsendertypes.CertificateInfo{ + Height: 50, + CertificateID: common.HexToHash("0x1111"), + NewLocalExitRoot: common.HexToHash("0x132233"), + FromBlock: 40, + ToBlock: 41, + }, nil}, + getBridges: []interface{}{nil, errors.New("error getting bridges")}, + expectedError: "error getting bridges", + }, + { + name: "no bridges", + shouldSendCertificate: []interface{}{[]*aggsendertypes.CertificateInfo{}, nil}, + lastL2BlockProcessed: []interface{}{uint64(69), nil}, + getLastSentCertificate: []interface{}{aggsendertypes.CertificateInfo{ + Height: 60, + CertificateID: common.HexToHash("0x11111"), + NewLocalExitRoot: common.HexToHash("0x1322233"), + FromBlock: 50, + ToBlock: 51, + }, nil}, + getBridges: []interface{}{[]bridgesync.Bridge{}, nil}, + }, + { + name: "get claims error", + shouldSendCertificate: []interface{}{[]*aggsendertypes.CertificateInfo{}, nil}, + lastL2BlockProcessed: []interface{}{uint64(79), nil}, + getLastSentCertificate: []interface{}{aggsendertypes.CertificateInfo{ + Height: 70, + CertificateID: common.HexToHash("0x121111"), + NewLocalExitRoot: common.HexToHash("0x13122233"), + FromBlock: 60, + ToBlock: 61, + }, nil}, + getBridges: []interface{}{[]bridgesync.Bridge{ + { + BlockNum: 61, + BlockPos: 0, + LeafType: agglayer.LeafTypeAsset.Uint8(), + OriginNetwork: 1, + }, + }, nil}, + getClaims: []interface{}{nil, errors.New("error getting claims")}, + expectedError: "error getting claims", + }, + { + name: "error getting info by global exit root", + shouldSendCertificate: []interface{}{[]*aggsendertypes.CertificateInfo{}, nil}, + lastL2BlockProcessed: []interface{}{uint64(89), nil}, + getLastSentCertificate: []interface{}{aggsendertypes.CertificateInfo{ + Height: 80, + CertificateID: common.HexToHash("0x1321111"), + NewLocalExitRoot: common.HexToHash("0x131122233"), + FromBlock: 70, + ToBlock: 71, + }, nil}, + getBridges: []interface{}{[]bridgesync.Bridge{ + { + BlockNum: 71, + 
BlockPos: 0, + LeafType: agglayer.LeafTypeAsset.Uint8(), + OriginNetwork: 1, + }, + }, nil}, + getClaims: []interface{}{[]bridgesync.Claim{ + { + IsMessage: false, + }, + }, nil}, + getInfoByGlobalExitRoot: []interface{}{nil, errors.New("error getting info by global exit root")}, + expectedError: "error getting info by global exit root", + }, + { + name: "error getting L1 Info tree root by index", + shouldSendCertificate: []interface{}{[]*aggsendertypes.CertificateInfo{}, nil}, + lastL2BlockProcessed: []interface{}{uint64(89), nil}, + getLastSentCertificate: []interface{}{aggsendertypes.CertificateInfo{ + Height: 80, + CertificateID: common.HexToHash("0x1321111"), + NewLocalExitRoot: common.HexToHash("0x131122233"), + FromBlock: 70, + ToBlock: 71, + }, nil}, + getBridges: []interface{}{[]bridgesync.Bridge{ + { + BlockNum: 71, + BlockPos: 0, + LeafType: agglayer.LeafTypeAsset.Uint8(), + OriginNetwork: 1, + }, + }, nil}, + getClaims: []interface{}{[]bridgesync.Claim{ + { + IsMessage: false, + }, + }, nil}, + getInfoByGlobalExitRoot: []interface{}{&l1infotreesync.L1InfoTreeLeaf{ + L1InfoTreeIndex: 1, + BlockNumber: 1, + BlockPosition: 0, + PreviousBlockHash: common.HexToHash("0x123"), + Timestamp: 123456789, + MainnetExitRoot: common.HexToHash("0xccc"), + RollupExitRoot: common.HexToHash("0xddd"), + GlobalExitRoot: common.HexToHash("0xeee"), + }, nil}, + getL1InfoTreeRootByIndex: []interface{}{treeTypes.Root{}, errors.New("error getting L1 Info tree root by index")}, + expectedError: "error getting L1 Info tree root by index", + }, + { + name: "error getting L1 Info tree merkle proof from index to root", + shouldSendCertificate: []interface{}{[]*aggsendertypes.CertificateInfo{}, nil}, + lastL2BlockProcessed: []interface{}{uint64(89), nil}, + getLastSentCertificate: []interface{}{aggsendertypes.CertificateInfo{ + Height: 80, + CertificateID: common.HexToHash("0x1321111"), + NewLocalExitRoot: common.HexToHash("0x131122233"), + FromBlock: 70, + ToBlock: 71, + }, nil}, + 
getBridges: []interface{}{[]bridgesync.Bridge{ + { + BlockNum: 71, + BlockPos: 0, + LeafType: agglayer.LeafTypeAsset.Uint8(), + OriginNetwork: 1, + }, + }, nil}, + getClaims: []interface{}{[]bridgesync.Claim{ + { + IsMessage: false, + GlobalIndex: big.NewInt(1), + }, + }, nil}, + getInfoByGlobalExitRoot: []interface{}{&l1infotreesync.L1InfoTreeLeaf{ + L1InfoTreeIndex: 1, + BlockNumber: 1, + BlockPosition: 0, + PreviousBlockHash: common.HexToHash("0x123"), + Timestamp: 123456789, + MainnetExitRoot: common.HexToHash("0xccc"), + RollupExitRoot: common.HexToHash("0xddd"), + GlobalExitRoot: common.HexToHash("0xeee"), + }, nil}, + getL1InfoTreeRootByIndex: []interface{}{treeTypes.Root{Hash: common.HexToHash("0xeee")}, nil}, + getL1InfoTreeMerkleProofFromIndexToRoot: []interface{}{treeTypes.Proof{}, errors.New("error getting L1 Info tree merkle proof")}, + expectedError: "error getting L1 Info tree merkle proof for leaf index", + }, + { + name: "send certificate error", + shouldSendCertificate: []interface{}{[]*aggsendertypes.CertificateInfo{}, nil}, + lastL2BlockProcessed: []interface{}{uint64(99), nil}, + getLastSentCertificate: []interface{}{aggsendertypes.CertificateInfo{ + Height: 90, + CertificateID: common.HexToHash("0x1121111"), + NewLocalExitRoot: common.HexToHash("0x111122211"), + FromBlock: 80, + ToBlock: 81, + }, nil}, + getBridges: []interface{}{[]bridgesync.Bridge{ + { + BlockNum: 81, + BlockPos: 0, + LeafType: agglayer.LeafTypeAsset.Uint8(), + OriginNetwork: 1, + DepositCount: 1, + }, + }, nil}, + getClaims: []interface{}{[]bridgesync.Claim{}, nil}, + getExitRootByIndex: []interface{}{treeTypes.Root{}, nil}, + originNetwork: []interface{}{uint32(1), nil}, + sendCertificate: []interface{}{common.Hash{}, errors.New("error sending certificate")}, + sequencerKey: privateKey, + expectedError: "error sending certificate", + }, + { + name: "store last sent certificate error", + shouldSendCertificate: []interface{}{[]*aggsendertypes.CertificateInfo{}, nil}, + 
lastL2BlockProcessed: []interface{}{uint64(109), nil}, + getLastSentCertificate: []interface{}{aggsendertypes.CertificateInfo{ + Height: 100, + CertificateID: common.HexToHash("0x11121111"), + NewLocalExitRoot: common.HexToHash("0x1211122211"), + FromBlock: 90, + ToBlock: 91, + }, nil}, + getBridges: []interface{}{[]bridgesync.Bridge{ + { + BlockNum: 91, + BlockPos: 0, + LeafType: agglayer.LeafTypeAsset.Uint8(), + OriginNetwork: 1, + DepositCount: 1, + }, + }, nil}, + getClaims: []interface{}{[]bridgesync.Claim{}, nil}, + getExitRootByIndex: []interface{}{treeTypes.Root{}, nil}, + originNetwork: []interface{}{uint32(1), nil}, + sendCertificate: []interface{}{common.Hash{}, nil}, + saveLastSentCertificate: []interface{}{errors.New("error saving last sent certificate in db")}, + sequencerKey: privateKey, + expectedError: "error saving last sent certificate in db", + }, + { + name: "successful sending of certificate", + shouldSendCertificate: []interface{}{[]*aggsendertypes.CertificateInfo{}, nil}, + lastL2BlockProcessed: []interface{}{uint64(119), nil}, + getLastSentCertificate: []interface{}{aggsendertypes.CertificateInfo{ + Height: 110, + CertificateID: common.HexToHash("0x12121111"), + NewLocalExitRoot: common.HexToHash("0x1221122211"), + FromBlock: 100, + ToBlock: 101, + }, nil}, + getBridges: []interface{}{[]bridgesync.Bridge{ + { + BlockNum: 101, + BlockPos: 0, + LeafType: agglayer.LeafTypeAsset.Uint8(), + OriginNetwork: 1, + DepositCount: 1, + }, + }, nil}, + getClaims: []interface{}{[]bridgesync.Claim{}, nil}, + getExitRootByIndex: []interface{}{treeTypes.Root{}, nil}, + originNetwork: []interface{}{uint32(1), nil}, + sendCertificate: []interface{}{common.Hash{}, nil}, + saveLastSentCertificate: []interface{}{nil}, + sequencerKey: privateKey, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + aggsender, mockStorage, mockL2Syncer, + mockAggLayerClient, mockL1InfoTreeSyncer := setupTest(tt) + + err := 
aggsender.sendCertificate(context.Background()) + + if tt.expectedError != "" { + require.ErrorContains(t, err, tt.expectedError) + } else { + require.NoError(t, err) + } + + if mockStorage != nil { + mockStorage.AssertExpectations(t) + } + + if mockL2Syncer != nil { + mockL2Syncer.AssertExpectations(t) + } + + if mockAggLayerClient != nil { + mockAggLayerClient.AssertExpectations(t) + } + + if mockL1InfoTreeSyncer != nil { + mockL1InfoTreeSyncer.AssertExpectations(t) + } + }) + } +} + +func TestExtractSignatureData(t *testing.T) { + t.Parallel() + + testR := common.HexToHash("0x1") + testV := common.HexToHash("0x2") + + tests := []struct { + name string + signature []byte + expectedR common.Hash + expectedS common.Hash + expectedOddParity bool + expectedError error + }{ + { + name: "Valid signature - odd parity", + signature: append(append(testR.Bytes(), testV.Bytes()...), 1), + expectedR: testR, + expectedS: testV, + expectedOddParity: true, + expectedError: nil, + }, + { + name: "Valid signature - even parity", + signature: append(append(testR.Bytes(), testV.Bytes()...), 2), + expectedR: testR, + expectedS: testV, + expectedOddParity: false, + expectedError: nil, + }, + { + name: "Invalid signature size", + signature: make([]byte, 64), // Invalid size + expectedError: errInvalidSignatureSize, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + r, s, isOddParity, err := extractSignatureData(tt.signature) + + if tt.expectedError != nil { + require.Error(t, err) + require.Equal(t, tt.expectedError, err) + } else { + require.NoError(t, err) + require.Equal(t, tt.expectedR, r) + require.Equal(t, tt.expectedS, s) + require.Equal(t, tt.expectedOddParity, isOddParity) + } + }) + } +} + +func TestExploratoryGenerateCert(t *testing.T) { + t.Skip("This test is only for exploratory purposes, to generate json format of the certificate") + + key, err := crypto.GenerateKey() + require.NoError(t, err) + + signature, err 
:= crypto.Sign(common.HexToHash("0x1").Bytes(), key) + require.NoError(t, err) + + r, s, v, err := extractSignatureData(signature) + require.NoError(t, err) + + certificate := &agglayer.SignedCertificate{ + Certificate: &agglayer.Certificate{ + NetworkID: 1, + Height: 1, + PrevLocalExitRoot: common.HexToHash("0x1"), + NewLocalExitRoot: common.HexToHash("0x2"), + BridgeExits: []*agglayer.BridgeExit{ + { + LeafType: agglayer.LeafTypeAsset, + TokenInfo: &agglayer.TokenInfo{ + OriginNetwork: 1, + OriginTokenAddress: common.HexToAddress("0x11"), + }, + DestinationNetwork: 2, + DestinationAddress: common.HexToAddress("0x22"), + Amount: big.NewInt(100), + Metadata: []byte("metadata"), + }, + }, + ImportedBridgeExits: []*agglayer.ImportedBridgeExit{ + { + GlobalIndex: &agglayer.GlobalIndex{ + MainnetFlag: false, + RollupIndex: 1, + LeafIndex: 11, + }, + BridgeExit: &agglayer.BridgeExit{ + LeafType: agglayer.LeafTypeAsset, + TokenInfo: &agglayer.TokenInfo{ + OriginNetwork: 1, + OriginTokenAddress: common.HexToAddress("0x11"), + }, + DestinationNetwork: 2, + DestinationAddress: common.HexToAddress("0x22"), + Amount: big.NewInt(100), + Metadata: []byte("metadata"), + }, + ClaimData: &agglayer.ClaimFromMainnnet{ + ProofLeafMER: &agglayer.MerkleProof{ + Root: common.HexToHash("0x1"), + Proof: [32]common.Hash{}, + }, + ProofGERToL1Root: &agglayer.MerkleProof{ + Root: common.HexToHash("0x3"), + Proof: [32]common.Hash{}, + }, + L1Leaf: &agglayer.L1InfoTreeLeaf{ + L1InfoTreeIndex: 1, + RollupExitRoot: common.HexToHash("0x4"), + MainnetExitRoot: common.HexToHash("0x5"), + Inner: &agglayer.L1InfoTreeLeafInner{ + GlobalExitRoot: common.HexToHash("0x6"), + BlockHash: common.HexToHash("0x7"), + Timestamp: 1231, + }, + }, + }, + }, + }, + }, + Signature: &agglayer.Signature{ + R: r, + S: s, + OddParity: v, + }, + } + + file, err := os.Create("test.json") + require.NoError(t, err) + + defer file.Close() + + encoder := json.NewEncoder(file) + encoder.SetIndent("", " ") + require.NoError(t, 
encoder.Encode(certificate)) +} diff --git a/aggsender/config.go b/aggsender/config.go new file mode 100644 index 00000000..506b4e9a --- /dev/null +++ b/aggsender/config.go @@ -0,0 +1,23 @@ +package aggsender + +import ( + "github.com/0xPolygon/cdk/config/types" +) + +// Config is the configuration for the AggSender +type Config struct { + // StoragePath is the path of the sqlite db on which the AggSender will store the data + StoragePath string `mapstructure:"StoragePath"` + // AggLayerURL is the URL of the AggLayer + AggLayerURL string `mapstructure:"AggLayerURL"` + // BlockGetInterval is the interval at which the AggSender will get the blocks from L1 + BlockGetInterval types.Duration `mapstructure:"BlockGetInterval"` + // CheckSettledInterval is the interval at which the AggSender will check if the blocks are settled + CheckSettledInterval types.Duration `mapstructure:"CheckSettledInterval"` + // AggsenderPrivateKey is the private key which is used to sign certificates + AggsenderPrivateKey types.KeystoreFileConfig `mapstructure:"AggsenderPrivateKey"` + // URLRPCL2 is the URL of the L2 RPC node + URLRPCL2 string `mapstructure:"URLRPCL2"` + // SaveCertificatesToFiles is a flag which tells the AggSender to save the certificates to a file + SaveCertificatesToFiles bool `mapstructure:"SaveCertificatesToFiles"` +} diff --git a/aggsender/db/aggsender_db_storage.go b/aggsender/db/aggsender_db_storage.go new file mode 100644 index 00000000..25b31392 --- /dev/null +++ b/aggsender/db/aggsender_db_storage.go @@ -0,0 +1,215 @@ +package db + +import ( + "context" + "database/sql" + "errors" + "fmt" + "strings" + + "github.com/0xPolygon/cdk/agglayer" + "github.com/0xPolygon/cdk/aggsender/db/migrations" + "github.com/0xPolygon/cdk/aggsender/types" + "github.com/0xPolygon/cdk/db" + "github.com/0xPolygon/cdk/log" + "github.com/ethereum/go-ethereum/common" + "github.com/russross/meddler" +) + +const errWhileRollbackFormat = "error while rolling back tx: %w" + +// AggSenderStorage 
is the interface that defines the methods to interact with the storage +type AggSenderStorage interface { + // GetCertificateByHeight returns a certificate by its height + GetCertificateByHeight(ctx context.Context, height uint64) (types.CertificateInfo, error) + // GetLastSentCertificate returns the last certificate sent to the aggLayer + GetLastSentCertificate(ctx context.Context) (types.CertificateInfo, error) + // SaveLastSentCertificate saves the last certificate sent to the aggLayer + SaveLastSentCertificate(ctx context.Context, certificate types.CertificateInfo) error + // DeleteCertificate deletes a certificate from the storage + DeleteCertificate(ctx context.Context, certificateID common.Hash) error + // GetCertificatesByStatus returns a list of certificates by their status + GetCertificatesByStatus(ctx context.Context, status []agglayer.CertificateStatus) ([]*types.CertificateInfo, error) + // UpdateCertificateStatus updates the status of a certificate + UpdateCertificateStatus(ctx context.Context, certificate types.CertificateInfo) error +} + +var _ AggSenderStorage = (*AggSenderSQLStorage)(nil) + +// AggSenderSQLStorage is the struct that implements the AggSenderStorage interface +type AggSenderSQLStorage struct { + logger *log.Logger + db *sql.DB +} + +// NewAggSenderSQLStorage creates a new AggSenderSQLStorage +func NewAggSenderSQLStorage(logger *log.Logger, dbPath string) (*AggSenderSQLStorage, error) { + if err := migrations.RunMigrations(dbPath); err != nil { + return nil, err + } + + db, err := db.NewSQLiteDB(dbPath) + if err != nil { + return nil, err + } + + return &AggSenderSQLStorage{ + db: db, + logger: logger, + }, nil +} + +func (a *AggSenderSQLStorage) GetCertificatesByStatus(ctx context.Context, + statuses []agglayer.CertificateStatus) ([]*types.CertificateInfo, error) { + query := "SELECT * FROM certificate_info" + args := make([]interface{}, len(statuses)) + + if len(statuses) > 0 { + placeholders := make([]string, len(statuses)) + // 
Build the WHERE clause for status filtering + for i := range statuses { + placeholders[i] = fmt.Sprintf("$%d", i+1) + args[i] = statuses[i] + } + + // Build the WHERE clause with the joined placeholders + query += " WHERE status IN (" + strings.Join(placeholders, ", ") + ")" + } + + // Add ordering by creation date (oldest first) + query += " ORDER BY height ASC" + + var certificates []*types.CertificateInfo + if err := meddler.QueryAll(a.db, &certificates, query, args...); err != nil { + return nil, err + } + + return certificates, nil +} + +// GetCertificateByHeight returns a certificate by its height +func (a *AggSenderSQLStorage) GetCertificateByHeight(ctx context.Context, + height uint64) (types.CertificateInfo, error) { + var certificateInfo types.CertificateInfo + if err := meddler.QueryRow(a.db, &certificateInfo, + "SELECT * FROM certificate_info WHERE height = $1;", height); err != nil { + return types.CertificateInfo{}, getSelectQueryError(height, err) + } + + return certificateInfo, nil +} + +// GetLastSentCertificate returns the last certificate sent to the aggLayer +func (a *AggSenderSQLStorage) GetLastSentCertificate(ctx context.Context) (types.CertificateInfo, error) { + var certificateInfo types.CertificateInfo + if err := meddler.QueryRow(a.db, &certificateInfo, + "SELECT * FROM certificate_info ORDER BY height DESC LIMIT 1;"); err != nil { + return types.CertificateInfo{}, getSelectQueryError(0, err) + } + + return certificateInfo, nil +} + +// SaveLastSentCertificate saves the last certificate sent to the aggLayer +func (a *AggSenderSQLStorage) SaveLastSentCertificate(ctx context.Context, certificate types.CertificateInfo) error { + tx, err := db.NewTx(ctx, a.db) + if err != nil { + return err + } + defer func() { + if err != nil { + if errRllbck := tx.Rollback(); errRllbck != nil { + a.logger.Errorf(errWhileRollbackFormat, errRllbck) + } + } + }() + + if err := meddler.Insert(tx, "certificate_info", &certificate); err != nil { + return 
fmt.Errorf("error inserting certificate info: %w", err) + } + if err := tx.Commit(); err != nil { + return err + } + + a.logger.Debugf("inserted certificate - Height: %d. Hash: %s", certificate.Height, certificate.CertificateID) + + return nil +} + +// DeleteCertificate deletes a certificate from the storage +func (a *AggSenderSQLStorage) DeleteCertificate(ctx context.Context, certificateID common.Hash) error { + tx, err := db.NewTx(ctx, a.db) + if err != nil { + return err + } + defer func() { + if err != nil { + if errRllbck := tx.Rollback(); errRllbck != nil { + a.logger.Errorf(errWhileRollbackFormat, errRllbck) + } + } + }() + + if _, err := tx.Exec(`DELETE FROM certificate_info WHERE certificate_id = $1;`, certificateID); err != nil { + return fmt.Errorf("error deleting certificate info: %w", err) + } + if err := tx.Commit(); err != nil { + return err + } + + a.logger.Debugf("deleted certificate - CertificateID: %s", certificateID) + + return nil +} + +// UpdateCertificateStatus updates the status of a certificate +func (a *AggSenderSQLStorage) UpdateCertificateStatus(ctx context.Context, certificate types.CertificateInfo) error { + tx, err := db.NewTx(ctx, a.db) + if err != nil { + return err + } + defer func() { + if err != nil { + if errRllbck := tx.Rollback(); errRllbck != nil { + a.logger.Errorf(errWhileRollbackFormat, errRllbck) + } + } + }() + + if _, err := tx.Exec(`UPDATE certificate_info SET status = $1 WHERE certificate_id = $2;`, + certificate.Status, certificate.CertificateID); err != nil { + return fmt.Errorf("error updating certificate info: %w", err) + } + if err := tx.Commit(); err != nil { + return err + } + + a.logger.Debugf("updated certificate status - CertificateID: %s", certificate.CertificateID) + + return nil +} + +// clean deletes all the data from the storage +// NOTE: Used only in tests +func (a *AggSenderSQLStorage) clean() error { + if _, err := a.db.Exec(`DELETE FROM certificate_info;`); err != nil { + return err + } + + return 
nil +} + +func getSelectQueryError(height uint64, err error) error { + errToReturn := err + if errors.Is(err, sql.ErrNoRows) { + if height == 0 { + // height 0 is never sent to the aggLayer + // so we don't return an error in this case + errToReturn = nil + } else { + errToReturn = db.ErrNotFound + } + } + + return errToReturn +} diff --git a/aggsender/db/aggsender_db_storage_test.go b/aggsender/db/aggsender_db_storage_test.go new file mode 100644 index 00000000..cfb7af7c --- /dev/null +++ b/aggsender/db/aggsender_db_storage_test.go @@ -0,0 +1,204 @@ +package db + +import ( + "context" + "path" + "testing" + + "github.com/0xPolygon/cdk/agglayer" + "github.com/0xPolygon/cdk/aggsender/db/migrations" + "github.com/0xPolygon/cdk/aggsender/types" + "github.com/0xPolygon/cdk/db" + "github.com/0xPolygon/cdk/log" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func Test_Storage(t *testing.T) { + ctx := context.Background() + + path := path.Join(t.TempDir(), "file::memory:?cache=shared") + log.Debugf("sqlite path: %s", path) + require.NoError(t, migrations.RunMigrations(path)) + + storage, err := NewAggSenderSQLStorage(log.WithFields("aggsender-db"), path) + require.NoError(t, err) + + t.Run("SaveLastSentCertificate", func(t *testing.T) { + certificate := types.CertificateInfo{ + Height: 1, + CertificateID: common.HexToHash("0x1"), + NewLocalExitRoot: common.HexToHash("0x2"), + FromBlock: 1, + ToBlock: 2, + Status: agglayer.Settled, + } + require.NoError(t, storage.SaveLastSentCertificate(ctx, certificate)) + + certificateFromDB, err := storage.GetCertificateByHeight(ctx, certificate.Height) + require.NoError(t, err) + + require.Equal(t, certificate, certificateFromDB) + require.NoError(t, storage.clean()) + }) + + t.Run("DeleteCertificate", func(t *testing.T) { + certificate := types.CertificateInfo{ + Height: 2, + CertificateID: common.HexToHash("0x3"), + NewLocalExitRoot: common.HexToHash("0x4"), + FromBlock: 3, + ToBlock: 4, + 
Status: agglayer.Settled, + } + require.NoError(t, storage.SaveLastSentCertificate(ctx, certificate)) + + require.NoError(t, storage.DeleteCertificate(ctx, certificate.CertificateID)) + + certificateFromDB, err := storage.GetCertificateByHeight(ctx, certificate.Height) + require.ErrorIs(t, err, db.ErrNotFound) + require.Equal(t, types.CertificateInfo{}, certificateFromDB) + require.NoError(t, storage.clean()) + }) + + t.Run("GetLastSentCertificate", func(t *testing.T) { + // try getting a certificate that doesn't exist + certificateFromDB, err := storage.GetLastSentCertificate(ctx) + require.NoError(t, err) + require.Equal(t, types.CertificateInfo{}, certificateFromDB) + + // try getting a certificate that exists + certificate := types.CertificateInfo{ + Height: 3, + CertificateID: common.HexToHash("0x5"), + NewLocalExitRoot: common.HexToHash("0x6"), + FromBlock: 5, + ToBlock: 6, + Status: agglayer.Pending, + } + require.NoError(t, storage.SaveLastSentCertificate(ctx, certificate)) + + certificateFromDB, err = storage.GetLastSentCertificate(ctx) + require.NoError(t, err) + + require.Equal(t, certificate, certificateFromDB) + require.NoError(t, storage.clean()) + }) + + t.Run("GetCertificateByHeight", func(t *testing.T) { + // try getting height 0 + certificateFromDB, err := storage.GetCertificateByHeight(ctx, 0) + require.NoError(t, err) + require.Equal(t, types.CertificateInfo{}, certificateFromDB) + + // try getting a certificate that doesn't exist + certificateFromDB, err = storage.GetCertificateByHeight(ctx, 4) + require.ErrorIs(t, err, db.ErrNotFound) + require.Equal(t, types.CertificateInfo{}, certificateFromDB) + + // try getting a certificate that exists + certificate := types.CertificateInfo{ + Height: 11, + CertificateID: common.HexToHash("0x17"), + NewLocalExitRoot: common.HexToHash("0x18"), + FromBlock: 17, + ToBlock: 18, + Status: agglayer.Pending, + } + require.NoError(t, storage.SaveLastSentCertificate(ctx, certificate)) + + certificateFromDB, err = 
storage.GetCertificateByHeight(ctx, certificate.Height) + require.NoError(t, err) + + require.Equal(t, certificate, certificateFromDB) + require.NoError(t, storage.clean()) + }) + + t.Run("GetCertificatesByStatus", func(t *testing.T) { + // Insert some certificates with different statuses + certificates := []*types.CertificateInfo{ + { + Height: 7, + CertificateID: common.HexToHash("0x7"), + NewLocalExitRoot: common.HexToHash("0x8"), + FromBlock: 7, + ToBlock: 8, + Status: agglayer.Settled, + }, + { + Height: 9, + CertificateID: common.HexToHash("0x9"), + NewLocalExitRoot: common.HexToHash("0xA"), + FromBlock: 9, + ToBlock: 10, + Status: agglayer.Pending, + }, + { + Height: 11, + CertificateID: common.HexToHash("0xB"), + NewLocalExitRoot: common.HexToHash("0xC"), + FromBlock: 11, + ToBlock: 12, + Status: agglayer.InError, + }, + } + + for _, cert := range certificates { + require.NoError(t, storage.SaveLastSentCertificate(ctx, *cert)) + } + + // Test fetching certificates with status Settled + statuses := []agglayer.CertificateStatus{agglayer.Settled} + certificatesFromDB, err := storage.GetCertificatesByStatus(ctx, statuses) + require.NoError(t, err) + require.Len(t, certificatesFromDB, 1) + require.ElementsMatch(t, []*types.CertificateInfo{certificates[0]}, certificatesFromDB) + + // Test fetching certificates with status Pending + statuses = []agglayer.CertificateStatus{agglayer.Pending} + certificatesFromDB, err = storage.GetCertificatesByStatus(ctx, statuses) + require.NoError(t, err) + require.Len(t, certificatesFromDB, 1) + require.ElementsMatch(t, []*types.CertificateInfo{certificates[1]}, certificatesFromDB) + + // Test fetching certificates with status InError + statuses = []agglayer.CertificateStatus{agglayer.InError} + certificatesFromDB, err = storage.GetCertificatesByStatus(ctx, statuses) + require.NoError(t, err) + require.Len(t, certificatesFromDB, 1) + require.ElementsMatch(t, []*types.CertificateInfo{certificates[2]}, certificatesFromDB) + + // 
Test fetching certificates with status InError and Pending + statuses = []agglayer.CertificateStatus{agglayer.InError, agglayer.Pending} + certificatesFromDB, err = storage.GetCertificatesByStatus(ctx, statuses) + require.NoError(t, err) + require.Len(t, certificatesFromDB, 2) + require.ElementsMatch(t, []*types.CertificateInfo{certificates[1], certificates[2]}, certificatesFromDB) + + require.NoError(t, storage.clean()) + }) + + t.Run("UpdateCertificateStatus", func(t *testing.T) { + // Insert a certificate + certificate := types.CertificateInfo{ + Height: 13, + CertificateID: common.HexToHash("0xD"), + NewLocalExitRoot: common.HexToHash("0xE"), + FromBlock: 13, + ToBlock: 14, + Status: agglayer.Pending, + } + require.NoError(t, storage.SaveLastSentCertificate(ctx, certificate)) + + // Update the status of the certificate + certificate.Status = agglayer.Settled + require.NoError(t, storage.UpdateCertificateStatus(ctx, certificate)) + + // Fetch the certificate and verify the status has been updated + certificateFromDB, err := storage.GetCertificateByHeight(ctx, certificate.Height) + require.NoError(t, err) + require.Equal(t, certificate.Status, certificateFromDB.Status) + + require.NoError(t, storage.clean()) + }) +} diff --git a/aggsender/db/migrations/0001.sql b/aggsender/db/migrations/0001.sql new file mode 100644 index 00000000..3ed7f997 --- /dev/null +++ b/aggsender/db/migrations/0001.sql @@ -0,0 +1,12 @@ +-- +migrate Down +DROP TABLE IF EXISTS certificate_info; + +-- +migrate Up +CREATE TABLE certificate_info ( + height INTEGER NOT NULL, + certificate_id VARCHAR NOT NULL PRIMARY KEY, + status INTEGER NOT NULL, + new_local_exit_root VARCHAR NOT NULL, + from_block INTEGER NOT NULL, + to_block INTEGER NOT NULL +); \ No newline at end of file diff --git a/aggsender/db/migrations/migrations.go b/aggsender/db/migrations/migrations.go new file mode 100644 index 00000000..31f16fd2 --- /dev/null +++ b/aggsender/db/migrations/migrations.go @@ -0,0 +1,22 @@ +package 
migrations + +import ( + _ "embed" + + "github.com/0xPolygon/cdk/db" + "github.com/0xPolygon/cdk/db/types" +) + +//go:embed 0001.sql +var mig001 string + +func RunMigrations(dbPath string) error { + migrations := []types.Migration{ + { + ID: "0001", + SQL: mig001, + }, + } + + return db.RunMigrations(dbPath, migrations) +} diff --git a/aggsender/mocks/mock_aggsender_storage.go b/aggsender/mocks/mock_aggsender_storage.go new file mode 100644 index 00000000..a5f193fc --- /dev/null +++ b/aggsender/mocks/mock_aggsender_storage.go @@ -0,0 +1,354 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + agglayer "github.com/0xPolygon/cdk/agglayer" + common "github.com/ethereum/go-ethereum/common" + + context "context" + + mock "github.com/stretchr/testify/mock" + + types "github.com/0xPolygon/cdk/aggsender/types" +) + +// AggSenderStorageMock is an autogenerated mock type for the AggSenderStorage type +type AggSenderStorageMock struct { + mock.Mock +} + +type AggSenderStorageMock_Expecter struct { + mock *mock.Mock +} + +func (_m *AggSenderStorageMock) EXPECT() *AggSenderStorageMock_Expecter { + return &AggSenderStorageMock_Expecter{mock: &_m.Mock} +} + +// DeleteCertificate provides a mock function with given fields: ctx, certificateID +func (_m *AggSenderStorageMock) DeleteCertificate(ctx context.Context, certificateID common.Hash) error { + ret := _m.Called(ctx, certificateID) + + if len(ret) == 0 { + panic("no return value specified for DeleteCertificate") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) error); ok { + r0 = rf(ctx, certificateID) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// AggSenderStorageMock_DeleteCertificate_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteCertificate' +type AggSenderStorageMock_DeleteCertificate_Call struct { + *mock.Call +} + +// DeleteCertificate is a helper method to define mock.On call +// - ctx context.Context 
+// - certificateID common.Hash +func (_e *AggSenderStorageMock_Expecter) DeleteCertificate(ctx interface{}, certificateID interface{}) *AggSenderStorageMock_DeleteCertificate_Call { + return &AggSenderStorageMock_DeleteCertificate_Call{Call: _e.mock.On("DeleteCertificate", ctx, certificateID)} +} + +func (_c *AggSenderStorageMock_DeleteCertificate_Call) Run(run func(ctx context.Context, certificateID common.Hash)) *AggSenderStorageMock_DeleteCertificate_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(common.Hash)) + }) + return _c +} + +func (_c *AggSenderStorageMock_DeleteCertificate_Call) Return(_a0 error) *AggSenderStorageMock_DeleteCertificate_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *AggSenderStorageMock_DeleteCertificate_Call) RunAndReturn(run func(context.Context, common.Hash) error) *AggSenderStorageMock_DeleteCertificate_Call { + _c.Call.Return(run) + return _c +} + +// GetCertificateByHeight provides a mock function with given fields: ctx, height +func (_m *AggSenderStorageMock) GetCertificateByHeight(ctx context.Context, height uint64) (types.CertificateInfo, error) { + ret := _m.Called(ctx, height) + + if len(ret) == 0 { + panic("no return value specified for GetCertificateByHeight") + } + + var r0 types.CertificateInfo + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64) (types.CertificateInfo, error)); ok { + return rf(ctx, height) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64) types.CertificateInfo); ok { + r0 = rf(ctx, height) + } else { + r0 = ret.Get(0).(types.CertificateInfo) + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64) error); ok { + r1 = rf(ctx, height) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// AggSenderStorageMock_GetCertificateByHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetCertificateByHeight' +type AggSenderStorageMock_GetCertificateByHeight_Call struct { + 
*mock.Call +} + +// GetCertificateByHeight is a helper method to define mock.On call +// - ctx context.Context +// - height uint64 +func (_e *AggSenderStorageMock_Expecter) GetCertificateByHeight(ctx interface{}, height interface{}) *AggSenderStorageMock_GetCertificateByHeight_Call { + return &AggSenderStorageMock_GetCertificateByHeight_Call{Call: _e.mock.On("GetCertificateByHeight", ctx, height)} +} + +func (_c *AggSenderStorageMock_GetCertificateByHeight_Call) Run(run func(ctx context.Context, height uint64)) *AggSenderStorageMock_GetCertificateByHeight_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64)) + }) + return _c +} + +func (_c *AggSenderStorageMock_GetCertificateByHeight_Call) Return(_a0 types.CertificateInfo, _a1 error) *AggSenderStorageMock_GetCertificateByHeight_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *AggSenderStorageMock_GetCertificateByHeight_Call) RunAndReturn(run func(context.Context, uint64) (types.CertificateInfo, error)) *AggSenderStorageMock_GetCertificateByHeight_Call { + _c.Call.Return(run) + return _c +} + +// GetCertificatesByStatus provides a mock function with given fields: ctx, status +func (_m *AggSenderStorageMock) GetCertificatesByStatus(ctx context.Context, status []agglayer.CertificateStatus) ([]*types.CertificateInfo, error) { + ret := _m.Called(ctx, status) + + if len(ret) == 0 { + panic("no return value specified for GetCertificatesByStatus") + } + + var r0 []*types.CertificateInfo + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, []agglayer.CertificateStatus) ([]*types.CertificateInfo, error)); ok { + return rf(ctx, status) + } + if rf, ok := ret.Get(0).(func(context.Context, []agglayer.CertificateStatus) []*types.CertificateInfo); ok { + r0 = rf(ctx, status) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*types.CertificateInfo) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, []agglayer.CertificateStatus) error); ok { + r1 = 
rf(ctx, status) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// AggSenderStorageMock_GetCertificatesByStatus_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetCertificatesByStatus' +type AggSenderStorageMock_GetCertificatesByStatus_Call struct { + *mock.Call +} + +// GetCertificatesByStatus is a helper method to define mock.On call +// - ctx context.Context +// - status []agglayer.CertificateStatus +func (_e *AggSenderStorageMock_Expecter) GetCertificatesByStatus(ctx interface{}, status interface{}) *AggSenderStorageMock_GetCertificatesByStatus_Call { + return &AggSenderStorageMock_GetCertificatesByStatus_Call{Call: _e.mock.On("GetCertificatesByStatus", ctx, status)} +} + +func (_c *AggSenderStorageMock_GetCertificatesByStatus_Call) Run(run func(ctx context.Context, status []agglayer.CertificateStatus)) *AggSenderStorageMock_GetCertificatesByStatus_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].([]agglayer.CertificateStatus)) + }) + return _c +} + +func (_c *AggSenderStorageMock_GetCertificatesByStatus_Call) Return(_a0 []*types.CertificateInfo, _a1 error) *AggSenderStorageMock_GetCertificatesByStatus_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *AggSenderStorageMock_GetCertificatesByStatus_Call) RunAndReturn(run func(context.Context, []agglayer.CertificateStatus) ([]*types.CertificateInfo, error)) *AggSenderStorageMock_GetCertificatesByStatus_Call { + _c.Call.Return(run) + return _c +} + +// GetLastSentCertificate provides a mock function with given fields: ctx +func (_m *AggSenderStorageMock) GetLastSentCertificate(ctx context.Context) (types.CertificateInfo, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GetLastSentCertificate") + } + + var r0 types.CertificateInfo + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (types.CertificateInfo, error)); ok { + return rf(ctx) + } + if rf, ok := 
ret.Get(0).(func(context.Context) types.CertificateInfo); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(types.CertificateInfo) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// AggSenderStorageMock_GetLastSentCertificate_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLastSentCertificate' +type AggSenderStorageMock_GetLastSentCertificate_Call struct { + *mock.Call +} + +// GetLastSentCertificate is a helper method to define mock.On call +// - ctx context.Context +func (_e *AggSenderStorageMock_Expecter) GetLastSentCertificate(ctx interface{}) *AggSenderStorageMock_GetLastSentCertificate_Call { + return &AggSenderStorageMock_GetLastSentCertificate_Call{Call: _e.mock.On("GetLastSentCertificate", ctx)} +} + +func (_c *AggSenderStorageMock_GetLastSentCertificate_Call) Run(run func(ctx context.Context)) *AggSenderStorageMock_GetLastSentCertificate_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *AggSenderStorageMock_GetLastSentCertificate_Call) Return(_a0 types.CertificateInfo, _a1 error) *AggSenderStorageMock_GetLastSentCertificate_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *AggSenderStorageMock_GetLastSentCertificate_Call) RunAndReturn(run func(context.Context) (types.CertificateInfo, error)) *AggSenderStorageMock_GetLastSentCertificate_Call { + _c.Call.Return(run) + return _c +} + +// SaveLastSentCertificate provides a mock function with given fields: ctx, certificate +func (_m *AggSenderStorageMock) SaveLastSentCertificate(ctx context.Context, certificate types.CertificateInfo) error { + ret := _m.Called(ctx, certificate) + + if len(ret) == 0 { + panic("no return value specified for SaveLastSentCertificate") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, types.CertificateInfo) error); ok { + r0 = rf(ctx, certificate) 
+ } else { + r0 = ret.Error(0) + } + + return r0 +} + +// AggSenderStorageMock_SaveLastSentCertificate_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SaveLastSentCertificate' +type AggSenderStorageMock_SaveLastSentCertificate_Call struct { + *mock.Call +} + +// SaveLastSentCertificate is a helper method to define mock.On call +// - ctx context.Context +// - certificate types.CertificateInfo +func (_e *AggSenderStorageMock_Expecter) SaveLastSentCertificate(ctx interface{}, certificate interface{}) *AggSenderStorageMock_SaveLastSentCertificate_Call { + return &AggSenderStorageMock_SaveLastSentCertificate_Call{Call: _e.mock.On("SaveLastSentCertificate", ctx, certificate)} +} + +func (_c *AggSenderStorageMock_SaveLastSentCertificate_Call) Run(run func(ctx context.Context, certificate types.CertificateInfo)) *AggSenderStorageMock_SaveLastSentCertificate_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(types.CertificateInfo)) + }) + return _c +} + +func (_c *AggSenderStorageMock_SaveLastSentCertificate_Call) Return(_a0 error) *AggSenderStorageMock_SaveLastSentCertificate_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *AggSenderStorageMock_SaveLastSentCertificate_Call) RunAndReturn(run func(context.Context, types.CertificateInfo) error) *AggSenderStorageMock_SaveLastSentCertificate_Call { + _c.Call.Return(run) + return _c +} + +// UpdateCertificateStatus provides a mock function with given fields: ctx, certificate +func (_m *AggSenderStorageMock) UpdateCertificateStatus(ctx context.Context, certificate types.CertificateInfo) error { + ret := _m.Called(ctx, certificate) + + if len(ret) == 0 { + panic("no return value specified for UpdateCertificateStatus") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, types.CertificateInfo) error); ok { + r0 = rf(ctx, certificate) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// 
AggSenderStorageMock_UpdateCertificateStatus_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateCertificateStatus' +type AggSenderStorageMock_UpdateCertificateStatus_Call struct { + *mock.Call +} + +// UpdateCertificateStatus is a helper method to define mock.On call +// - ctx context.Context +// - certificate types.CertificateInfo +func (_e *AggSenderStorageMock_Expecter) UpdateCertificateStatus(ctx interface{}, certificate interface{}) *AggSenderStorageMock_UpdateCertificateStatus_Call { + return &AggSenderStorageMock_UpdateCertificateStatus_Call{Call: _e.mock.On("UpdateCertificateStatus", ctx, certificate)} +} + +func (_c *AggSenderStorageMock_UpdateCertificateStatus_Call) Run(run func(ctx context.Context, certificate types.CertificateInfo)) *AggSenderStorageMock_UpdateCertificateStatus_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(types.CertificateInfo)) + }) + return _c +} + +func (_c *AggSenderStorageMock_UpdateCertificateStatus_Call) Return(_a0 error) *AggSenderStorageMock_UpdateCertificateStatus_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *AggSenderStorageMock_UpdateCertificateStatus_Call) RunAndReturn(run func(context.Context, types.CertificateInfo) error) *AggSenderStorageMock_UpdateCertificateStatus_Call { + _c.Call.Return(run) + return _c +} + +// NewAggSenderStorageMock creates a new instance of AggSenderStorageMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewAggSenderStorageMock(t interface { + mock.TestingT + Cleanup(func()) +}) *AggSenderStorageMock { + mock := &AggSenderStorageMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/aggsender/mocks/mock_eth_client.go b/aggsender/mocks/mock_eth_client.go new file mode 100644 index 00000000..ebf618bf --- /dev/null +++ b/aggsender/mocks/mock_eth_client.go @@ -0,0 +1,154 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + context "context" + big "math/big" + + coretypes "github.com/ethereum/go-ethereum/core/types" + + mock "github.com/stretchr/testify/mock" +) + +// EthClientMock is an autogenerated mock type for the EthClient type +type EthClientMock struct { + mock.Mock +} + +type EthClientMock_Expecter struct { + mock *mock.Mock +} + +func (_m *EthClientMock) EXPECT() *EthClientMock_Expecter { + return &EthClientMock_Expecter{mock: &_m.Mock} +} + +// BlockNumber provides a mock function with given fields: ctx +func (_m *EthClientMock) BlockNumber(ctx context.Context) (uint64, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for BlockNumber") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (uint64, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) uint64); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClientMock_BlockNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BlockNumber' +type EthClientMock_BlockNumber_Call struct { + *mock.Call +} + +// BlockNumber is a helper method to define mock.On call +// - ctx context.Context +func (_e *EthClientMock_Expecter) BlockNumber(ctx interface{}) *EthClientMock_BlockNumber_Call { + return 
&EthClientMock_BlockNumber_Call{Call: _e.mock.On("BlockNumber", ctx)} +} + +func (_c *EthClientMock_BlockNumber_Call) Run(run func(ctx context.Context)) *EthClientMock_BlockNumber_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *EthClientMock_BlockNumber_Call) Return(_a0 uint64, _a1 error) *EthClientMock_BlockNumber_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClientMock_BlockNumber_Call) RunAndReturn(run func(context.Context) (uint64, error)) *EthClientMock_BlockNumber_Call { + _c.Call.Return(run) + return _c +} + +// HeaderByNumber provides a mock function with given fields: ctx, number +func (_m *EthClientMock) HeaderByNumber(ctx context.Context, number *big.Int) (*coretypes.Header, error) { + ret := _m.Called(ctx, number) + + if len(ret) == 0 { + panic("no return value specified for HeaderByNumber") + } + + var r0 *coretypes.Header + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) (*coretypes.Header, error)); ok { + return rf(ctx, number) + } + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) *coretypes.Header); ok { + r0 = rf(ctx, number) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.Header) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *big.Int) error); ok { + r1 = rf(ctx, number) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClientMock_HeaderByNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'HeaderByNumber' +type EthClientMock_HeaderByNumber_Call struct { + *mock.Call +} + +// HeaderByNumber is a helper method to define mock.On call +// - ctx context.Context +// - number *big.Int +func (_e *EthClientMock_Expecter) HeaderByNumber(ctx interface{}, number interface{}) *EthClientMock_HeaderByNumber_Call { + return &EthClientMock_HeaderByNumber_Call{Call: _e.mock.On("HeaderByNumber", ctx, number)} +} + +func (_c *EthClientMock_HeaderByNumber_Call) 
Run(run func(ctx context.Context, number *big.Int)) *EthClientMock_HeaderByNumber_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*big.Int)) + }) + return _c +} + +func (_c *EthClientMock_HeaderByNumber_Call) Return(_a0 *coretypes.Header, _a1 error) *EthClientMock_HeaderByNumber_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClientMock_HeaderByNumber_Call) RunAndReturn(run func(context.Context, *big.Int) (*coretypes.Header, error)) *EthClientMock_HeaderByNumber_Call { + _c.Call.Return(run) + return _c +} + +// NewEthClientMock creates a new instance of EthClientMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewEthClientMock(t interface { + mock.TestingT + Cleanup(func()) +}) *EthClientMock { + mock := &EthClientMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/aggsender/mocks/mock_l1infotree_syncer.go b/aggsender/mocks/mock_l1infotree_syncer.go new file mode 100644 index 00000000..e113d4ed --- /dev/null +++ b/aggsender/mocks/mock_l1infotree_syncer.go @@ -0,0 +1,217 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + common "github.com/ethereum/go-ethereum/common" + + l1infotreesync "github.com/0xPolygon/cdk/l1infotreesync" + + mock "github.com/stretchr/testify/mock" + + treetypes "github.com/0xPolygon/cdk/tree/types" +) + +// L1InfoTreeSyncerMock is an autogenerated mock type for the L1InfoTreeSyncer type +type L1InfoTreeSyncerMock struct { + mock.Mock +} + +type L1InfoTreeSyncerMock_Expecter struct { + mock *mock.Mock +} + +func (_m *L1InfoTreeSyncerMock) EXPECT() *L1InfoTreeSyncerMock_Expecter { + return &L1InfoTreeSyncerMock_Expecter{mock: &_m.Mock} +} + +// GetInfoByGlobalExitRoot provides a mock function with given fields: globalExitRoot +func (_m *L1InfoTreeSyncerMock) GetInfoByGlobalExitRoot(globalExitRoot common.Hash) (*l1infotreesync.L1InfoTreeLeaf, error) { + ret := _m.Called(globalExitRoot) + + if len(ret) == 0 { + panic("no return value specified for GetInfoByGlobalExitRoot") + } + + var r0 *l1infotreesync.L1InfoTreeLeaf + var r1 error + if rf, ok := ret.Get(0).(func(common.Hash) (*l1infotreesync.L1InfoTreeLeaf, error)); ok { + return rf(globalExitRoot) + } + if rf, ok := ret.Get(0).(func(common.Hash) *l1infotreesync.L1InfoTreeLeaf); ok { + r0 = rf(globalExitRoot) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*l1infotreesync.L1InfoTreeLeaf) + } + } + + if rf, ok := ret.Get(1).(func(common.Hash) error); ok { + r1 = rf(globalExitRoot) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// L1InfoTreeSyncerMock_GetInfoByGlobalExitRoot_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetInfoByGlobalExitRoot' +type L1InfoTreeSyncerMock_GetInfoByGlobalExitRoot_Call struct { + *mock.Call +} + +// GetInfoByGlobalExitRoot is a helper method to define mock.On call +// - globalExitRoot common.Hash +func (_e *L1InfoTreeSyncerMock_Expecter) GetInfoByGlobalExitRoot(globalExitRoot interface{}) *L1InfoTreeSyncerMock_GetInfoByGlobalExitRoot_Call { + return 
&L1InfoTreeSyncerMock_GetInfoByGlobalExitRoot_Call{Call: _e.mock.On("GetInfoByGlobalExitRoot", globalExitRoot)} +} + +func (_c *L1InfoTreeSyncerMock_GetInfoByGlobalExitRoot_Call) Run(run func(globalExitRoot common.Hash)) *L1InfoTreeSyncerMock_GetInfoByGlobalExitRoot_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(common.Hash)) + }) + return _c +} + +func (_c *L1InfoTreeSyncerMock_GetInfoByGlobalExitRoot_Call) Return(_a0 *l1infotreesync.L1InfoTreeLeaf, _a1 error) *L1InfoTreeSyncerMock_GetInfoByGlobalExitRoot_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L1InfoTreeSyncerMock_GetInfoByGlobalExitRoot_Call) RunAndReturn(run func(common.Hash) (*l1infotreesync.L1InfoTreeLeaf, error)) *L1InfoTreeSyncerMock_GetInfoByGlobalExitRoot_Call { + _c.Call.Return(run) + return _c +} + +// GetL1InfoTreeMerkleProofFromIndexToRoot provides a mock function with given fields: ctx, index, root +func (_m *L1InfoTreeSyncerMock) GetL1InfoTreeMerkleProofFromIndexToRoot(ctx context.Context, index uint32, root common.Hash) (treetypes.Proof, error) { + ret := _m.Called(ctx, index, root) + + if len(ret) == 0 { + panic("no return value specified for GetL1InfoTreeMerkleProofFromIndexToRoot") + } + + var r0 treetypes.Proof + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint32, common.Hash) (treetypes.Proof, error)); ok { + return rf(ctx, index, root) + } + if rf, ok := ret.Get(0).(func(context.Context, uint32, common.Hash) treetypes.Proof); ok { + r0 = rf(ctx, index, root) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(treetypes.Proof) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint32, common.Hash) error); ok { + r1 = rf(ctx, index, root) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// L1InfoTreeSyncerMock_GetL1InfoTreeMerkleProofFromIndexToRoot_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetL1InfoTreeMerkleProofFromIndexToRoot' +type 
L1InfoTreeSyncerMock_GetL1InfoTreeMerkleProofFromIndexToRoot_Call struct { + *mock.Call +} + +// GetL1InfoTreeMerkleProofFromIndexToRoot is a helper method to define mock.On call +// - ctx context.Context +// - index uint32 +// - root common.Hash +func (_e *L1InfoTreeSyncerMock_Expecter) GetL1InfoTreeMerkleProofFromIndexToRoot(ctx interface{}, index interface{}, root interface{}) *L1InfoTreeSyncerMock_GetL1InfoTreeMerkleProofFromIndexToRoot_Call { + return &L1InfoTreeSyncerMock_GetL1InfoTreeMerkleProofFromIndexToRoot_Call{Call: _e.mock.On("GetL1InfoTreeMerkleProofFromIndexToRoot", ctx, index, root)} +} + +func (_c *L1InfoTreeSyncerMock_GetL1InfoTreeMerkleProofFromIndexToRoot_Call) Run(run func(ctx context.Context, index uint32, root common.Hash)) *L1InfoTreeSyncerMock_GetL1InfoTreeMerkleProofFromIndexToRoot_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint32), args[2].(common.Hash)) + }) + return _c +} + +func (_c *L1InfoTreeSyncerMock_GetL1InfoTreeMerkleProofFromIndexToRoot_Call) Return(_a0 treetypes.Proof, _a1 error) *L1InfoTreeSyncerMock_GetL1InfoTreeMerkleProofFromIndexToRoot_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L1InfoTreeSyncerMock_GetL1InfoTreeMerkleProofFromIndexToRoot_Call) RunAndReturn(run func(context.Context, uint32, common.Hash) (treetypes.Proof, error)) *L1InfoTreeSyncerMock_GetL1InfoTreeMerkleProofFromIndexToRoot_Call { + _c.Call.Return(run) + return _c +} + +// GetL1InfoTreeRootByIndex provides a mock function with given fields: ctx, index +func (_m *L1InfoTreeSyncerMock) GetL1InfoTreeRootByIndex(ctx context.Context, index uint32) (treetypes.Root, error) { + ret := _m.Called(ctx, index) + + if len(ret) == 0 { + panic("no return value specified for GetL1InfoTreeRootByIndex") + } + + var r0 treetypes.Root + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint32) (treetypes.Root, error)); ok { + return rf(ctx, index) + } + if rf, ok := ret.Get(0).(func(context.Context, 
uint32) treetypes.Root); ok { + r0 = rf(ctx, index) + } else { + r0 = ret.Get(0).(treetypes.Root) + } + + if rf, ok := ret.Get(1).(func(context.Context, uint32) error); ok { + r1 = rf(ctx, index) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// L1InfoTreeSyncerMock_GetL1InfoTreeRootByIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetL1InfoTreeRootByIndex' +type L1InfoTreeSyncerMock_GetL1InfoTreeRootByIndex_Call struct { + *mock.Call +} + +// GetL1InfoTreeRootByIndex is a helper method to define mock.On call +// - ctx context.Context +// - index uint32 +func (_e *L1InfoTreeSyncerMock_Expecter) GetL1InfoTreeRootByIndex(ctx interface{}, index interface{}) *L1InfoTreeSyncerMock_GetL1InfoTreeRootByIndex_Call { + return &L1InfoTreeSyncerMock_GetL1InfoTreeRootByIndex_Call{Call: _e.mock.On("GetL1InfoTreeRootByIndex", ctx, index)} +} + +func (_c *L1InfoTreeSyncerMock_GetL1InfoTreeRootByIndex_Call) Run(run func(ctx context.Context, index uint32)) *L1InfoTreeSyncerMock_GetL1InfoTreeRootByIndex_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint32)) + }) + return _c +} + +func (_c *L1InfoTreeSyncerMock_GetL1InfoTreeRootByIndex_Call) Return(_a0 treetypes.Root, _a1 error) *L1InfoTreeSyncerMock_GetL1InfoTreeRootByIndex_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L1InfoTreeSyncerMock_GetL1InfoTreeRootByIndex_Call) RunAndReturn(run func(context.Context, uint32) (treetypes.Root, error)) *L1InfoTreeSyncerMock_GetL1InfoTreeRootByIndex_Call { + _c.Call.Return(run) + return _c +} + +// NewL1InfoTreeSyncerMock creates a new instance of L1InfoTreeSyncerMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewL1InfoTreeSyncerMock(t interface { + mock.TestingT + Cleanup(func()) +}) *L1InfoTreeSyncerMock { + mock := &L1InfoTreeSyncerMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/aggsender/mocks/mock_l2bridge_syncer.go b/aggsender/mocks/mock_l2bridge_syncer.go new file mode 100644 index 00000000..725184c3 --- /dev/null +++ b/aggsender/mocks/mock_l2bridge_syncer.go @@ -0,0 +1,423 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + bridgesync "github.com/0xPolygon/cdk/bridgesync" + common "github.com/ethereum/go-ethereum/common" + + context "context" + + etherman "github.com/0xPolygon/cdk/etherman" + + mock "github.com/stretchr/testify/mock" + + treetypes "github.com/0xPolygon/cdk/tree/types" +) + +// L2BridgeSyncerMock is an autogenerated mock type for the L2BridgeSyncer type +type L2BridgeSyncerMock struct { + mock.Mock +} + +type L2BridgeSyncerMock_Expecter struct { + mock *mock.Mock +} + +func (_m *L2BridgeSyncerMock) EXPECT() *L2BridgeSyncerMock_Expecter { + return &L2BridgeSyncerMock_Expecter{mock: &_m.Mock} +} + +// BlockFinality provides a mock function with given fields: +func (_m *L2BridgeSyncerMock) BlockFinality() etherman.BlockNumberFinality { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for BlockFinality") + } + + var r0 etherman.BlockNumberFinality + if rf, ok := ret.Get(0).(func() etherman.BlockNumberFinality); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(etherman.BlockNumberFinality) + } + + return r0 +} + +// L2BridgeSyncerMock_BlockFinality_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BlockFinality' +type L2BridgeSyncerMock_BlockFinality_Call struct { + *mock.Call +} + +// BlockFinality is a helper method to define mock.On call +func (_e *L2BridgeSyncerMock_Expecter) BlockFinality() *L2BridgeSyncerMock_BlockFinality_Call { + return &L2BridgeSyncerMock_BlockFinality_Call{Call: 
_e.mock.On("BlockFinality")} +} + +func (_c *L2BridgeSyncerMock_BlockFinality_Call) Run(run func()) *L2BridgeSyncerMock_BlockFinality_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *L2BridgeSyncerMock_BlockFinality_Call) Return(_a0 etherman.BlockNumberFinality) *L2BridgeSyncerMock_BlockFinality_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *L2BridgeSyncerMock_BlockFinality_Call) RunAndReturn(run func() etherman.BlockNumberFinality) *L2BridgeSyncerMock_BlockFinality_Call { + _c.Call.Return(run) + return _c +} + +// GetBlockByLER provides a mock function with given fields: ctx, ler +func (_m *L2BridgeSyncerMock) GetBlockByLER(ctx context.Context, ler common.Hash) (uint64, error) { + ret := _m.Called(ctx, ler) + + if len(ret) == 0 { + panic("no return value specified for GetBlockByLER") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (uint64, error)); ok { + return rf(ctx, ler) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) uint64); ok { + r0 = rf(ctx, ler) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { + r1 = rf(ctx, ler) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// L2BridgeSyncerMock_GetBlockByLER_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetBlockByLER' +type L2BridgeSyncerMock_GetBlockByLER_Call struct { + *mock.Call +} + +// GetBlockByLER is a helper method to define mock.On call +// - ctx context.Context +// - ler common.Hash +func (_e *L2BridgeSyncerMock_Expecter) GetBlockByLER(ctx interface{}, ler interface{}) *L2BridgeSyncerMock_GetBlockByLER_Call { + return &L2BridgeSyncerMock_GetBlockByLER_Call{Call: _e.mock.On("GetBlockByLER", ctx, ler)} +} + +func (_c *L2BridgeSyncerMock_GetBlockByLER_Call) Run(run func(ctx context.Context, ler common.Hash)) *L2BridgeSyncerMock_GetBlockByLER_Call { + 
_c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(common.Hash)) + }) + return _c +} + +func (_c *L2BridgeSyncerMock_GetBlockByLER_Call) Return(_a0 uint64, _a1 error) *L2BridgeSyncerMock_GetBlockByLER_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L2BridgeSyncerMock_GetBlockByLER_Call) RunAndReturn(run func(context.Context, common.Hash) (uint64, error)) *L2BridgeSyncerMock_GetBlockByLER_Call { + _c.Call.Return(run) + return _c +} + +// GetBridgesPublished provides a mock function with given fields: ctx, fromBlock, toBlock +func (_m *L2BridgeSyncerMock) GetBridgesPublished(ctx context.Context, fromBlock uint64, toBlock uint64) ([]bridgesync.Bridge, error) { + ret := _m.Called(ctx, fromBlock, toBlock) + + if len(ret) == 0 { + panic("no return value specified for GetBridgesPublished") + } + + var r0 []bridgesync.Bridge + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64) ([]bridgesync.Bridge, error)); ok { + return rf(ctx, fromBlock, toBlock) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64) []bridgesync.Bridge); ok { + r0 = rf(ctx, fromBlock, toBlock) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]bridgesync.Bridge) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64, uint64) error); ok { + r1 = rf(ctx, fromBlock, toBlock) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// L2BridgeSyncerMock_GetBridgesPublished_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetBridgesPublished' +type L2BridgeSyncerMock_GetBridgesPublished_Call struct { + *mock.Call +} + +// GetBridgesPublished is a helper method to define mock.On call +// - ctx context.Context +// - fromBlock uint64 +// - toBlock uint64 +func (_e *L2BridgeSyncerMock_Expecter) GetBridgesPublished(ctx interface{}, fromBlock interface{}, toBlock interface{}) *L2BridgeSyncerMock_GetBridgesPublished_Call { + return 
&L2BridgeSyncerMock_GetBridgesPublished_Call{Call: _e.mock.On("GetBridgesPublished", ctx, fromBlock, toBlock)} +} + +func (_c *L2BridgeSyncerMock_GetBridgesPublished_Call) Run(run func(ctx context.Context, fromBlock uint64, toBlock uint64)) *L2BridgeSyncerMock_GetBridgesPublished_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64), args[2].(uint64)) + }) + return _c +} + +func (_c *L2BridgeSyncerMock_GetBridgesPublished_Call) Return(_a0 []bridgesync.Bridge, _a1 error) *L2BridgeSyncerMock_GetBridgesPublished_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L2BridgeSyncerMock_GetBridgesPublished_Call) RunAndReturn(run func(context.Context, uint64, uint64) ([]bridgesync.Bridge, error)) *L2BridgeSyncerMock_GetBridgesPublished_Call { + _c.Call.Return(run) + return _c +} + +// GetClaims provides a mock function with given fields: ctx, fromBlock, toBlock +func (_m *L2BridgeSyncerMock) GetClaims(ctx context.Context, fromBlock uint64, toBlock uint64) ([]bridgesync.Claim, error) { + ret := _m.Called(ctx, fromBlock, toBlock) + + if len(ret) == 0 { + panic("no return value specified for GetClaims") + } + + var r0 []bridgesync.Claim + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64) ([]bridgesync.Claim, error)); ok { + return rf(ctx, fromBlock, toBlock) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64) []bridgesync.Claim); ok { + r0 = rf(ctx, fromBlock, toBlock) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]bridgesync.Claim) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64, uint64) error); ok { + r1 = rf(ctx, fromBlock, toBlock) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// L2BridgeSyncerMock_GetClaims_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetClaims' +type L2BridgeSyncerMock_GetClaims_Call struct { + *mock.Call +} + +// GetClaims is a helper method to define mock.On call +// 
- ctx context.Context +// - fromBlock uint64 +// - toBlock uint64 +func (_e *L2BridgeSyncerMock_Expecter) GetClaims(ctx interface{}, fromBlock interface{}, toBlock interface{}) *L2BridgeSyncerMock_GetClaims_Call { + return &L2BridgeSyncerMock_GetClaims_Call{Call: _e.mock.On("GetClaims", ctx, fromBlock, toBlock)} +} + +func (_c *L2BridgeSyncerMock_GetClaims_Call) Run(run func(ctx context.Context, fromBlock uint64, toBlock uint64)) *L2BridgeSyncerMock_GetClaims_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64), args[2].(uint64)) + }) + return _c +} + +func (_c *L2BridgeSyncerMock_GetClaims_Call) Return(_a0 []bridgesync.Claim, _a1 error) *L2BridgeSyncerMock_GetClaims_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L2BridgeSyncerMock_GetClaims_Call) RunAndReturn(run func(context.Context, uint64, uint64) ([]bridgesync.Claim, error)) *L2BridgeSyncerMock_GetClaims_Call { + _c.Call.Return(run) + return _c +} + +// GetExitRootByIndex provides a mock function with given fields: ctx, index +func (_m *L2BridgeSyncerMock) GetExitRootByIndex(ctx context.Context, index uint32) (treetypes.Root, error) { + ret := _m.Called(ctx, index) + + if len(ret) == 0 { + panic("no return value specified for GetExitRootByIndex") + } + + var r0 treetypes.Root + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint32) (treetypes.Root, error)); ok { + return rf(ctx, index) + } + if rf, ok := ret.Get(0).(func(context.Context, uint32) treetypes.Root); ok { + r0 = rf(ctx, index) + } else { + r0 = ret.Get(0).(treetypes.Root) + } + + if rf, ok := ret.Get(1).(func(context.Context, uint32) error); ok { + r1 = rf(ctx, index) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// L2BridgeSyncerMock_GetExitRootByIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetExitRootByIndex' +type L2BridgeSyncerMock_GetExitRootByIndex_Call struct { + *mock.Call +} + +// GetExitRootByIndex 
is a helper method to define mock.On call +// - ctx context.Context +// - index uint32 +func (_e *L2BridgeSyncerMock_Expecter) GetExitRootByIndex(ctx interface{}, index interface{}) *L2BridgeSyncerMock_GetExitRootByIndex_Call { + return &L2BridgeSyncerMock_GetExitRootByIndex_Call{Call: _e.mock.On("GetExitRootByIndex", ctx, index)} +} + +func (_c *L2BridgeSyncerMock_GetExitRootByIndex_Call) Run(run func(ctx context.Context, index uint32)) *L2BridgeSyncerMock_GetExitRootByIndex_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint32)) + }) + return _c +} + +func (_c *L2BridgeSyncerMock_GetExitRootByIndex_Call) Return(_a0 treetypes.Root, _a1 error) *L2BridgeSyncerMock_GetExitRootByIndex_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L2BridgeSyncerMock_GetExitRootByIndex_Call) RunAndReturn(run func(context.Context, uint32) (treetypes.Root, error)) *L2BridgeSyncerMock_GetExitRootByIndex_Call { + _c.Call.Return(run) + return _c +} + +// GetLastProcessedBlock provides a mock function with given fields: ctx +func (_m *L2BridgeSyncerMock) GetLastProcessedBlock(ctx context.Context) (uint64, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GetLastProcessedBlock") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (uint64, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) uint64); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// L2BridgeSyncerMock_GetLastProcessedBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLastProcessedBlock' +type L2BridgeSyncerMock_GetLastProcessedBlock_Call struct { + *mock.Call +} + +// GetLastProcessedBlock is a helper method to define mock.On call +// - ctx context.Context +func 
(_e *L2BridgeSyncerMock_Expecter) GetLastProcessedBlock(ctx interface{}) *L2BridgeSyncerMock_GetLastProcessedBlock_Call { + return &L2BridgeSyncerMock_GetLastProcessedBlock_Call{Call: _e.mock.On("GetLastProcessedBlock", ctx)} +} + +func (_c *L2BridgeSyncerMock_GetLastProcessedBlock_Call) Run(run func(ctx context.Context)) *L2BridgeSyncerMock_GetLastProcessedBlock_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *L2BridgeSyncerMock_GetLastProcessedBlock_Call) Return(_a0 uint64, _a1 error) *L2BridgeSyncerMock_GetLastProcessedBlock_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L2BridgeSyncerMock_GetLastProcessedBlock_Call) RunAndReturn(run func(context.Context) (uint64, error)) *L2BridgeSyncerMock_GetLastProcessedBlock_Call { + _c.Call.Return(run) + return _c +} + +// OriginNetwork provides a mock function with given fields: +func (_m *L2BridgeSyncerMock) OriginNetwork() uint32 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for OriginNetwork") + } + + var r0 uint32 + if rf, ok := ret.Get(0).(func() uint32); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint32) + } + + return r0 +} + +// L2BridgeSyncerMock_OriginNetwork_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'OriginNetwork' +type L2BridgeSyncerMock_OriginNetwork_Call struct { + *mock.Call +} + +// OriginNetwork is a helper method to define mock.On call +func (_e *L2BridgeSyncerMock_Expecter) OriginNetwork() *L2BridgeSyncerMock_OriginNetwork_Call { + return &L2BridgeSyncerMock_OriginNetwork_Call{Call: _e.mock.On("OriginNetwork")} +} + +func (_c *L2BridgeSyncerMock_OriginNetwork_Call) Run(run func()) *L2BridgeSyncerMock_OriginNetwork_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *L2BridgeSyncerMock_OriginNetwork_Call) Return(_a0 uint32) *L2BridgeSyncerMock_OriginNetwork_Call { + _c.Call.Return(_a0) + return _c +} 
+ +func (_c *L2BridgeSyncerMock_OriginNetwork_Call) RunAndReturn(run func() uint32) *L2BridgeSyncerMock_OriginNetwork_Call { + _c.Call.Return(run) + return _c +} + +// NewL2BridgeSyncerMock creates a new instance of L2BridgeSyncerMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewL2BridgeSyncerMock(t interface { + mock.TestingT + Cleanup(func()) +}) *L2BridgeSyncerMock { + mock := &L2BridgeSyncerMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/aggsender/mocks/mock_logger.go b/aggsender/mocks/mock_logger.go new file mode 100644 index 00000000..5b0eb4e9 --- /dev/null +++ b/aggsender/mocks/mock_logger.go @@ -0,0 +1,290 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import mock "github.com/stretchr/testify/mock" + +// LoggerMock is an autogenerated mock type for the Logger type +type LoggerMock struct { + mock.Mock +} + +type LoggerMock_Expecter struct { + mock *mock.Mock +} + +func (_m *LoggerMock) EXPECT() *LoggerMock_Expecter { + return &LoggerMock_Expecter{mock: &_m.Mock} +} + +// Debug provides a mock function with given fields: args +func (_m *LoggerMock) Debug(args ...interface{}) { + var _ca []interface{} + _ca = append(_ca, args...) + _m.Called(_ca...) 
+} + +// LoggerMock_Debug_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Debug' +type LoggerMock_Debug_Call struct { + *mock.Call +} + +// Debug is a helper method to define mock.On call +// - args ...interface{} +func (_e *LoggerMock_Expecter) Debug(args ...interface{}) *LoggerMock_Debug_Call { + return &LoggerMock_Debug_Call{Call: _e.mock.On("Debug", + append([]interface{}{}, args...)...)} +} + +func (_c *LoggerMock_Debug_Call) Run(run func(args ...interface{})) *LoggerMock_Debug_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]interface{}, len(args)-0) + for i, a := range args[0:] { + if a != nil { + variadicArgs[i] = a.(interface{}) + } + } + run(variadicArgs...) + }) + return _c +} + +func (_c *LoggerMock_Debug_Call) Return() *LoggerMock_Debug_Call { + _c.Call.Return() + return _c +} + +func (_c *LoggerMock_Debug_Call) RunAndReturn(run func(...interface{})) *LoggerMock_Debug_Call { + _c.Call.Return(run) + return _c +} + +// Debugf provides a mock function with given fields: format, args +func (_m *LoggerMock) Debugf(format string, args ...interface{}) { + var _ca []interface{} + _ca = append(_ca, format) + _ca = append(_ca, args...) + _m.Called(_ca...) 
+} + +// LoggerMock_Debugf_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Debugf' +type LoggerMock_Debugf_Call struct { + *mock.Call +} + +// Debugf is a helper method to define mock.On call +// - format string +// - args ...interface{} +func (_e *LoggerMock_Expecter) Debugf(format interface{}, args ...interface{}) *LoggerMock_Debugf_Call { + return &LoggerMock_Debugf_Call{Call: _e.mock.On("Debugf", + append([]interface{}{format}, args...)...)} +} + +func (_c *LoggerMock_Debugf_Call) Run(run func(format string, args ...interface{})) *LoggerMock_Debugf_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]interface{}, len(args)-1) + for i, a := range args[1:] { + if a != nil { + variadicArgs[i] = a.(interface{}) + } + } + run(args[0].(string), variadicArgs...) + }) + return _c +} + +func (_c *LoggerMock_Debugf_Call) Return() *LoggerMock_Debugf_Call { + _c.Call.Return() + return _c +} + +func (_c *LoggerMock_Debugf_Call) RunAndReturn(run func(string, ...interface{})) *LoggerMock_Debugf_Call { + _c.Call.Return(run) + return _c +} + +// Error provides a mock function with given fields: args +func (_m *LoggerMock) Error(args ...interface{}) { + var _ca []interface{} + _ca = append(_ca, args...) + _m.Called(_ca...) 
+} + +// LoggerMock_Error_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Error' +type LoggerMock_Error_Call struct { + *mock.Call +} + +// Error is a helper method to define mock.On call +// - args ...interface{} +func (_e *LoggerMock_Expecter) Error(args ...interface{}) *LoggerMock_Error_Call { + return &LoggerMock_Error_Call{Call: _e.mock.On("Error", + append([]interface{}{}, args...)...)} +} + +func (_c *LoggerMock_Error_Call) Run(run func(args ...interface{})) *LoggerMock_Error_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]interface{}, len(args)-0) + for i, a := range args[0:] { + if a != nil { + variadicArgs[i] = a.(interface{}) + } + } + run(variadicArgs...) + }) + return _c +} + +func (_c *LoggerMock_Error_Call) Return() *LoggerMock_Error_Call { + _c.Call.Return() + return _c +} + +func (_c *LoggerMock_Error_Call) RunAndReturn(run func(...interface{})) *LoggerMock_Error_Call { + _c.Call.Return(run) + return _c +} + +// Errorf provides a mock function with given fields: format, args +func (_m *LoggerMock) Errorf(format string, args ...interface{}) { + var _ca []interface{} + _ca = append(_ca, format) + _ca = append(_ca, args...) + _m.Called(_ca...) 
+} + +// LoggerMock_Errorf_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Errorf' +type LoggerMock_Errorf_Call struct { + *mock.Call +} + +// Errorf is a helper method to define mock.On call +// - format string +// - args ...interface{} +func (_e *LoggerMock_Expecter) Errorf(format interface{}, args ...interface{}) *LoggerMock_Errorf_Call { + return &LoggerMock_Errorf_Call{Call: _e.mock.On("Errorf", + append([]interface{}{format}, args...)...)} +} + +func (_c *LoggerMock_Errorf_Call) Run(run func(format string, args ...interface{})) *LoggerMock_Errorf_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]interface{}, len(args)-1) + for i, a := range args[1:] { + if a != nil { + variadicArgs[i] = a.(interface{}) + } + } + run(args[0].(string), variadicArgs...) + }) + return _c +} + +func (_c *LoggerMock_Errorf_Call) Return() *LoggerMock_Errorf_Call { + _c.Call.Return() + return _c +} + +func (_c *LoggerMock_Errorf_Call) RunAndReturn(run func(string, ...interface{})) *LoggerMock_Errorf_Call { + _c.Call.Return(run) + return _c +} + +// Info provides a mock function with given fields: args +func (_m *LoggerMock) Info(args ...interface{}) { + var _ca []interface{} + _ca = append(_ca, args...) + _m.Called(_ca...) 
+} + +// LoggerMock_Info_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Info' +type LoggerMock_Info_Call struct { + *mock.Call +} + +// Info is a helper method to define mock.On call +// - args ...interface{} +func (_e *LoggerMock_Expecter) Info(args ...interface{}) *LoggerMock_Info_Call { + return &LoggerMock_Info_Call{Call: _e.mock.On("Info", + append([]interface{}{}, args...)...)} +} + +func (_c *LoggerMock_Info_Call) Run(run func(args ...interface{})) *LoggerMock_Info_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]interface{}, len(args)-0) + for i, a := range args[0:] { + if a != nil { + variadicArgs[i] = a.(interface{}) + } + } + run(variadicArgs...) + }) + return _c +} + +func (_c *LoggerMock_Info_Call) Return() *LoggerMock_Info_Call { + _c.Call.Return() + return _c +} + +func (_c *LoggerMock_Info_Call) RunAndReturn(run func(...interface{})) *LoggerMock_Info_Call { + _c.Call.Return(run) + return _c +} + +// Infof provides a mock function with given fields: format, args +func (_m *LoggerMock) Infof(format string, args ...interface{}) { + var _ca []interface{} + _ca = append(_ca, format) + _ca = append(_ca, args...) + _m.Called(_ca...) 
+} + +// LoggerMock_Infof_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Infof' +type LoggerMock_Infof_Call struct { + *mock.Call +} + +// Infof is a helper method to define mock.On call +// - format string +// - args ...interface{} +func (_e *LoggerMock_Expecter) Infof(format interface{}, args ...interface{}) *LoggerMock_Infof_Call { + return &LoggerMock_Infof_Call{Call: _e.mock.On("Infof", + append([]interface{}{format}, args...)...)} +} + +func (_c *LoggerMock_Infof_Call) Run(run func(format string, args ...interface{})) *LoggerMock_Infof_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]interface{}, len(args)-1) + for i, a := range args[1:] { + if a != nil { + variadicArgs[i] = a.(interface{}) + } + } + run(args[0].(string), variadicArgs...) + }) + return _c +} + +func (_c *LoggerMock_Infof_Call) Return() *LoggerMock_Infof_Call { + _c.Call.Return() + return _c +} + +func (_c *LoggerMock_Infof_Call) RunAndReturn(run func(string, ...interface{})) *LoggerMock_Infof_Call { + _c.Call.Return(run) + return _c +} + +// NewLoggerMock creates a new instance of LoggerMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewLoggerMock(t interface { + mock.TestingT + Cleanup(func()) +}) *LoggerMock { + mock := &LoggerMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/aggsender/types/types.go b/aggsender/types/types.go new file mode 100644 index 00000000..d6421132 --- /dev/null +++ b/aggsender/types/types.go @@ -0,0 +1,65 @@ +package types + +import ( + "context" + "fmt" + "math/big" + + "github.com/0xPolygon/cdk/agglayer" + "github.com/0xPolygon/cdk/bridgesync" + "github.com/0xPolygon/cdk/etherman" + "github.com/0xPolygon/cdk/l1infotreesync" + treeTypes "github.com/0xPolygon/cdk/tree/types" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" +) + +// L1InfoTreeSyncer is an interface defining functions that an L1InfoTreeSyncer should implement +type L1InfoTreeSyncer interface { + GetInfoByGlobalExitRoot(globalExitRoot common.Hash) (*l1infotreesync.L1InfoTreeLeaf, error) + GetL1InfoTreeMerkleProofFromIndexToRoot( + ctx context.Context, index uint32, root common.Hash, + ) (treeTypes.Proof, error) + GetL1InfoTreeRootByIndex(ctx context.Context, index uint32) (treeTypes.Root, error) +} + +// L2BridgeSyncer is an interface defining functions that an L2BridgeSyncer should implement +type L2BridgeSyncer interface { + GetBlockByLER(ctx context.Context, ler common.Hash) (uint64, error) + GetExitRootByIndex(ctx context.Context, index uint32) (treeTypes.Root, error) + GetBridgesPublished(ctx context.Context, fromBlock, toBlock uint64) ([]bridgesync.Bridge, error) + GetClaims(ctx context.Context, fromBlock, toBlock uint64) ([]bridgesync.Claim, error) + OriginNetwork() uint32 + BlockFinality() etherman.BlockNumberFinality + GetLastProcessedBlock(ctx context.Context) (uint64, error) +} + +// EthClient is an interface defining functions that an EthClient should implement +type EthClient interface { + BlockNumber(ctx context.Context) (uint64, error) + HeaderByNumber(ctx context.Context, number 
*big.Int) (*types.Header, error) +} + +// Logger is an interface that defines the methods to log messages +type Logger interface { + Info(args ...interface{}) + Infof(format string, args ...interface{}) + Error(args ...interface{}) + Errorf(format string, args ...interface{}) + Debug(args ...interface{}) + Debugf(format string, args ...interface{}) +} + +type CertificateInfo struct { + Height uint64 `meddler:"height"` + CertificateID common.Hash `meddler:"certificate_id"` + NewLocalExitRoot common.Hash `meddler:"new_local_exit_root"` + FromBlock uint64 `meddler:"from_block"` + ToBlock uint64 `meddler:"to_block"` + Status agglayer.CertificateStatus `meddler:"status"` +} + +func (c CertificateInfo) String() string { + return fmt.Sprintf("Height: %d, CertificateID: %s, FromBlock: %d, ToBlock: %d, NewLocalExitRoot: %s", + c.Height, c.CertificateID.String(), c.FromBlock, c.ToBlock, c.NewLocalExitRoot.String()) +} diff --git a/bridgesync/bridgesync.go b/bridgesync/bridgesync.go index e6a61c5e..b3c3c853 100644 --- a/bridgesync/bridgesync.go +++ b/bridgesync/bridgesync.go @@ -16,10 +16,17 @@ const ( downloadBufferSize = 1000 ) +type ReorgDetector interface { + sync.ReorgDetector +} + // BridgeSync manages the state of the exit tree for the bridge contract by processing Ethereum blockchain events. 
type BridgeSync struct { processor *processor driver *sync.EVMDriver + + originNetwork uint32 + blockFinality etherman.BlockNumberFinality } // NewL1 creates a bridge syncer that synchronizes the mainnet exit tree @@ -29,12 +36,13 @@ func NewL1( bridge common.Address, syncBlockChunkSize uint64, blockFinalityType etherman.BlockNumberFinality, - rd sync.ReorgDetector, + rd ReorgDetector, ethClient EthClienter, initialBlock uint64, waitForNewBlocksPeriod time.Duration, retryAfterErrorPeriod time.Duration, maxRetryAttemptsAfterError int, + originNetwork uint32, ) (*BridgeSync, error) { return newBridgeSync( ctx, @@ -49,6 +57,7 @@ func NewL1( waitForNewBlocksPeriod, retryAfterErrorPeriod, maxRetryAttemptsAfterError, + originNetwork, false, ) } @@ -60,12 +69,13 @@ func NewL2( bridge common.Address, syncBlockChunkSize uint64, blockFinalityType etherman.BlockNumberFinality, - rd sync.ReorgDetector, + rd ReorgDetector, ethClient EthClienter, initialBlock uint64, waitForNewBlocksPeriod time.Duration, retryAfterErrorPeriod time.Duration, maxRetryAttemptsAfterError int, + originNetwork uint32, ) (*BridgeSync, error) { return newBridgeSync( ctx, @@ -80,6 +90,7 @@ func NewL2( waitForNewBlocksPeriod, retryAfterErrorPeriod, maxRetryAttemptsAfterError, + originNetwork, true, ) } @@ -90,13 +101,14 @@ func newBridgeSync( bridge common.Address, syncBlockChunkSize uint64, blockFinalityType etherman.BlockNumberFinality, - rd sync.ReorgDetector, + rd ReorgDetector, ethClient EthClienter, initialBlock uint64, l1OrL2ID string, waitForNewBlocksPeriod time.Duration, retryAfterErrorPeriod time.Duration, maxRetryAttemptsAfterError int, + originNetwork uint32, syncFullClaims bool, ) (*BridgeSync, error) { processor, err := newProcessor(dbPath, l1OrL2ID) @@ -146,8 +158,10 @@ func newBridgeSync( } return &BridgeSync{ - processor: processor, - driver: driver, + processor: processor, + driver: driver, + originNetwork: originNetwork, + blockFinality: blockFinalityType, }, nil } @@ -172,12 +186,16 @@ 
func (s *BridgeSync) GetBridges(ctx context.Context, fromBlock, toBlock uint64) return s.processor.GetBridges(ctx, fromBlock, toBlock) } +func (s *BridgeSync) GetBridgesPublished(ctx context.Context, fromBlock, toBlock uint64) ([]Bridge, error) { + return s.processor.GetBridgesPublished(ctx, fromBlock, toBlock) +} + func (s *BridgeSync) GetProof(ctx context.Context, depositCount uint32, localExitRoot common.Hash) (tree.Proof, error) { return s.processor.exitTree.GetProof(ctx, depositCount, localExitRoot) } -func (p *processor) GetBlockByLER(ctx context.Context, ler common.Hash) (uint64, error) { - root, err := p.exitTree.GetRootByHash(ctx, ler) +func (s *BridgeSync) GetBlockByLER(ctx context.Context, ler common.Hash) (uint64, error) { + root, err := s.processor.exitTree.GetRootByHash(ctx, ler) if err != nil { return 0, err } @@ -191,3 +209,18 @@ func (s *BridgeSync) GetRootByLER(ctx context.Context, ler common.Hash) (*tree.R } return root, nil } + +// GetExitRootByIndex returns the root of the exit tree at the moment the leaf with the given index was added +func (s *BridgeSync) GetExitRootByIndex(ctx context.Context, index uint32) (tree.Root, error) { + return s.processor.exitTree.GetRootByIndex(ctx, index) +} + +// OriginNetwork returns the network ID of the origin chain +func (s *BridgeSync) OriginNetwork() uint32 { + return s.originNetwork +} + +// BlockFinality returns the block finality type +func (s *BridgeSync) BlockFinality() etherman.BlockNumberFinality { + return s.blockFinality +} diff --git a/bridgesync/bridgesync_test.go b/bridgesync/bridgesync_test.go new file mode 100644 index 00000000..cb328c68 --- /dev/null +++ b/bridgesync/bridgesync_test.go @@ -0,0 +1,81 @@ +package bridgesync_test + +import ( + "context" + "testing" + "time" + + "github.com/0xPolygon/cdk/bridgesync" + mocksbridgesync "github.com/0xPolygon/cdk/bridgesync/mocks" + "github.com/0xPolygon/cdk/etherman" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/assert" 
+ "github.com/stretchr/testify/mock" +) + +// Mock implementations for the interfaces +type MockEthClienter struct { + mock.Mock +} + +type MockBridgeContractor struct { + mock.Mock +} + +func TestNewLx(t *testing.T) { + ctx := context.Background() + dbPath := "test_db_path" + bridge := common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678") + syncBlockChunkSize := uint64(100) + blockFinalityType := etherman.SafeBlock + initialBlock := uint64(0) + waitForNewBlocksPeriod := time.Second * 10 + retryAfterErrorPeriod := time.Second * 5 + maxRetryAttemptsAfterError := 3 + originNetwork := uint32(1) + + mockEthClient := mocksbridgesync.NewEthClienter(t) + mockReorgDetector := mocksbridgesync.NewReorgDetector(t) + + mockReorgDetector.EXPECT().Subscribe(mock.Anything).Return(nil, nil) + + bridgeSync, err := bridgesync.NewL1( + ctx, + dbPath, + bridge, + syncBlockChunkSize, + blockFinalityType, + mockReorgDetector, + mockEthClient, + initialBlock, + waitForNewBlocksPeriod, + retryAfterErrorPeriod, + maxRetryAttemptsAfterError, + originNetwork, + ) + + assert.NoError(t, err) + assert.NotNil(t, bridgeSync) + assert.Equal(t, originNetwork, bridgeSync.OriginNetwork()) + assert.Equal(t, blockFinalityType, bridgeSync.BlockFinality()) + + bridgeSyncL2, err := bridgesync.NewL2( + ctx, + dbPath, + bridge, + syncBlockChunkSize, + blockFinalityType, + mockReorgDetector, + mockEthClient, + initialBlock, + waitForNewBlocksPeriod, + retryAfterErrorPeriod, + maxRetryAttemptsAfterError, + originNetwork, + ) + + assert.NoError(t, err) + assert.NotNil(t, bridgeSync) + assert.Equal(t, originNetwork, bridgeSyncL2.OriginNetwork()) + assert.Equal(t, blockFinalityType, bridgeSyncL2.BlockFinality()) +} diff --git a/bridgesync/claimcalldata_test.go b/bridgesync/claimcalldata_test.go index b8b432ae..a4ab49de 100644 --- a/bridgesync/claimcalldata_test.go +++ b/bridgesync/claimcalldata_test.go @@ -74,6 +74,7 @@ func TestClaimCalldata(t *testing.T) { ProofRollupExitRoot: proofRollupH, 
DestinationNetwork: 0, Metadata: []byte{}, + GlobalExitRoot: crypto.Keccak256Hash(common.HexToHash("5ca1e").Bytes(), common.HexToHash("dead").Bytes()), } expectedClaim2 := Claim{ OriginNetwork: 87, @@ -86,6 +87,7 @@ func TestClaimCalldata(t *testing.T) { ProofRollupExitRoot: proofRollupH, DestinationNetwork: 0, Metadata: []byte{}, + GlobalExitRoot: crypto.Keccak256Hash(common.HexToHash("5ca1e").Bytes(), common.HexToHash("dead").Bytes()), } expectedClaim3 := Claim{ OriginNetwork: 69, @@ -98,6 +100,7 @@ func TestClaimCalldata(t *testing.T) { ProofRollupExitRoot: proofRollupH, DestinationNetwork: 0, Metadata: []byte{}, + GlobalExitRoot: crypto.Keccak256Hash(common.HexToHash("5ca1e").Bytes(), common.HexToHash("dead").Bytes()), } auth.GasLimit = 999999 // for some reason gas estimation fails :( diff --git a/bridgesync/config.go b/bridgesync/config.go index 66eb00ed..d2373b53 100644 --- a/bridgesync/config.go +++ b/bridgesync/config.go @@ -24,4 +24,6 @@ type Config struct { MaxRetryAttemptsAfterError int `mapstructure:"MaxRetryAttemptsAfterError"` // WaitForNewBlocksPeriod time that will be waited when the synchronizer has reached the latest block WaitForNewBlocksPeriod types.Duration `mapstructure:"WaitForNewBlocksPeriod"` + // OriginNetwork is the id of the network where the bridge is deployed + OriginNetwork uint32 `mapstructure:"OriginNetwork"` } diff --git a/bridgesync/downloader.go b/bridgesync/downloader.go index dbea8c8f..782d5f1b 100644 --- a/bridgesync/downloader.go +++ b/bridgesync/downloader.go @@ -288,11 +288,14 @@ func decodeClaimCallDataAndSetIfFound(data []interface{}, claim *Claim) (bool, e if !ok { return false, fmt.Errorf("unexpected type for 'DestinationNetwork'. Expected 'uint32', got '%T'", data[7]) } + claim.Metadata, ok = data[10].([]byte) if !ok { return false, fmt.Errorf("unexpected type for 'claim Metadata'. 
Expected '[]byte', got '%T'", data[10]) } + claim.GlobalExitRoot = crypto.Keccak256Hash(claim.MainnetExitRoot.Bytes(), claim.RollupExitRoot.Bytes()) + return true, nil } } diff --git a/bridgesync/e2e_test.go b/bridgesync/e2e_test.go index c0a22484..a8868ce1 100644 --- a/bridgesync/e2e_test.go +++ b/bridgesync/e2e_test.go @@ -29,7 +29,7 @@ func TestBridgeEventE2E(t *testing.T) { go rd.Start(ctx) //nolint:errcheck testClient := helpers.TestClient{ClientRenamed: client.Client()} - syncer, err := bridgesync.NewL1(ctx, dbPathSyncer, setup.EBZkevmBridgeAddr, 10, etherman.LatestBlock, rd, testClient, 0, time.Millisecond*10, 0, 0) + syncer, err := bridgesync.NewL1(ctx, dbPathSyncer, setup.EBZkevmBridgeAddr, 10, etherman.LatestBlock, rd, testClient, 0, time.Millisecond*10, 0, 0, 1) require.NoError(t, err) go syncer.Start(ctx) diff --git a/bridgesync/migrations/bridgesync0001.sql b/bridgesync/migrations/bridgesync0001.sql index de90910c..74adc6d5 100644 --- a/bridgesync/migrations/bridgesync0001.sql +++ b/bridgesync/migrations/bridgesync0001.sql @@ -16,7 +16,7 @@ CREATE TABLE bridge ( origin_address VARCHAR NOT NULL, destination_network INTEGER NOT NULL, destination_address VARCHAR NOT NULL, - amount DECIMAL(78, 0) NOT NULL, + amount TEXT NOT NULL, metadata BLOB, deposit_count INTEGER NOT NULL, PRIMARY KEY (block_num, block_pos) @@ -25,11 +25,11 @@ CREATE TABLE bridge ( CREATE TABLE claim ( block_num INTEGER NOT NULL REFERENCES block(num) ON DELETE CASCADE, block_pos INTEGER NOT NULL, - global_index DECIMAL(78, 0) NOT NULL, + global_index TEXT NOT NULL, origin_network INTEGER NOT NULL, origin_address VARCHAR NOT NULL, destination_address VARCHAR NOT NULL, - amount DECIMAL(78, 0) NOT NULL, + amount TEXT NOT NULL, proof_local_exit_root VARCHAR, proof_rollup_exit_root VARCHAR, mainnet_exit_root VARCHAR, diff --git a/bridgesync/mocks/bridge_contractor.go b/bridgesync/mocks/bridge_contractor.go new file mode 100644 index 00000000..fd559850 --- /dev/null +++ 
b/bridgesync/mocks/bridge_contractor.go @@ -0,0 +1,93 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks_bridgesync + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" +) + +// BridgeContractor is an autogenerated mock type for the BridgeContractor type +type BridgeContractor struct { + mock.Mock +} + +type BridgeContractor_Expecter struct { + mock *mock.Mock +} + +func (_m *BridgeContractor) EXPECT() *BridgeContractor_Expecter { + return &BridgeContractor_Expecter{mock: &_m.Mock} +} + +// LastUpdatedDepositCount provides a mock function with given fields: ctx, BlockNumber +func (_m *BridgeContractor) LastUpdatedDepositCount(ctx context.Context, BlockNumber uint64) (uint32, error) { + ret := _m.Called(ctx, BlockNumber) + + if len(ret) == 0 { + panic("no return value specified for LastUpdatedDepositCount") + } + + var r0 uint32 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64) (uint32, error)); ok { + return rf(ctx, BlockNumber) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64) uint32); ok { + r0 = rf(ctx, BlockNumber) + } else { + r0 = ret.Get(0).(uint32) + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64) error); ok { + r1 = rf(ctx, BlockNumber) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// BridgeContractor_LastUpdatedDepositCount_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LastUpdatedDepositCount' +type BridgeContractor_LastUpdatedDepositCount_Call struct { + *mock.Call +} + +// LastUpdatedDepositCount is a helper method to define mock.On call +// - ctx context.Context +// - BlockNumber uint64 +func (_e *BridgeContractor_Expecter) LastUpdatedDepositCount(ctx interface{}, BlockNumber interface{}) *BridgeContractor_LastUpdatedDepositCount_Call { + return &BridgeContractor_LastUpdatedDepositCount_Call{Call: _e.mock.On("LastUpdatedDepositCount", ctx, BlockNumber)} +} + +func (_c 
*BridgeContractor_LastUpdatedDepositCount_Call) Run(run func(ctx context.Context, BlockNumber uint64)) *BridgeContractor_LastUpdatedDepositCount_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64)) + }) + return _c +} + +func (_c *BridgeContractor_LastUpdatedDepositCount_Call) Return(_a0 uint32, _a1 error) *BridgeContractor_LastUpdatedDepositCount_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *BridgeContractor_LastUpdatedDepositCount_Call) RunAndReturn(run func(context.Context, uint64) (uint32, error)) *BridgeContractor_LastUpdatedDepositCount_Call { + _c.Call.Return(run) + return _c +} + +// NewBridgeContractor creates a new instance of BridgeContractor. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewBridgeContractor(t interface { + mock.TestingT + Cleanup(func()) +}) *BridgeContractor { + mock := &BridgeContractor{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/bridgesync/mocks/eth_clienter.go b/bridgesync/mocks/eth_clienter.go new file mode 100644 index 00000000..3d208e45 --- /dev/null +++ b/bridgesync/mocks/eth_clienter.go @@ -0,0 +1,1136 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mocks_bridgesync + +import ( + big "math/big" + + common "github.com/ethereum/go-ethereum/common" + + context "context" + + ethereum "github.com/ethereum/go-ethereum" + + mock "github.com/stretchr/testify/mock" + + rpc "github.com/ethereum/go-ethereum/rpc" + + types "github.com/ethereum/go-ethereum/core/types" +) + +// EthClienter is an autogenerated mock type for the EthClienter type +type EthClienter struct { + mock.Mock +} + +type EthClienter_Expecter struct { + mock *mock.Mock +} + +func (_m *EthClienter) EXPECT() *EthClienter_Expecter { + return &EthClienter_Expecter{mock: &_m.Mock} +} + +// BlockByHash provides a mock function with given fields: ctx, hash +func (_m *EthClienter) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) { + ret := _m.Called(ctx, hash) + + if len(ret) == 0 { + panic("no return value specified for BlockByHash") + } + + var r0 *types.Block + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (*types.Block, error)); ok { + return rf(ctx, hash) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) *types.Block); ok { + r0 = rf(ctx, hash) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Block) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { + r1 = rf(ctx, hash) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_BlockByHash_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BlockByHash' +type EthClienter_BlockByHash_Call struct { + *mock.Call +} + +// BlockByHash is a helper method to define mock.On call +// - ctx context.Context +// - hash common.Hash +func (_e *EthClienter_Expecter) BlockByHash(ctx interface{}, hash interface{}) *EthClienter_BlockByHash_Call { + return &EthClienter_BlockByHash_Call{Call: _e.mock.On("BlockByHash", ctx, hash)} +} + +func (_c *EthClienter_BlockByHash_Call) Run(run func(ctx context.Context, hash common.Hash)) 
*EthClienter_BlockByHash_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(common.Hash)) + }) + return _c +} + +func (_c *EthClienter_BlockByHash_Call) Return(_a0 *types.Block, _a1 error) *EthClienter_BlockByHash_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_BlockByHash_Call) RunAndReturn(run func(context.Context, common.Hash) (*types.Block, error)) *EthClienter_BlockByHash_Call { + _c.Call.Return(run) + return _c +} + +// BlockByNumber provides a mock function with given fields: ctx, number +func (_m *EthClienter) BlockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) { + ret := _m.Called(ctx, number) + + if len(ret) == 0 { + panic("no return value specified for BlockByNumber") + } + + var r0 *types.Block + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) (*types.Block, error)); ok { + return rf(ctx, number) + } + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) *types.Block); ok { + r0 = rf(ctx, number) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Block) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *big.Int) error); ok { + r1 = rf(ctx, number) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_BlockByNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BlockByNumber' +type EthClienter_BlockByNumber_Call struct { + *mock.Call +} + +// BlockByNumber is a helper method to define mock.On call +// - ctx context.Context +// - number *big.Int +func (_e *EthClienter_Expecter) BlockByNumber(ctx interface{}, number interface{}) *EthClienter_BlockByNumber_Call { + return &EthClienter_BlockByNumber_Call{Call: _e.mock.On("BlockByNumber", ctx, number)} +} + +func (_c *EthClienter_BlockByNumber_Call) Run(run func(ctx context.Context, number *big.Int)) *EthClienter_BlockByNumber_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), 
args[1].(*big.Int)) + }) + return _c +} + +func (_c *EthClienter_BlockByNumber_Call) Return(_a0 *types.Block, _a1 error) *EthClienter_BlockByNumber_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_BlockByNumber_Call) RunAndReturn(run func(context.Context, *big.Int) (*types.Block, error)) *EthClienter_BlockByNumber_Call { + _c.Call.Return(run) + return _c +} + +// BlockNumber provides a mock function with given fields: ctx +func (_m *EthClienter) BlockNumber(ctx context.Context) (uint64, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for BlockNumber") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (uint64, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) uint64); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_BlockNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BlockNumber' +type EthClienter_BlockNumber_Call struct { + *mock.Call +} + +// BlockNumber is a helper method to define mock.On call +// - ctx context.Context +func (_e *EthClienter_Expecter) BlockNumber(ctx interface{}) *EthClienter_BlockNumber_Call { + return &EthClienter_BlockNumber_Call{Call: _e.mock.On("BlockNumber", ctx)} +} + +func (_c *EthClienter_BlockNumber_Call) Run(run func(ctx context.Context)) *EthClienter_BlockNumber_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *EthClienter_BlockNumber_Call) Return(_a0 uint64, _a1 error) *EthClienter_BlockNumber_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_BlockNumber_Call) RunAndReturn(run func(context.Context) (uint64, error)) *EthClienter_BlockNumber_Call { + _c.Call.Return(run) + return _c +} + +// 
CallContract provides a mock function with given fields: ctx, call, blockNumber +func (_m *EthClienter) CallContract(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) { + ret := _m.Called(ctx, call, blockNumber) + + if len(ret) == 0 { + panic("no return value specified for CallContract") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, ethereum.CallMsg, *big.Int) ([]byte, error)); ok { + return rf(ctx, call, blockNumber) + } + if rf, ok := ret.Get(0).(func(context.Context, ethereum.CallMsg, *big.Int) []byte); ok { + r0 = rf(ctx, call, blockNumber) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, ethereum.CallMsg, *big.Int) error); ok { + r1 = rf(ctx, call, blockNumber) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_CallContract_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CallContract' +type EthClienter_CallContract_Call struct { + *mock.Call +} + +// CallContract is a helper method to define mock.On call +// - ctx context.Context +// - call ethereum.CallMsg +// - blockNumber *big.Int +func (_e *EthClienter_Expecter) CallContract(ctx interface{}, call interface{}, blockNumber interface{}) *EthClienter_CallContract_Call { + return &EthClienter_CallContract_Call{Call: _e.mock.On("CallContract", ctx, call, blockNumber)} +} + +func (_c *EthClienter_CallContract_Call) Run(run func(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int)) *EthClienter_CallContract_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(ethereum.CallMsg), args[2].(*big.Int)) + }) + return _c +} + +func (_c *EthClienter_CallContract_Call) Return(_a0 []byte, _a1 error) *EthClienter_CallContract_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_CallContract_Call) RunAndReturn(run func(context.Context, 
ethereum.CallMsg, *big.Int) ([]byte, error)) *EthClienter_CallContract_Call { + _c.Call.Return(run) + return _c +} + +// Client provides a mock function with given fields: +func (_m *EthClienter) Client() *rpc.Client { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Client") + } + + var r0 *rpc.Client + if rf, ok := ret.Get(0).(func() *rpc.Client); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*rpc.Client) + } + } + + return r0 +} + +// EthClienter_Client_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Client' +type EthClienter_Client_Call struct { + *mock.Call +} + +// Client is a helper method to define mock.On call +func (_e *EthClienter_Expecter) Client() *EthClienter_Client_Call { + return &EthClienter_Client_Call{Call: _e.mock.On("Client")} +} + +func (_c *EthClienter_Client_Call) Run(run func()) *EthClienter_Client_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *EthClienter_Client_Call) Return(_a0 *rpc.Client) *EthClienter_Client_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *EthClienter_Client_Call) RunAndReturn(run func() *rpc.Client) *EthClienter_Client_Call { + _c.Call.Return(run) + return _c +} + +// CodeAt provides a mock function with given fields: ctx, contract, blockNumber +func (_m *EthClienter) CodeAt(ctx context.Context, contract common.Address, blockNumber *big.Int) ([]byte, error) { + ret := _m.Called(ctx, contract, blockNumber) + + if len(ret) == 0 { + panic("no return value specified for CodeAt") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Address, *big.Int) ([]byte, error)); ok { + return rf(ctx, contract, blockNumber) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Address, *big.Int) []byte); ok { + r0 = rf(ctx, contract, blockNumber) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := 
ret.Get(1).(func(context.Context, common.Address, *big.Int) error); ok { + r1 = rf(ctx, contract, blockNumber) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_CodeAt_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CodeAt' +type EthClienter_CodeAt_Call struct { + *mock.Call +} + +// CodeAt is a helper method to define mock.On call +// - ctx context.Context +// - contract common.Address +// - blockNumber *big.Int +func (_e *EthClienter_Expecter) CodeAt(ctx interface{}, contract interface{}, blockNumber interface{}) *EthClienter_CodeAt_Call { + return &EthClienter_CodeAt_Call{Call: _e.mock.On("CodeAt", ctx, contract, blockNumber)} +} + +func (_c *EthClienter_CodeAt_Call) Run(run func(ctx context.Context, contract common.Address, blockNumber *big.Int)) *EthClienter_CodeAt_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(common.Address), args[2].(*big.Int)) + }) + return _c +} + +func (_c *EthClienter_CodeAt_Call) Return(_a0 []byte, _a1 error) *EthClienter_CodeAt_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_CodeAt_Call) RunAndReturn(run func(context.Context, common.Address, *big.Int) ([]byte, error)) *EthClienter_CodeAt_Call { + _c.Call.Return(run) + return _c +} + +// EstimateGas provides a mock function with given fields: ctx, call +func (_m *EthClienter) EstimateGas(ctx context.Context, call ethereum.CallMsg) (uint64, error) { + ret := _m.Called(ctx, call) + + if len(ret) == 0 { + panic("no return value specified for EstimateGas") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, ethereum.CallMsg) (uint64, error)); ok { + return rf(ctx, call) + } + if rf, ok := ret.Get(0).(func(context.Context, ethereum.CallMsg) uint64); ok { + r0 = rf(ctx, call) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context, ethereum.CallMsg) error); ok { + r1 = rf(ctx, call) + } 
else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_EstimateGas_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'EstimateGas' +type EthClienter_EstimateGas_Call struct { + *mock.Call +} + +// EstimateGas is a helper method to define mock.On call +// - ctx context.Context +// - call ethereum.CallMsg +func (_e *EthClienter_Expecter) EstimateGas(ctx interface{}, call interface{}) *EthClienter_EstimateGas_Call { + return &EthClienter_EstimateGas_Call{Call: _e.mock.On("EstimateGas", ctx, call)} +} + +func (_c *EthClienter_EstimateGas_Call) Run(run func(ctx context.Context, call ethereum.CallMsg)) *EthClienter_EstimateGas_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(ethereum.CallMsg)) + }) + return _c +} + +func (_c *EthClienter_EstimateGas_Call) Return(_a0 uint64, _a1 error) *EthClienter_EstimateGas_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_EstimateGas_Call) RunAndReturn(run func(context.Context, ethereum.CallMsg) (uint64, error)) *EthClienter_EstimateGas_Call { + _c.Call.Return(run) + return _c +} + +// FilterLogs provides a mock function with given fields: ctx, q +func (_m *EthClienter) FilterLogs(ctx context.Context, q ethereum.FilterQuery) ([]types.Log, error) { + ret := _m.Called(ctx, q) + + if len(ret) == 0 { + panic("no return value specified for FilterLogs") + } + + var r0 []types.Log + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, ethereum.FilterQuery) ([]types.Log, error)); ok { + return rf(ctx, q) + } + if rf, ok := ret.Get(0).(func(context.Context, ethereum.FilterQuery) []types.Log); ok { + r0 = rf(ctx, q) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]types.Log) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, ethereum.FilterQuery) error); ok { + r1 = rf(ctx, q) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_FilterLogs_Call is a *mock.Call that shadows 
Run/Return methods with type explicit version for method 'FilterLogs' +type EthClienter_FilterLogs_Call struct { + *mock.Call +} + +// FilterLogs is a helper method to define mock.On call +// - ctx context.Context +// - q ethereum.FilterQuery +func (_e *EthClienter_Expecter) FilterLogs(ctx interface{}, q interface{}) *EthClienter_FilterLogs_Call { + return &EthClienter_FilterLogs_Call{Call: _e.mock.On("FilterLogs", ctx, q)} +} + +func (_c *EthClienter_FilterLogs_Call) Run(run func(ctx context.Context, q ethereum.FilterQuery)) *EthClienter_FilterLogs_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(ethereum.FilterQuery)) + }) + return _c +} + +func (_c *EthClienter_FilterLogs_Call) Return(_a0 []types.Log, _a1 error) *EthClienter_FilterLogs_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_FilterLogs_Call) RunAndReturn(run func(context.Context, ethereum.FilterQuery) ([]types.Log, error)) *EthClienter_FilterLogs_Call { + _c.Call.Return(run) + return _c +} + +// HeaderByHash provides a mock function with given fields: ctx, hash +func (_m *EthClienter) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) { + ret := _m.Called(ctx, hash) + + if len(ret) == 0 { + panic("no return value specified for HeaderByHash") + } + + var r0 *types.Header + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (*types.Header, error)); ok { + return rf(ctx, hash) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) *types.Header); ok { + r0 = rf(ctx, hash) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Header) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { + r1 = rf(ctx, hash) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_HeaderByHash_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'HeaderByHash' +type EthClienter_HeaderByHash_Call struct { + 
*mock.Call +} + +// HeaderByHash is a helper method to define mock.On call +// - ctx context.Context +// - hash common.Hash +func (_e *EthClienter_Expecter) HeaderByHash(ctx interface{}, hash interface{}) *EthClienter_HeaderByHash_Call { + return &EthClienter_HeaderByHash_Call{Call: _e.mock.On("HeaderByHash", ctx, hash)} +} + +func (_c *EthClienter_HeaderByHash_Call) Run(run func(ctx context.Context, hash common.Hash)) *EthClienter_HeaderByHash_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(common.Hash)) + }) + return _c +} + +func (_c *EthClienter_HeaderByHash_Call) Return(_a0 *types.Header, _a1 error) *EthClienter_HeaderByHash_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_HeaderByHash_Call) RunAndReturn(run func(context.Context, common.Hash) (*types.Header, error)) *EthClienter_HeaderByHash_Call { + _c.Call.Return(run) + return _c +} + +// HeaderByNumber provides a mock function with given fields: ctx, number +func (_m *EthClienter) HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) { + ret := _m.Called(ctx, number) + + if len(ret) == 0 { + panic("no return value specified for HeaderByNumber") + } + + var r0 *types.Header + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) (*types.Header, error)); ok { + return rf(ctx, number) + } + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) *types.Header); ok { + r0 = rf(ctx, number) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Header) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *big.Int) error); ok { + r1 = rf(ctx, number) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_HeaderByNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'HeaderByNumber' +type EthClienter_HeaderByNumber_Call struct { + *mock.Call +} + +// HeaderByNumber is a helper method to define mock.On call +// - ctx context.Context 
+// - number *big.Int +func (_e *EthClienter_Expecter) HeaderByNumber(ctx interface{}, number interface{}) *EthClienter_HeaderByNumber_Call { + return &EthClienter_HeaderByNumber_Call{Call: _e.mock.On("HeaderByNumber", ctx, number)} +} + +func (_c *EthClienter_HeaderByNumber_Call) Run(run func(ctx context.Context, number *big.Int)) *EthClienter_HeaderByNumber_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*big.Int)) + }) + return _c +} + +func (_c *EthClienter_HeaderByNumber_Call) Return(_a0 *types.Header, _a1 error) *EthClienter_HeaderByNumber_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_HeaderByNumber_Call) RunAndReturn(run func(context.Context, *big.Int) (*types.Header, error)) *EthClienter_HeaderByNumber_Call { + _c.Call.Return(run) + return _c +} + +// PendingCodeAt provides a mock function with given fields: ctx, account +func (_m *EthClienter) PendingCodeAt(ctx context.Context, account common.Address) ([]byte, error) { + ret := _m.Called(ctx, account) + + if len(ret) == 0 { + panic("no return value specified for PendingCodeAt") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Address) ([]byte, error)); ok { + return rf(ctx, account) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Address) []byte); ok { + r0 = rf(ctx, account) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Address) error); ok { + r1 = rf(ctx, account) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_PendingCodeAt_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PendingCodeAt' +type EthClienter_PendingCodeAt_Call struct { + *mock.Call +} + +// PendingCodeAt is a helper method to define mock.On call +// - ctx context.Context +// - account common.Address +func (_e *EthClienter_Expecter) PendingCodeAt(ctx interface{}, 
account interface{}) *EthClienter_PendingCodeAt_Call { + return &EthClienter_PendingCodeAt_Call{Call: _e.mock.On("PendingCodeAt", ctx, account)} +} + +func (_c *EthClienter_PendingCodeAt_Call) Run(run func(ctx context.Context, account common.Address)) *EthClienter_PendingCodeAt_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(common.Address)) + }) + return _c +} + +func (_c *EthClienter_PendingCodeAt_Call) Return(_a0 []byte, _a1 error) *EthClienter_PendingCodeAt_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_PendingCodeAt_Call) RunAndReturn(run func(context.Context, common.Address) ([]byte, error)) *EthClienter_PendingCodeAt_Call { + _c.Call.Return(run) + return _c +} + +// PendingNonceAt provides a mock function with given fields: ctx, account +func (_m *EthClienter) PendingNonceAt(ctx context.Context, account common.Address) (uint64, error) { + ret := _m.Called(ctx, account) + + if len(ret) == 0 { + panic("no return value specified for PendingNonceAt") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Address) (uint64, error)); ok { + return rf(ctx, account) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Address) uint64); ok { + r0 = rf(ctx, account) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Address) error); ok { + r1 = rf(ctx, account) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_PendingNonceAt_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PendingNonceAt' +type EthClienter_PendingNonceAt_Call struct { + *mock.Call +} + +// PendingNonceAt is a helper method to define mock.On call +// - ctx context.Context +// - account common.Address +func (_e *EthClienter_Expecter) PendingNonceAt(ctx interface{}, account interface{}) *EthClienter_PendingNonceAt_Call { + return &EthClienter_PendingNonceAt_Call{Call: 
_e.mock.On("PendingNonceAt", ctx, account)} +} + +func (_c *EthClienter_PendingNonceAt_Call) Run(run func(ctx context.Context, account common.Address)) *EthClienter_PendingNonceAt_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(common.Address)) + }) + return _c +} + +func (_c *EthClienter_PendingNonceAt_Call) Return(_a0 uint64, _a1 error) *EthClienter_PendingNonceAt_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_PendingNonceAt_Call) RunAndReturn(run func(context.Context, common.Address) (uint64, error)) *EthClienter_PendingNonceAt_Call { + _c.Call.Return(run) + return _c +} + +// SendTransaction provides a mock function with given fields: ctx, tx +func (_m *EthClienter) SendTransaction(ctx context.Context, tx *types.Transaction) error { + ret := _m.Called(ctx, tx) + + if len(ret) == 0 { + panic("no return value specified for SendTransaction") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *types.Transaction) error); ok { + r0 = rf(ctx, tx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// EthClienter_SendTransaction_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SendTransaction' +type EthClienter_SendTransaction_Call struct { + *mock.Call +} + +// SendTransaction is a helper method to define mock.On call +// - ctx context.Context +// - tx *types.Transaction +func (_e *EthClienter_Expecter) SendTransaction(ctx interface{}, tx interface{}) *EthClienter_SendTransaction_Call { + return &EthClienter_SendTransaction_Call{Call: _e.mock.On("SendTransaction", ctx, tx)} +} + +func (_c *EthClienter_SendTransaction_Call) Run(run func(ctx context.Context, tx *types.Transaction)) *EthClienter_SendTransaction_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*types.Transaction)) + }) + return _c +} + +func (_c *EthClienter_SendTransaction_Call) Return(_a0 error) *EthClienter_SendTransaction_Call 
{ + _c.Call.Return(_a0) + return _c +} + +func (_c *EthClienter_SendTransaction_Call) RunAndReturn(run func(context.Context, *types.Transaction) error) *EthClienter_SendTransaction_Call { + _c.Call.Return(run) + return _c +} + +// SubscribeFilterLogs provides a mock function with given fields: ctx, q, ch +func (_m *EthClienter) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log) (ethereum.Subscription, error) { + ret := _m.Called(ctx, q, ch) + + if len(ret) == 0 { + panic("no return value specified for SubscribeFilterLogs") + } + + var r0 ethereum.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, ethereum.FilterQuery, chan<- types.Log) (ethereum.Subscription, error)); ok { + return rf(ctx, q, ch) + } + if rf, ok := ret.Get(0).(func(context.Context, ethereum.FilterQuery, chan<- types.Log) ethereum.Subscription); ok { + r0 = rf(ctx, q, ch) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(ethereum.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, ethereum.FilterQuery, chan<- types.Log) error); ok { + r1 = rf(ctx, q, ch) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_SubscribeFilterLogs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubscribeFilterLogs' +type EthClienter_SubscribeFilterLogs_Call struct { + *mock.Call +} + +// SubscribeFilterLogs is a helper method to define mock.On call +// - ctx context.Context +// - q ethereum.FilterQuery +// - ch chan<- types.Log +func (_e *EthClienter_Expecter) SubscribeFilterLogs(ctx interface{}, q interface{}, ch interface{}) *EthClienter_SubscribeFilterLogs_Call { + return &EthClienter_SubscribeFilterLogs_Call{Call: _e.mock.On("SubscribeFilterLogs", ctx, q, ch)} +} + +func (_c *EthClienter_SubscribeFilterLogs_Call) Run(run func(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log)) *EthClienter_SubscribeFilterLogs_Call { + _c.Call.Run(func(args 
mock.Arguments) { + run(args[0].(context.Context), args[1].(ethereum.FilterQuery), args[2].(chan<- types.Log)) + }) + return _c +} + +func (_c *EthClienter_SubscribeFilterLogs_Call) Return(_a0 ethereum.Subscription, _a1 error) *EthClienter_SubscribeFilterLogs_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_SubscribeFilterLogs_Call) RunAndReturn(run func(context.Context, ethereum.FilterQuery, chan<- types.Log) (ethereum.Subscription, error)) *EthClienter_SubscribeFilterLogs_Call { + _c.Call.Return(run) + return _c +} + +// SubscribeNewHead provides a mock function with given fields: ctx, ch +func (_m *EthClienter) SubscribeNewHead(ctx context.Context, ch chan<- *types.Header) (ethereum.Subscription, error) { + ret := _m.Called(ctx, ch) + + if len(ret) == 0 { + panic("no return value specified for SubscribeNewHead") + } + + var r0 ethereum.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, chan<- *types.Header) (ethereum.Subscription, error)); ok { + return rf(ctx, ch) + } + if rf, ok := ret.Get(0).(func(context.Context, chan<- *types.Header) ethereum.Subscription); ok { + r0 = rf(ctx, ch) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(ethereum.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, chan<- *types.Header) error); ok { + r1 = rf(ctx, ch) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_SubscribeNewHead_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubscribeNewHead' +type EthClienter_SubscribeNewHead_Call struct { + *mock.Call +} + +// SubscribeNewHead is a helper method to define mock.On call +// - ctx context.Context +// - ch chan<- *types.Header +func (_e *EthClienter_Expecter) SubscribeNewHead(ctx interface{}, ch interface{}) *EthClienter_SubscribeNewHead_Call { + return &EthClienter_SubscribeNewHead_Call{Call: _e.mock.On("SubscribeNewHead", ctx, ch)} +} + +func (_c *EthClienter_SubscribeNewHead_Call) 
Run(run func(ctx context.Context, ch chan<- *types.Header)) *EthClienter_SubscribeNewHead_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(chan<- *types.Header)) + }) + return _c +} + +func (_c *EthClienter_SubscribeNewHead_Call) Return(_a0 ethereum.Subscription, _a1 error) *EthClienter_SubscribeNewHead_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_SubscribeNewHead_Call) RunAndReturn(run func(context.Context, chan<- *types.Header) (ethereum.Subscription, error)) *EthClienter_SubscribeNewHead_Call { + _c.Call.Return(run) + return _c +} + +// SuggestGasPrice provides a mock function with given fields: ctx +func (_m *EthClienter) SuggestGasPrice(ctx context.Context) (*big.Int, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for SuggestGasPrice") + } + + var r0 *big.Int + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (*big.Int, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) *big.Int); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_SuggestGasPrice_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SuggestGasPrice' +type EthClienter_SuggestGasPrice_Call struct { + *mock.Call +} + +// SuggestGasPrice is a helper method to define mock.On call +// - ctx context.Context +func (_e *EthClienter_Expecter) SuggestGasPrice(ctx interface{}) *EthClienter_SuggestGasPrice_Call { + return &EthClienter_SuggestGasPrice_Call{Call: _e.mock.On("SuggestGasPrice", ctx)} +} + +func (_c *EthClienter_SuggestGasPrice_Call) Run(run func(ctx context.Context)) *EthClienter_SuggestGasPrice_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c 
*EthClienter_SuggestGasPrice_Call) Return(_a0 *big.Int, _a1 error) *EthClienter_SuggestGasPrice_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_SuggestGasPrice_Call) RunAndReturn(run func(context.Context) (*big.Int, error)) *EthClienter_SuggestGasPrice_Call { + _c.Call.Return(run) + return _c +} + +// SuggestGasTipCap provides a mock function with given fields: ctx +func (_m *EthClienter) SuggestGasTipCap(ctx context.Context) (*big.Int, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for SuggestGasTipCap") + } + + var r0 *big.Int + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (*big.Int, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) *big.Int); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_SuggestGasTipCap_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SuggestGasTipCap' +type EthClienter_SuggestGasTipCap_Call struct { + *mock.Call +} + +// SuggestGasTipCap is a helper method to define mock.On call +// - ctx context.Context +func (_e *EthClienter_Expecter) SuggestGasTipCap(ctx interface{}) *EthClienter_SuggestGasTipCap_Call { + return &EthClienter_SuggestGasTipCap_Call{Call: _e.mock.On("SuggestGasTipCap", ctx)} +} + +func (_c *EthClienter_SuggestGasTipCap_Call) Run(run func(ctx context.Context)) *EthClienter_SuggestGasTipCap_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *EthClienter_SuggestGasTipCap_Call) Return(_a0 *big.Int, _a1 error) *EthClienter_SuggestGasTipCap_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_SuggestGasTipCap_Call) RunAndReturn(run func(context.Context) (*big.Int, error)) 
*EthClienter_SuggestGasTipCap_Call { + _c.Call.Return(run) + return _c +} + +// TransactionCount provides a mock function with given fields: ctx, blockHash +func (_m *EthClienter) TransactionCount(ctx context.Context, blockHash common.Hash) (uint, error) { + ret := _m.Called(ctx, blockHash) + + if len(ret) == 0 { + panic("no return value specified for TransactionCount") + } + + var r0 uint + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (uint, error)); ok { + return rf(ctx, blockHash) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) uint); ok { + r0 = rf(ctx, blockHash) + } else { + r0 = ret.Get(0).(uint) + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { + r1 = rf(ctx, blockHash) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_TransactionCount_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'TransactionCount' +type EthClienter_TransactionCount_Call struct { + *mock.Call +} + +// TransactionCount is a helper method to define mock.On call +// - ctx context.Context +// - blockHash common.Hash +func (_e *EthClienter_Expecter) TransactionCount(ctx interface{}, blockHash interface{}) *EthClienter_TransactionCount_Call { + return &EthClienter_TransactionCount_Call{Call: _e.mock.On("TransactionCount", ctx, blockHash)} +} + +func (_c *EthClienter_TransactionCount_Call) Run(run func(ctx context.Context, blockHash common.Hash)) *EthClienter_TransactionCount_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(common.Hash)) + }) + return _c +} + +func (_c *EthClienter_TransactionCount_Call) Return(_a0 uint, _a1 error) *EthClienter_TransactionCount_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_TransactionCount_Call) RunAndReturn(run func(context.Context, common.Hash) (uint, error)) *EthClienter_TransactionCount_Call { + _c.Call.Return(run) + return _c +} + +// 
TransactionInBlock provides a mock function with given fields: ctx, blockHash, index +func (_m *EthClienter) TransactionInBlock(ctx context.Context, blockHash common.Hash, index uint) (*types.Transaction, error) { + ret := _m.Called(ctx, blockHash, index) + + if len(ret) == 0 { + panic("no return value specified for TransactionInBlock") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Hash, uint) (*types.Transaction, error)); ok { + return rf(ctx, blockHash, index) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Hash, uint) *types.Transaction); ok { + r0 = rf(ctx, blockHash, index) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Hash, uint) error); ok { + r1 = rf(ctx, blockHash, index) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_TransactionInBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'TransactionInBlock' +type EthClienter_TransactionInBlock_Call struct { + *mock.Call +} + +// TransactionInBlock is a helper method to define mock.On call +// - ctx context.Context +// - blockHash common.Hash +// - index uint +func (_e *EthClienter_Expecter) TransactionInBlock(ctx interface{}, blockHash interface{}, index interface{}) *EthClienter_TransactionInBlock_Call { + return &EthClienter_TransactionInBlock_Call{Call: _e.mock.On("TransactionInBlock", ctx, blockHash, index)} +} + +func (_c *EthClienter_TransactionInBlock_Call) Run(run func(ctx context.Context, blockHash common.Hash, index uint)) *EthClienter_TransactionInBlock_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(common.Hash), args[2].(uint)) + }) + return _c +} + +func (_c *EthClienter_TransactionInBlock_Call) Return(_a0 *types.Transaction, _a1 error) *EthClienter_TransactionInBlock_Call { + _c.Call.Return(_a0, _a1) + return _c +} + 
+func (_c *EthClienter_TransactionInBlock_Call) RunAndReturn(run func(context.Context, common.Hash, uint) (*types.Transaction, error)) *EthClienter_TransactionInBlock_Call { + _c.Call.Return(run) + return _c +} + +// NewEthClienter creates a new instance of EthClienter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewEthClienter(t interface { + mock.TestingT + Cleanup(func()) +}) *EthClienter { + mock := &EthClienter{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/bridgesync/mocks/reorg_detector.go b/bridgesync/mocks/reorg_detector.go new file mode 100644 index 00000000..d24f4b83 --- /dev/null +++ b/bridgesync/mocks/reorg_detector.go @@ -0,0 +1,147 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks_bridgesync + +import ( + context "context" + + common "github.com/ethereum/go-ethereum/common" + + mock "github.com/stretchr/testify/mock" + + reorgdetector "github.com/0xPolygon/cdk/reorgdetector" +) + +// ReorgDetector is an autogenerated mock type for the ReorgDetector type +type ReorgDetector struct { + mock.Mock +} + +type ReorgDetector_Expecter struct { + mock *mock.Mock +} + +func (_m *ReorgDetector) EXPECT() *ReorgDetector_Expecter { + return &ReorgDetector_Expecter{mock: &_m.Mock} +} + +// AddBlockToTrack provides a mock function with given fields: ctx, id, blockNum, blockHash +func (_m *ReorgDetector) AddBlockToTrack(ctx context.Context, id string, blockNum uint64, blockHash common.Hash) error { + ret := _m.Called(ctx, id, blockNum, blockHash) + + if len(ret) == 0 { + panic("no return value specified for AddBlockToTrack") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, uint64, common.Hash) error); ok { + r0 = rf(ctx, id, blockNum, blockHash) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ReorgDetector_AddBlockToTrack_Call is a 
*mock.Call that shadows Run/Return methods with type explicit version for method 'AddBlockToTrack' +type ReorgDetector_AddBlockToTrack_Call struct { + *mock.Call +} + +// AddBlockToTrack is a helper method to define mock.On call +// - ctx context.Context +// - id string +// - blockNum uint64 +// - blockHash common.Hash +func (_e *ReorgDetector_Expecter) AddBlockToTrack(ctx interface{}, id interface{}, blockNum interface{}, blockHash interface{}) *ReorgDetector_AddBlockToTrack_Call { + return &ReorgDetector_AddBlockToTrack_Call{Call: _e.mock.On("AddBlockToTrack", ctx, id, blockNum, blockHash)} +} + +func (_c *ReorgDetector_AddBlockToTrack_Call) Run(run func(ctx context.Context, id string, blockNum uint64, blockHash common.Hash)) *ReorgDetector_AddBlockToTrack_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].(uint64), args[3].(common.Hash)) + }) + return _c +} + +func (_c *ReorgDetector_AddBlockToTrack_Call) Return(_a0 error) *ReorgDetector_AddBlockToTrack_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *ReorgDetector_AddBlockToTrack_Call) RunAndReturn(run func(context.Context, string, uint64, common.Hash) error) *ReorgDetector_AddBlockToTrack_Call { + _c.Call.Return(run) + return _c +} + +// Subscribe provides a mock function with given fields: id +func (_m *ReorgDetector) Subscribe(id string) (*reorgdetector.Subscription, error) { + ret := _m.Called(id) + + if len(ret) == 0 { + panic("no return value specified for Subscribe") + } + + var r0 *reorgdetector.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(string) (*reorgdetector.Subscription, error)); ok { + return rf(id) + } + if rf, ok := ret.Get(0).(func(string) *reorgdetector.Subscription); ok { + r0 = rf(id) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*reorgdetector.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// 
ReorgDetector_Subscribe_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Subscribe' +type ReorgDetector_Subscribe_Call struct { + *mock.Call +} + +// Subscribe is a helper method to define mock.On call +// - id string +func (_e *ReorgDetector_Expecter) Subscribe(id interface{}) *ReorgDetector_Subscribe_Call { + return &ReorgDetector_Subscribe_Call{Call: _e.mock.On("Subscribe", id)} +} + +func (_c *ReorgDetector_Subscribe_Call) Run(run func(id string)) *ReorgDetector_Subscribe_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string)) + }) + return _c +} + +func (_c *ReorgDetector_Subscribe_Call) Return(_a0 *reorgdetector.Subscription, _a1 error) *ReorgDetector_Subscribe_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ReorgDetector_Subscribe_Call) RunAndReturn(run func(string) (*reorgdetector.Subscription, error)) *ReorgDetector_Subscribe_Call { + _c.Call.Return(run) + return _c +} + +// NewReorgDetector creates a new instance of ReorgDetector. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewReorgDetector(t interface { + mock.TestingT + Cleanup(func()) +}) *ReorgDetector { + mock := &ReorgDetector{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/bridgesync/processor.go b/bridgesync/processor.go index e4ba5423..e8a79c1f 100644 --- a/bridgesync/processor.go +++ b/bridgesync/processor.go @@ -20,9 +20,14 @@ import ( _ "modernc.org/sqlite" ) +const ( + globalIndexPartSize = 4 + globalIndexMaxSize = 9 +) + var ( - // ErrBlockNotProcessed indicates that the given block(s) have not been processed yet. - ErrBlockNotProcessed = errors.New("given block(s) have not been processed yet") + // errBlockNotProcessedFormat indicates that the given block(s) have not been processed yet. 
+ errBlockNotProcessedFormat = fmt.Sprintf("block %%d not processed, last processed: %%d") ) // Bridge is the representation of a bridge event @@ -93,10 +98,15 @@ type Event struct { Claim *Claim } +type BridgeContractor interface { + LastUpdatedDepositCount(ctx context.Context, BlockNumber uint64) (uint32, error) +} + type processor struct { - db *sql.DB - exitTree *tree.AppendOnlyTree - log *log.Logger + db *sql.DB + exitTree *tree.AppendOnlyTree + log *log.Logger + bridgeContract BridgeContractor } func newProcessor(dbPath, loggerPrefix string) (*processor, error) { @@ -116,6 +126,11 @@ func newProcessor(dbPath, loggerPrefix string) (*processor, error) { log: logger, }, nil } +func (p *processor) GetBridgesPublished( + ctx context.Context, fromBlock, toBlock uint64, +) ([]Bridge, error) { + return p.GetBridges(ctx, fromBlock, toBlock) +} func (p *processor) GetBridges( ctx context.Context, fromBlock, toBlock uint64, @@ -196,7 +211,7 @@ func (p *processor) isBlockProcessed(tx db.Querier, blockNum uint64) error { return err } if lpb < blockNum { - return ErrBlockNotProcessed + return fmt.Errorf(errBlockNotProcessedFormat, blockNum, lpb) } return nil } @@ -300,7 +315,7 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { func GenerateGlobalIndex(mainnetFlag bool, rollupIndex uint32, localExitRootIndex uint32) *big.Int { var ( globalIndexBytes []byte - buf [4]byte + buf [globalIndexPartSize]byte ) if mainnetFlag { globalIndexBytes = append(globalIndexBytes, big.NewInt(1).Bytes()...) @@ -313,5 +328,52 @@ func GenerateGlobalIndex(mainnetFlag bool, rollupIndex uint32, localExitRootInde leri := big.NewInt(0).SetUint64(uint64(localExitRootIndex)).FillBytes(buf[:]) globalIndexBytes = append(globalIndexBytes, leri...) - return big.NewInt(0).SetBytes(globalIndexBytes) + result := big.NewInt(0).SetBytes(globalIndexBytes) + + return result +} + +// Decodes global index to its three parts: +// 1. mainnetFlag - first byte +// 2. 
rollupIndex - next 4 bytes +// 3. localExitRootIndex - last 4 bytes +// NOTE - mainnet flag is not in the global index bytes if it is false +// NOTE - rollup index is 0 if mainnet flag is true +// NOTE - rollup index is not in the global index bytes if mainnet flag is false and rollup index is 0 +func DecodeGlobalIndex(globalIndex *big.Int) (mainnetFlag bool, + rollupIndex uint32, localExitRootIndex uint32, err error) { + globalIndexBytes := globalIndex.Bytes() + l := len(globalIndexBytes) + if l > globalIndexMaxSize { + return false, 0, 0, errors.New("invalid global index length") + } + + if l == 0 { + // false, 0, 0 + return + } + + if l == globalIndexMaxSize { + // true, rollupIndex, localExitRootIndex + mainnetFlag = true + } + + localExitRootFromIdx := l - globalIndexPartSize + if localExitRootFromIdx < 0 { + localExitRootFromIdx = 0 + } + + rollupIndexFromIdx := localExitRootFromIdx - globalIndexPartSize + if rollupIndexFromIdx < 0 { + rollupIndexFromIdx = 0 + } + + rollupIndex = convertBytesToUint32(globalIndexBytes[rollupIndexFromIdx:localExitRootFromIdx]) + localExitRootIndex = convertBytesToUint32(globalIndexBytes[localExitRootFromIdx:]) + + return +} + +func convertBytesToUint32(bytes []byte) uint32 { + return uint32(big.NewInt(0).SetBytes(bytes).Uint64()) } diff --git a/bridgesync/processor_test.go b/bridgesync/processor_test.go index 2ff03c76..ab31f17d 100644 --- a/bridgesync/processor_test.go +++ b/bridgesync/processor_test.go @@ -3,6 +3,7 @@ package bridgesync import ( "context" "encoding/json" + "errors" "fmt" "math/big" "os" @@ -11,13 +12,71 @@ import ( "testing" migrationsBridge "github.com/0xPolygon/cdk/bridgesync/migrations" + "github.com/0xPolygon/cdk/db" "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/sync" "github.com/0xPolygon/cdk/tree/testvectors" + "github.com/0xPolygon/cdk/tree/types" "github.com/ethereum/go-ethereum/common" + "github.com/russross/meddler" "github.com/stretchr/testify/require" ) +func TestBigIntString(t 
*testing.T) { + globalIndex := GenerateGlobalIndex(true, 0, 1093) + fmt.Println(globalIndex.String()) + + _, ok := new(big.Int).SetString(globalIndex.String(), 10) + require.True(t, ok) + + dbPath := path.Join(t.TempDir(), "file::memory:?cache=shared") + + err := migrationsBridge.RunMigrations(dbPath) + require.NoError(t, err) + db, err := db.NewSQLiteDB(dbPath) + require.NoError(t, err) + + ctx := context.Background() + tx, err := db.BeginTx(ctx, nil) + require.NoError(t, err) + + claim := &Claim{ + BlockNum: 1, + BlockPos: 0, + GlobalIndex: GenerateGlobalIndex(true, 0, 1093), + OriginNetwork: 11, + Amount: big.NewInt(11), + OriginAddress: common.HexToAddress("0x11"), + DestinationAddress: common.HexToAddress("0x11"), + ProofLocalExitRoot: types.Proof{}, + ProofRollupExitRoot: types.Proof{}, + MainnetExitRoot: common.Hash{}, + RollupExitRoot: common.Hash{}, + GlobalExitRoot: common.Hash{}, + DestinationNetwork: 12, + } + + _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, claim.BlockNum) + require.NoError(t, err) + require.NoError(t, meddler.Insert(tx, "claim", claim)) + + require.NoError(t, tx.Commit()) + + tx, err = db.BeginTx(ctx, nil) + require.NoError(t, err) + + rows, err := tx.Query(` + SELECT * FROM claim + WHERE block_num >= $1 AND block_num <= $2; + `, claim.BlockNum, claim.BlockNum) + require.NoError(t, err) + + claimsFromDB := []*Claim{} + require.NoError(t, meddler.ScanAll(rows, &claimsFromDB)) + require.Len(t, claimsFromDB, 1) + require.Equal(t, claim, claimsFromDB[0]) +} + func TestProceessor(t *testing.T) { path := path.Join(t.TempDir(), "file::memory:?cache=shared") log.Debugf("sqlite path: %s", path) @@ -53,7 +112,7 @@ func TestProceessor(t *testing.T) { fromBlock: 0, toBlock: 2, expectedClaims: nil, - expectedErr: ErrBlockNotProcessed, + expectedErr: fmt.Errorf(errBlockNotProcessedFormat, 2, 0), }, &getBridges{ p: p, @@ -62,7 +121,7 @@ func TestProceessor(t *testing.T) { fromBlock: 0, toBlock: 2, expectedBridges: nil, - expectedErr: 
ErrBlockNotProcessed, + expectedErr: fmt.Errorf(errBlockNotProcessedFormat, 2, 0), }, &processBlockAction{ p: p, @@ -85,7 +144,7 @@ func TestProceessor(t *testing.T) { fromBlock: 0, toBlock: 2, expectedClaims: nil, - expectedErr: ErrBlockNotProcessed, + expectedErr: fmt.Errorf(errBlockNotProcessedFormat, 2, 1), }, &getBridges{ p: p, @@ -94,7 +153,7 @@ func TestProceessor(t *testing.T) { fromBlock: 0, toBlock: 2, expectedBridges: nil, - expectedErr: ErrBlockNotProcessed, + expectedErr: fmt.Errorf(errBlockNotProcessedFormat, 2, 1), }, &getClaims{ p: p, @@ -128,7 +187,7 @@ func TestProceessor(t *testing.T) { fromBlock: 0, toBlock: 2, expectedClaims: nil, - expectedErr: ErrBlockNotProcessed, + expectedErr: fmt.Errorf(errBlockNotProcessedFormat, 2, 0), }, &getBridges{ p: p, @@ -137,7 +196,7 @@ func TestProceessor(t *testing.T) { fromBlock: 0, toBlock: 2, expectedBridges: nil, - expectedErr: ErrBlockNotProcessed, + expectedErr: fmt.Errorf(errBlockNotProcessedFormat, 2, 0), }, &processBlockAction{ p: p, @@ -582,3 +641,219 @@ func TestHashBridge(t *testing.T) { }) } } + +func TestDecodeGlobalIndex(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + globalIndex *big.Int + expectedMainnetFlag bool + expectedRollupIndex uint32 + expectedLocalIndex uint32 + expectedErr error + }{ + { + name: "Mainnet flag true, rollup index 0", + globalIndex: GenerateGlobalIndex(true, 0, 2), + expectedMainnetFlag: true, + expectedRollupIndex: 0, + expectedLocalIndex: 2, + expectedErr: nil, + }, + { + name: "Mainnet flag true, indexes 0", + globalIndex: GenerateGlobalIndex(true, 0, 0), + expectedMainnetFlag: true, + expectedRollupIndex: 0, + expectedLocalIndex: 0, + expectedErr: nil, + }, + { + name: "Mainnet flag false, rollup index 0", + globalIndex: GenerateGlobalIndex(false, 0, 2), + expectedMainnetFlag: false, + expectedRollupIndex: 0, + expectedLocalIndex: 2, + expectedErr: nil, + }, + { + name: "Mainnet flag false, rollup index non-zero", + globalIndex: 
GenerateGlobalIndex(false, 11, 0), + expectedMainnetFlag: false, + expectedRollupIndex: 11, + expectedLocalIndex: 0, + expectedErr: nil, + }, + { + name: "Mainnet flag false, indexes 0", + globalIndex: GenerateGlobalIndex(false, 0, 0), + expectedMainnetFlag: false, + expectedRollupIndex: 0, + expectedLocalIndex: 0, + expectedErr: nil, + }, + { + name: "Mainnet flag false, indexes non zero", + globalIndex: GenerateGlobalIndex(false, 1231, 111234), + expectedMainnetFlag: false, + expectedRollupIndex: 1231, + expectedLocalIndex: 111234, + expectedErr: nil, + }, + { + name: "Invalid global index length", + globalIndex: big.NewInt(0).SetBytes([]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}), + expectedMainnetFlag: false, + expectedRollupIndex: 0, + expectedLocalIndex: 0, + expectedErr: errors.New("invalid global index length"), + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + mainnetFlag, rollupIndex, localExitRootIndex, err := DecodeGlobalIndex(tt.globalIndex) + if tt.expectedErr != nil { + require.EqualError(t, err, tt.expectedErr.Error()) + } else { + require.NoError(t, err) + } + require.Equal(t, tt.expectedMainnetFlag, mainnetFlag) + require.Equal(t, tt.expectedRollupIndex, rollupIndex) + require.Equal(t, tt.expectedLocalIndex, localExitRootIndex) + }) + } +} + +func TestInsertAndGetClaim(t *testing.T) { + path := path.Join(t.TempDir(), "file::memory:?cache=shared") + log.Debugf("sqlite path: %s", path) + err := migrationsBridge.RunMigrations(path) + require.NoError(t, err) + p, err := newProcessor(path, "foo") + require.NoError(t, err) + + tx, err := p.db.BeginTx(context.Background(), nil) + require.NoError(t, err) + + // insert test claim + testClaim := &Claim{ + BlockNum: 1, + BlockPos: 0, + GlobalIndex: GenerateGlobalIndex(true, 0, 1093), + OriginNetwork: 11, + OriginAddress: common.HexToAddress("0x11"), + DestinationAddress: common.HexToAddress("0x11"), + Amount: big.NewInt(11), + ProofLocalExitRoot: 
types.Proof{}, + ProofRollupExitRoot: types.Proof{}, + MainnetExitRoot: common.Hash{}, + RollupExitRoot: common.Hash{}, + GlobalExitRoot: common.Hash{}, + DestinationNetwork: 12, + Metadata: []byte("0x11"), + IsMessage: false, + } + + _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, testClaim.BlockNum) + require.NoError(t, err) + require.NoError(t, meddler.Insert(tx, "claim", testClaim)) + + require.NoError(t, tx.Commit()) + + // get test claim + claims, err := p.GetClaims(context.Background(), 1, 1) + require.NoError(t, err) + require.Len(t, claims, 1) + require.Equal(t, testClaim, &claims[0]) +} + +type mockBridgeContract struct { + lastUpdatedDepositCount uint32 + err error +} + +func (m *mockBridgeContract) LastUpdatedDepositCount(ctx context.Context, blockNumber uint64) (uint32, error) { + return m.lastUpdatedDepositCount, m.err +} + +func TestGetBridgesPublished(t *testing.T) { + t.Parallel() + + testCases := []struct { + name string + fromBlock uint64 + toBlock uint64 + bridges []Bridge + lastUpdatedDepositCount uint32 + expectedBridges []Bridge + expectedError error + }{ + { + name: "no bridges", + fromBlock: 1, + toBlock: 10, + bridges: []Bridge{}, + lastUpdatedDepositCount: 0, + expectedBridges: []Bridge{}, + expectedError: nil, + }, + { + name: "bridges within deposit count", + fromBlock: 1, + toBlock: 10, + bridges: []Bridge{ + {DepositCount: 1, BlockNum: 1, Amount: big.NewInt(1)}, + {DepositCount: 2, BlockNum: 2, Amount: big.NewInt(1)}, + }, + lastUpdatedDepositCount: 2, + expectedBridges: []Bridge{ + {DepositCount: 1, BlockNum: 1, Amount: big.NewInt(1)}, + {DepositCount: 2, BlockNum: 2, Amount: big.NewInt(1)}, + }, + expectedError: nil, + }, + } + + for _, tc := range testCases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + path := path.Join(t.TempDir(), "file::memory:?cache=shared") + require.NoError(t, migrationsBridge.RunMigrations(path)) + p, err := newProcessor(path, "foo") + require.NoError(t, err) + + tx, err := 
p.db.BeginTx(context.Background(), nil) + require.NoError(t, err) + + for i := tc.fromBlock; i <= tc.toBlock; i++ { + _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, i) + require.NoError(t, err) + } + + for _, bridge := range tc.bridges { + require.NoError(t, meddler.Insert(tx, "bridge", &bridge)) + } + + require.NoError(t, tx.Commit()) + + ctx := context.Background() + bridges, err := p.GetBridgesPublished(ctx, tc.fromBlock, tc.toBlock) + + if tc.expectedError != nil { + require.Equal(t, tc.expectedError, err) + } else { + require.NoError(t, err) + require.Equal(t, tc.expectedBridges, bridges) + } + }) + } +} diff --git a/claimsponsor/e2e_test.go b/claimsponsor/e2e_test.go index b4fce499..426d7b3e 100644 --- a/claimsponsor/e2e_test.go +++ b/claimsponsor/e2e_test.go @@ -26,7 +26,7 @@ func TestE2EL1toEVML2(t *testing.T) { env := aggoraclehelpers.SetupAggoracleWithEVMChain(t) dbPathBridgeSyncL1 := path.Join(t.TempDir(), "file::memory:?cache=shared") testClient := helpers.TestClient{ClientRenamed: env.L1Client.Client()} - bridgeSyncL1, err := bridgesync.NewL1(ctx, dbPathBridgeSyncL1, env.BridgeL1Addr, 10, etherman.LatestBlock, env.ReorgDetector, testClient, 0, time.Millisecond*10, 0, 0) + bridgeSyncL1, err := bridgesync.NewL1(ctx, dbPathBridgeSyncL1, env.BridgeL1Addr, 10, etherman.LatestBlock, env.ReorgDetector, testClient, 0, time.Millisecond*10, 0, 0, 1) require.NoError(t, err) go bridgeSyncL1.Start(ctx) diff --git a/cmd/main.go b/cmd/main.go index 23c01783..15b0fdc6 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -41,7 +41,8 @@ var ( Aliases: []string{"co"}, Usage: "List of components to run", Required: false, - Value: cli.NewStringSlice(common.SEQUENCE_SENDER, common.AGGREGATOR, common.AGGORACLE, common.RPC), + Value: cli.NewStringSlice(common.SEQUENCE_SENDER, common.AGGREGATOR, + common.AGGORACLE, common.RPC, common.AGGSENDER), } saveConfigFlag = cli.StringFlag{ Name: config.FlagSaveConfigPath, diff --git a/cmd/run.go b/cmd/run.go index 4bd4dd0d..c30da739 
100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -12,10 +12,12 @@ import ( zkevm "github.com/0xPolygon/cdk" dataCommitteeClient "github.com/0xPolygon/cdk-data-availability/client" jRPC "github.com/0xPolygon/cdk-rpc/rpc" + "github.com/0xPolygon/cdk/agglayer" "github.com/0xPolygon/cdk/aggoracle" "github.com/0xPolygon/cdk/aggoracle/chaingersender" "github.com/0xPolygon/cdk/aggregator" "github.com/0xPolygon/cdk/aggregator/db" + "github.com/0xPolygon/cdk/aggsender" "github.com/0xPolygon/cdk/bridgesync" "github.com/0xPolygon/cdk/claimsponsor" cdkcommon "github.com/0xPolygon/cdk/common" @@ -61,7 +63,7 @@ func start(cliCtx *cli.Context) error { components := cliCtx.StringSlice(config.FlagComponents) l1Client := runL1ClientIfNeeded(components, c.Etherman.URL) - l2Client := runL2ClientIfNeeded(components, c.AggOracle.EVMSender.URLRPCL2) + l2Client := runL2ClientIfNeeded(components, getL2RPCUrl(c)) reorgDetectorL1, errChanL1 := runReorgDetectorL1IfNeeded(cliCtx.Context, components, l1Client, &c.ReorgDetectorL1) go func() { if err := <-errChanL1; err != nil { @@ -119,6 +121,18 @@ func start(cliCtx *cli.Context) error { log.Fatal(err) } }() + case cdkcommon.AGGSENDER: + aggsender, err := createAggSender( + cliCtx.Context, + c.AggSender, + l1InfoTreeSync, + l2BridgeSync, + ) + if err != nil { + log.Fatal(err) + } + + go aggsender.Start(cliCtx.Context) } } @@ -127,6 +141,18 @@ func start(cliCtx *cli.Context) error { return nil } +func createAggSender( + ctx context.Context, + cfg aggsender.Config, + l1InfoTreeSync *l1infotreesync.L1InfoTreeSync, + l2Syncer *bridgesync.BridgeSync, +) (*aggsender.AggSender, error) { + logger := log.WithFields("module", cdkcommon.AGGSENDER) + agglayerClient := agglayer.NewAggLayerClient(cfg.AggLayerURL) + + return aggsender.New(ctx, logger, cfg, agglayerClient, l1InfoTreeSync, l2Syncer) +} + func createAggregator(ctx context.Context, c config.Config, runMigrations bool) *aggregator.Aggregator { logger := log.WithFields("module", cdkcommon.AGGREGATOR) // 
Migrations @@ -479,7 +505,8 @@ func runL1InfoTreeSyncerIfNeeded( l1Client *ethclient.Client, reorgDetector *reorgdetector.ReorgDetector, ) *l1infotreesync.L1InfoTreeSync { - if !isNeeded([]string{cdkcommon.AGGORACLE, cdkcommon.RPC, cdkcommon.SEQUENCE_SENDER}, components) { + if !isNeeded([]string{cdkcommon.AGGORACLE, cdkcommon.RPC, + cdkcommon.SEQUENCE_SENDER, cdkcommon.AGGSENDER}, components) { return nil } l1InfoTreeSync, err := l1infotreesync.New( @@ -509,6 +536,7 @@ func runL1ClientIfNeeded(components []string, urlRPCL1 string) *ethclient.Client if !isNeeded([]string{ cdkcommon.SEQUENCE_SENDER, cdkcommon.AGGREGATOR, cdkcommon.AGGORACLE, cdkcommon.RPC, + cdkcommon.AGGSENDER, }, components) { return nil } @@ -522,10 +550,11 @@ func runL1ClientIfNeeded(components []string, urlRPCL1 string) *ethclient.Client } func runL2ClientIfNeeded(components []string, urlRPCL2 string) *ethclient.Client { - if !isNeeded([]string{cdkcommon.AGGORACLE, cdkcommon.RPC}, components) { + if !isNeeded([]string{cdkcommon.AGGORACLE, cdkcommon.RPC, cdkcommon.AGGSENDER}, components) { return nil } - log.Debugf("dialing L2 client at: %s", urlRPCL2) + + log.Infof("dialing L2 client at: %s", urlRPCL2) l2CLient, err := ethclient.Dial(urlRPCL2) if err != nil { log.Fatal(err) @@ -542,7 +571,7 @@ func runReorgDetectorL1IfNeeded( ) (*reorgdetector.ReorgDetector, chan error) { if !isNeeded([]string{ cdkcommon.SEQUENCE_SENDER, cdkcommon.AGGREGATOR, - cdkcommon.AGGORACLE, cdkcommon.RPC}, + cdkcommon.AGGORACLE, cdkcommon.RPC, cdkcommon.AGGSENDER}, components) { return nil, nil } @@ -565,7 +594,7 @@ func runReorgDetectorL2IfNeeded( l2Client *ethclient.Client, cfg *reorgdetector.Config, ) (*reorgdetector.ReorgDetector, chan error) { - if !isNeeded([]string{cdkcommon.AGGORACLE, cdkcommon.RPC}, components) { + if !isNeeded([]string{cdkcommon.AGGORACLE, cdkcommon.RPC, cdkcommon.AGGSENDER}, components) { return nil, nil } rd := newReorgDetector(cfg, l2Client) @@ -675,6 +704,7 @@ func runBridgeSyncL1IfNeeded( 
cfg.WaitForNewBlocksPeriod.Duration, cfg.RetryAfterErrorPeriod.Duration, cfg.MaxRetryAttemptsAfterError, + cfg.OriginNetwork, ) if err != nil { log.Fatalf("error creating bridgeSyncL1: %s", err) @@ -691,10 +721,10 @@ func runBridgeSyncL2IfNeeded( reorgDetectorL2 *reorgdetector.ReorgDetector, l2Client *ethclient.Client, ) *bridgesync.BridgeSync { - // TODO: will be needed by AGGSENDER - if !isNeeded([]string{cdkcommon.RPC}, components) { + if !isNeeded([]string{cdkcommon.RPC, cdkcommon.AGGSENDER}, components) { return nil } + bridgeSyncL2, err := bridgesync.NewL2( ctx, cfg.DBPath, @@ -707,6 +737,7 @@ func runBridgeSyncL2IfNeeded( cfg.WaitForNewBlocksPeriod.Duration, cfg.RetryAfterErrorPeriod.Duration, cfg.MaxRetryAttemptsAfterError, + cfg.OriginNetwork, ) if err != nil { log.Fatalf("error creating bridgeSyncL2: %s", err) @@ -745,3 +776,11 @@ func createRPC( return jRPC.NewServer(cfg, services, jRPC.WithLogger(logger.GetSugaredLogger())) } + +func getL2RPCUrl(c *config.Config) string { + if c.AggSender.URLRPCL2 != "" { + return c.AggSender.URLRPCL2 + } + + return c.AggOracle.EVMSender.URLRPCL2 +} diff --git a/common/common.go b/common/common.go index cd5b5d70..c74f56e4 100644 --- a/common/common.go +++ b/common/common.go @@ -1,10 +1,15 @@ package common import ( + "crypto/ecdsa" "encoding/binary" "math/big" + "os" + "path/filepath" + "github.com/0xPolygon/cdk/config/types" "github.com/0xPolygon/cdk/log" + "github.com/ethereum/go-ethereum/accounts/keystore" "github.com/ethereum/go-ethereum/common" "github.com/iden3/go-iden3-crypto/keccak256" ) @@ -88,3 +93,19 @@ func CalculateAccInputHash( return common.BytesToHash(keccak256.Hash(v1, v2, v3, v4, v5, v6)) } + +// NewKeyFromKeystore creates a private key from a keystore file +func NewKeyFromKeystore(cfg types.KeystoreFileConfig) (*ecdsa.PrivateKey, error) { + if cfg.Path == "" && cfg.Password == "" { + return nil, nil + } + keystoreEncrypted, err := os.ReadFile(filepath.Clean(cfg.Path)) + if err != nil { + return nil, 
err + } + key, err := keystore.DecryptKey(keystoreEncrypted, cfg.Password) + if err != nil { + return nil, err + } + return key.PrivateKey, nil +} diff --git a/common/components.go b/common/components.go index 0c2df8d7..7ef9d285 100644 --- a/common/components.go +++ b/common/components.go @@ -13,4 +13,6 @@ const ( CLAIM_SPONSOR = "claim-sponsor" //nolint:stylecheck // PROVER name to identify the prover component PROVER = "prover" + // AGGSENDER name to identify the aggsender component + AGGSENDER = "aggsender" ) diff --git a/config/config.go b/config/config.go index b21ba971..9363b93b 100644 --- a/config/config.go +++ b/config/config.go @@ -10,6 +10,7 @@ import ( jRPC "github.com/0xPolygon/cdk-rpc/rpc" "github.com/0xPolygon/cdk/aggoracle" "github.com/0xPolygon/cdk/aggregator" + "github.com/0xPolygon/cdk/aggsender" "github.com/0xPolygon/cdk/bridgesync" "github.com/0xPolygon/cdk/claimsponsor" "github.com/0xPolygon/cdk/common" @@ -135,7 +136,6 @@ type Config struct { NetworkConfig NetworkConfig // Configuration of the sequence sender service SequenceSender sequencesender.Config - // Common Config that affects all the services Common common.Config // Configuration of the reorg detector service to be used for the L1 @@ -162,6 +162,9 @@ type Config struct { // LastGERSync is the config for the synchronizer in charge of syncing the last GER injected on L2. 
// Needed for the bridge service (RPC) LastGERSync lastgersync.Config + + // AggSender is the configuration of the agg sender service + AggSender aggsender.Config } // Load loads the configuration diff --git a/config/default.go b/config/default.go index 5e5fafcb..7f2ae8b6 100644 --- a/config/default.go +++ b/config/default.go @@ -7,7 +7,7 @@ L1URL = "http://localhost:8545" L2URL = "http://localhost:8123" L1AggOracleURL = "http://test-aggoracle-l1:8545" L2AggOracleURL = "http://test-aggoracle-l2:8545" - +AggLayerURL = "https://agglayer-dev.polygon.technology" ForkId = 9 ContractVersions = "elderberry" @@ -17,13 +17,12 @@ L2Coinbase = "0xfa3b44587990f97ba8b6ba7e230a5f0e95d14b3d" SequencerPrivateKeyPath = "/app/sequencer.keystore" SequencerPrivateKeyPassword = "test" WitnessURL = "http://localhost:8123" -AggLayerURL = "https://agglayer-dev.polygon.technology" AggregatorPrivateKeyPath = "/app/keystore/aggregator.keystore" AggregatorPrivateKeyPassword = "testonly" # Who send Proof to L1? AggLayer addr, or aggregator addr? 
SenderProofToL1Addr = "0x0000000000000000000000000000000000000000" - +polygonBridgeAddr = "0x0000000000000000000000000000000000000000" # This values can be override directly from genesis.json @@ -36,7 +35,7 @@ genesisBlockNumber = 0 polygonRollupManagerAddress = "0x0000000000000000000000000000000000000000" polTokenAddress = "0x0000000000000000000000000000000000000000" polygonZkEVMAddress = "0x0000000000000000000000000000000000000000" - polygonBridgeAddr = "0x0000000000000000000000000000000000000000" + [L2Config] GlobalExitRootAddr = "0x0000000000000000000000000000000000000000" @@ -265,7 +264,7 @@ WriteTimeout = "2s" MaxRequestsPerIPAndSecond = 10 [ClaimSponsor] -DBPath = "/{{PathRWData}}/claimsopnsor" +DBPath = "/{{PathRWData}}/claimsopnsor.sqlite" Enabled = true SenderAddr = "0xfa3b44587990f97ba8b6ba7e230a5f0e95d14b3d" BridgeAddrL2 = "0xB7098a13a48EcE087d3DA15b2D28eCE0f89819B8" @@ -297,28 +296,30 @@ GasOffset = 0 HTTPHeaders = [] [BridgeL1Sync] -DBPath = "{{PathRWData}}/bridgel1sync" +DBPath = "{{PathRWData}}/bridgel1sync.sqlite" BlockFinality = "LatestBlock" InitialBlockNum = 0 -BridgeAddr = "{{L1Config.polygonBridgeAddr}}" +BridgeAddr = "{{polygonBridgeAddr}}" SyncBlockChunkSize = 100 RetryAfterErrorPeriod = "1s" MaxRetryAttemptsAfterError = -1 WaitForNewBlocksPeriod = "3s" +OriginNetwork=0 [BridgeL2Sync] -DBPath = "{{PathRWData}}/bridgel2sync" +DBPath = "{{PathRWData}}/bridgel2sync.sqlite" BlockFinality = "LatestBlock" InitialBlockNum = 0 -BridgeAddr = "{{L1Config.polygonBridgeAddr}}" +BridgeAddr = "{{polygonBridgeAddr}}" SyncBlockChunkSize = 100 RetryAfterErrorPeriod = "1s" MaxRetryAttemptsAfterError = -1 WaitForNewBlocksPeriod = "3s" +OriginNetwork=1 [LastGERSync] # MDBX database path -DBPath = "{{PathRWData}}/lastgersync" +DBPath = "{{PathRWData}}/lastgersync.sqlite" BlockFinality = "LatestBlock" InitialBlockNum = 0 GlobalExitRootL2Addr = "{{L2Config.GlobalExitRootAddr}}" @@ -335,4 +336,12 @@ RollupManagerAddr = "{{L1Config.polygonRollupManagerAddress}}" 
GlobalExitRootManagerAddr = "{{L1Config.polygonZkEVMGlobalExitRootAddress}}" +[AggSender] +StoragePath = "{{PathRWData}}/aggsender.sqlite" +AggLayerURL = "{{AggLayerURL}}" +AggsenderPrivateKey = {Path = "{{SequencerPrivateKeyPath}}", Password = "{{SequencerPrivateKeyPassword}}"} +BlockGetInterval = "2s" +URLRPCL2="{{L2URL}}" +CheckSettledInterval = "2s" +SaveCertificatesToFiles = false ` diff --git a/l1infotree/tree.go b/l1infotree/tree.go index f3ad6d36..17258ba0 100644 --- a/l1infotree/tree.go +++ b/l1infotree/tree.go @@ -109,15 +109,17 @@ func (mt *L1InfoTree) ComputeMerkleProof(gerIndex uint32, leaves [][32]byte) ([] if len(leaves)%2 == 1 { leaves = append(leaves, mt.zeroHashes[h]) } - if index >= uint32(len(leaves)) { - siblings = append(siblings, mt.zeroHashes[h]) - } else { - if index%2 == 1 { // If it is odd + if index%2 == 1 { // If it is odd + siblings = append(siblings, leaves[index-1]) + } else if len(leaves) > 1 { // It is even + if index >= uint32(len(leaves)) { + // siblings = append(siblings, mt.zeroHashes[h]) siblings = append(siblings, leaves[index-1]) - } else { // It is even + } else { siblings = append(siblings, leaves[index+1]) } } + var ( nsi [][][]byte hashes [][32]byte diff --git a/l1infotree/tree_test.go b/l1infotree/tree_test.go index 6af4b8b3..a0fe9b97 100644 --- a/l1infotree/tree_test.go +++ b/l1infotree/tree_test.go @@ -3,6 +3,7 @@ package l1infotree_test import ( "encoding/hex" "encoding/json" + "fmt" "os" "testing" @@ -129,3 +130,56 @@ func TestAddLeaf2(t *testing.T) { require.Equal(t, testVector.NewRoot, newRoot) } } + +func TestAddLeaf2TestLastLeaf(t *testing.T) { + mt, err := l1infotree.NewL1InfoTree(log.GetDefaultLogger(), uint8(32), [][32]byte{}) + require.NoError(t, err) + leaves := [][32]byte{ + common.HexToHash("0x6a617315ffc0a6831d2de6331f8d3e053889e9385696c13f11853fdcba50e123"), + common.HexToHash("0x1cff355b898cf285bcc3f84a8d6ed51c19fe87ab654f4146f2dc7723a59fc741"), + } + siblings, root, err := mt.ComputeMerkleProof(2, 
leaves) + require.NoError(t, err) + fmt.Printf("Root: %s\n", root.String()) + for i := 0; i < len(siblings); i++ { + hash := common.BytesToHash(siblings[i][:]) + fmt.Printf("Sibling %d: %s\n", i, hash.String()) + } + expectedProof := []string{ + "0x1cff355b898cf285bcc3f84a8d6ed51c19fe87ab654f4146f2dc7723a59fc741", + "0x7ae3eca221dee534b82adffb8003ad3826ddf116132e4ff55c681ff723bc7e42", + "0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30", + "0x21ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba85", + "0xe58769b32a1beaf1ea27375a44095a0d1fb664ce2dd358e7fcbfb78c26a19344", + "0x0eb01ebfc9ed27500cd4dfc979272d1f0913cc9f66540d7e8005811109e1cf2d", + "0x887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968", + "0xffd70157e48063fc33c97a050f7f640233bf646cc98d9524c6b92bcf3ab56f83", + "0x9867cc5f7f196b93bae1e27e6320742445d290f2263827498b54fec539f756af", + "0xcefad4e508c098b9a7e1d8feb19955fb02ba9675585078710969d3440f5054e0", + "0xf9dc3e7fe016e050eff260334f18a5d4fe391d82092319f5964f2e2eb7c1c3a5", + "0xf8b13a49e282f609c317a833fb8d976d11517c571d1221a265d25af778ecf892", + "0x3490c6ceeb450aecdc82e28293031d10c7d73bf85e57bf041a97360aa2c5d99c", + "0xc1df82d9c4b87413eae2ef048f94b4d3554cea73d92b0f7af96e0271c691e2bb", + "0x5c67add7c6caf302256adedf7ab114da0acfe870d449a3a489f781d659e8becc", + "0xda7bce9f4e8618b6bd2f4132ce798cdc7a60e7e1460a7299e3c6342a579626d2", + "0x2733e50f526ec2fa19a22b31e8ed50f23cd1fdf94c9154ed3a7609a2f1ff981f", + "0xe1d3b5c807b281e4683cc6d6315cf95b9ade8641defcb32372f1c126e398ef7a", + "0x5a2dce0a8a7f68bb74560f8f71837c2c2ebbcbf7fffb42ae1896f13f7c7479a0", + "0xb46a28b6f55540f89444f63de0378e3d121be09e06cc9ded1c20e65876d36aa0", + "0xc65e9645644786b620e2dd2ad648ddfcbf4a7e5b1a3a4ecfe7f64667a3f0b7e2", + "0xf4418588ed35a2458cffeb39b93d26f18d2ab13bdce6aee58e7b99359ec2dfd9", + "0x5a9c16dc00d6ef18b7933a6f8dc65ccb55667138776f7dea101070dc8796e377", + "0x4df84f40ae0c8229d0d6069e5c8f39a7c299677a09d367fc7b05e3bc380ee652", + 
"0xcdc72595f74c7b1043d0e1ffbab734648c838dfb0527d971b602bc216c9619ef", + "0x0abf5ac974a1ed57f4050aa510dd9c74f508277b39d7973bb2dfccc5eeb0618d", + "0xb8cd74046ff337f0a7bf2c8e03e10f642c1886798d71806ab1e888d9e5ee87d0", + "0x838c5655cb21c6cb83313b5a631175dff4963772cce9108188b34ac87c81c41e", + "0x662ee4dd2dd7b2bc707961b1e646c4047669dcb6584f0d8d770daf5d7e7deb2e", + "0x388ab20e2573d171a88108e79d820e98f26c0b84aa8b2f4aa4968dbb818ea322", + "0x93237c50ba75ee485f4c22adf2f741400bdf8d6a9cc7df7ecae576221665d735", + "0x8448818bb4ae4562849e949e17ac16e0be16688e156b5cf15e098c627c0056a9"} + for i := 0; i < len(siblings); i++ { + require.Equal(t, expectedProof[i], "0x"+hex.EncodeToString(siblings[i][:])) + } + require.Equal(t, "0xb85687d05a6bdccadcc1170a0e2bbba6855c35c984a0bc91697bc066bd38a338", root.String()) +} diff --git a/l1infotreesync/processor.go b/l1infotreesync/processor.go index e7115a60..2cd6190c 100644 --- a/l1infotreesync/processor.go +++ b/l1infotreesync/processor.go @@ -402,7 +402,7 @@ func (p *processor) GetInfoByGlobalExitRoot(ger common.Hash) (*L1InfoTreeLeaf, e SELECT * FROM l1info_leaf WHERE global_exit_root = $1 LIMIT 1; - `, ger.Hex()) + `, ger.String()) return info, db.ReturnErrNotFound(err) } diff --git a/l1infotreesync/processor_test.go b/l1infotreesync/processor_test.go index 52a81ce8..34c5daef 100644 --- a/l1infotreesync/processor_test.go +++ b/l1infotreesync/processor_test.go @@ -1,10 +1,15 @@ package l1infotreesync import ( + "fmt" "testing" "github.com/0xPolygon/cdk/db" + "github.com/0xPolygon/cdk/l1infotree" + "github.com/0xPolygon/cdk/l1infotreesync/migrations" + "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/sync" + "github.com/0xPolygon/cdk/tree" "github.com/0xPolygon/cdk/tree/types" "github.com/ethereum/go-ethereum/common" "github.com/stretchr/testify/require" @@ -124,8 +129,6 @@ func TestGetLatestInfoUntilBlockIfNotFoundReturnsErrNotFound(t *testing.T) { } func Test_processor_GetL1InfoTreeMerkleProof(t *testing.T) { - t.Parallel() - testTable 
:= []struct { name string getProcessor func(t *testing.T) *processor @@ -184,8 +187,6 @@ func Test_processor_GetL1InfoTreeMerkleProof(t *testing.T) { tt := tt t.Run(tt.name, func(t *testing.T) { - t.Parallel() - p := tt.getProcessor(t) proof, root, err := p.GetL1InfoTreeMerkleProof(context.Background(), tt.idx) if tt.expectedErr != nil { @@ -267,3 +268,93 @@ func Test_processor_Reorg(t *testing.T) { }) } } + +func TestProofsFromDifferentTrees(t *testing.T) { + fmt.Println("aggregator L1InfoTree ===============================================") + + l1Tree, err := l1infotree.NewL1InfoTree(log.WithFields("test"), types.DefaultHeight, [][32]byte{}) + require.NoError(t, err) + + leaves := createTestLeaves(t, 2) + + aLeaves := make([][32]byte, len(leaves)) + for i, leaf := range leaves { + aLeaves[i] = l1infotree.HashLeafData( + leaf.GlobalExitRoot, + leaf.PreviousBlockHash, + leaf.Timestamp) + } + + aggregatorL1InfoTree, aggregatorRoot, err := l1Tree.ComputeMerkleProof(leaves[0].L1InfoTreeIndex, aLeaves) + require.NoError(t, err) + + aggregatorProof := types.Proof{} + for i, p := range aggregatorL1InfoTree { + aggregatorProof[i] = common.BytesToHash(p[:]) + } + + fmt.Println(aggregatorRoot) + fmt.Println(aggregatorProof) + fmt.Println("l1 info tree syncer L1InfoTree ===============================================") + + dbPath := "file:l1InfoTreeTest?mode=memory&cache=shared" + require.NoError(t, migrations.RunMigrations(dbPath)) + + dbe, err := db.NewSQLiteDB(dbPath) + require.NoError(t, err) + + l1InfoTree := tree.NewAppendOnlyTree(dbe, migrations.L1InfoTreePrefix) + + tx, err := db.NewTx(context.Background(), dbe) + require.NoError(t, err) + + for _, leaf := range leaves { + err = l1InfoTree.AddLeaf(tx, leaf.BlockNumber, leaf.BlockPosition, types.Leaf{ + Index: leaf.L1InfoTreeIndex, + Hash: leaf.Hash, + }) + + require.NoError(t, err) + } + + require.NoError(t, tx.Commit()) + + l1InfoTreeSyncerRoot, err := l1InfoTree.GetRootByIndex(context.Background(), 
leaves[1].L1InfoTreeIndex) + require.NoError(t, err) + l1InfoTreeSyncerProof, err := l1InfoTree.GetProof(context.Background(), leaves[0].L1InfoTreeIndex, l1InfoTreeSyncerRoot.Hash) + require.NoError(t, err) + for i, l := range aggregatorL1InfoTree { + require.Equal(t, common.Hash(l), l1InfoTreeSyncerProof[i]) + } + + fmt.Println(leaves[0].GlobalExitRoot) + fmt.Println(l1InfoTreeSyncerProof) + + require.Equal(t, aggregatorRoot, l1InfoTreeSyncerRoot.Hash) + require.Equal(t, aggregatorProof, l1InfoTreeSyncerProof) +} + +func createTestLeaves(t *testing.T, numOfLeaves int) []*L1InfoTreeLeaf { + t.Helper() + + leaves := make([]*L1InfoTreeLeaf, 0, numOfLeaves) + + for i := 0; i < numOfLeaves; i++ { + leaf := &L1InfoTreeLeaf{ + L1InfoTreeIndex: uint32(i), + Timestamp: uint64(i), + BlockNumber: uint64(i), + BlockPosition: uint64(i), + PreviousBlockHash: common.HexToHash(fmt.Sprintf("0x%x", i)), + MainnetExitRoot: common.HexToHash(fmt.Sprintf("0x%x", i)), + RollupExitRoot: common.HexToHash(fmt.Sprintf("0x%x", i)), + } + + leaf.GlobalExitRoot = leaf.globalExitRoot() + leaf.Hash = leaf.hash() + + leaves = append(leaves, leaf) + } + + return leaves +} diff --git a/scripts/local_config b/scripts/local_config index 6922f15e..d1a47b2c 100755 --- a/scripts/local_config +++ b/scripts/local_config @@ -30,10 +30,13 @@ function get_value_from_toml_file(){ local _LINE local _inside_section=0 local _return_next_line=0 + local _TMP_FILE=$(mktemp) + cat $_FILE > $_TMP_FILE + # Maybe the file doesnt end with a new line so we added just in case + echo >> $_TMP_FILE while read -r _LINE; do # Clean up line from spaces and tabs _LINE=$(echo $_LINE | tr -d '[:space:]') - #echo $_LINE if [ $_inside_section -eq 1 ]; then if [[ "$_LINE" == [* ]]; then return 1 @@ -51,6 +54,7 @@ function get_value_from_toml_file(){ if [ $_key_value == "[" ]; then _return_next_line=1 else + rm $_TMP_FILE # sed sentence remove quotes echo $_key_value | sed 's/^[[:space:]]*"//;s/"$//' return 0 @@ -61,7 +65,8 @@ 
function get_value_from_toml_file(){ fi - done < "$_FILE" + done < "$_TMP_FILE" + rm $_TMP_FILE return 2 } @@ -73,7 +78,7 @@ function export_key_from_toml_file_or_fatal(){ local _KEY="$4" local _VALUE=$(get_value_from_toml_file $_FILE $_SECTION $_KEY) if [ -z "$_VALUE" ]; then - log_fatal "$FUNCNAME: key $_KEY not found in section $_SECTION" + log_fatal "$FUNCNAME: key $_KEY not found in section $_SECTION in file $_FILE" fi export $_EXPORTED_VAR_NAME="$_VALUE" log_debug "$_EXPORTED_VAR_NAME=${!_EXPORTED_VAR_NAME} \t\t\t# file:$_FILE section:$_SECTION key:$_KEY" @@ -141,7 +146,10 @@ function export_values_of_cdk_node_config(){ export_key_from_toml_file_or_fatal aggregator_db_password $_CDK_CONFIG_FILE Aggregator.DB Password export_obj_key_from_toml_file_or_fatal zkevm_l2_aggregator_keystore_password $_CDK_CONFIG_FILE Aggregator.EthTxManager PrivateKeys Password - export_key_from_toml_file_or_fatal zkevm_rollup_fork_id $_CDK_CONFIG_FILE Aggregator ForkId + export_key_from_toml_file_or_fatal zkevm_rollup_fork_id $_CDK_CONFIG_FILE Aggregator ForkId + export_key_from_toml_file_or_fatal zkevm_l2_agglayer_keystore_password $_CDK_CONFIG_FILE AggSender.SequencerPrivateKey Password + export_key_from_toml_file_or_fatal zkevm_bridge_address $_CDK_CONFIG_FILE BridgeL1Sync BridgeAddr + export is_cdk_validium=$zkevm_is_validium export zkevm_rollup_chain_id=$l2_chain_id @@ -198,13 +206,14 @@ function export_portnum_from_kurtosis_or_fail(){ ############################################################################### function export_ports_from_kurtosis(){ export_portnum_from_kurtosis_or_fail l1_rpc_port el-1-geth-lighthouse rpc - export_portnum_from_kurtosis_or_fail zkevm_rpc_http_port cdk-erigon-node-001 rpc rpc + export_portnum_from_kurtosis_or_fail zkevm_rpc_http_port cdk-erigon-node-001 http-rpc rpc export_portnum_from_kurtosis_or_fail zkevm_data_streamer_port cdk-erigon-sequencer-001 data-streamer export_portnum_from_kurtosis_or_fail aggregator_db_port postgres-001 postgres 
export_portnum_from_kurtosis_or_fail agglayer_port agglayer agglayer export aggregator_db_hostname="127.0.0.1" export l1_rpc_url="http://localhost:${l1_rpc_port}" export l2_rpc_url="http://localhost:${zkevm_rpc_http_port}" + export agglayer_url="http://localhost:${agglayer_port}" } ############################################################################### @@ -244,8 +253,10 @@ EOF ############################################################################### function create_dest_folder(){ export DEST=${TMP_CDK_FOLDER}/local_config + export path_rw_data=${TMP_CDK_FOLDER}/runtime [ ! -d ${DEST} ] && mkdir -p ${DEST} rm $DEST/* + mkdir $path_rw_data } ############################################################################### function download_kurtosis_artifacts(){ @@ -263,6 +274,10 @@ function download_kurtosis_artifacts(){ kurtosis files download $KURTOSIS_ENCLAVE aggregator-keystore $DEST ok_or_fatal "Error downloading kurtosis artifact cdk-node-config-artifact to $DEST" export zkevm_l2_aggregator_keystore_file=$DEST/aggregator.keystore + + kurtosis files download $KURTOSIS_ENCLAVE agglayer-keystore $DEST + ok_or_fatal "Error downloading kurtosis artifact agglayer to $DEST" + export zkevm_l2_agglayer_keystore_file=$DEST/agglayer.keystore } ############################################################################### @@ -278,9 +293,31 @@ function check_generated_config_file(){ fi } ############################################################################### +function parse_command_line_args(){ + while [[ $# -gt 0 ]]; do + case $1 in + -h|--help) + echo "Usage: $0" + echo " -h: help" + exit 0 + ;; + -e|--enclave) + KURTOSIS_ENCLAVE=$2 + shift + shift + ;; + -*) + echo "Invalid Option: $1" 1>&2 + exit 1 + ;; + esac + done +} +############################################################################### # MAIN ############################################################################### set -o pipefail # enable strict command pipe error detection 
+parse_command_line_args $* check_requirements create_dest_folder @@ -311,6 +348,7 @@ echo "- Stop cdk-node:" echo " kurtosis service stop cdk-v1 cdk-node-001" echo " " echo "- Add next configuration to vscode launch.json" +echo " -----------------------------------------------------------" cat << EOF { "name": "Debug cdk", @@ -325,5 +363,12 @@ cat << EOF "-components", "sequence-sender,aggregator", ] }, + + To run AggSender change components to: + "-components", "aggsender", EOF +echo " -----------------------------------------------------------" +echo " " +echo " - rembember to clean previous execution data: " +echo " rm -Rf ${path_rw_data}/*" \ No newline at end of file diff --git a/sonar-project.properties b/sonar-project.properties index 815d53a8..f46e9863 100644 --- a/sonar-project.properties +++ b/sonar-project.properties @@ -7,11 +7,11 @@ sonar.projectName=cdk sonar.organization=0xpolygon sonar.sources=. -sonar.exclusions=**/test/**,**/vendor/**,**/mocks/**,**/build/**,**/target/**,**/proto/include/**,**/*.pb.go,**/docs/**,**/*.sql,**/mocks_*/*, scripts/** +sonar.exclusions=**/test/**,**/vendor/**,**/mocks/**,**/build/**,**/target/**,**/proto/include/**,**/*.pb.go,**/docs/**,**/*.sql,**/mocks_*/*,scripts/**,**/mock_*.go,**/agglayer/**,**/cmd/** sonar.tests=. 
sonar.test.inclusions=**/*_test.go -sonar.test.exclusions=**/vendor/**,**/docs/**,**/mocks/**,**/*.pb.go,**/*.yml,**/*.yaml,**/*.json,**/*.xml,**/*.toml,**/mocks_*/* +sonar.test.exclusions=**/vendor/**,**/docs/**,**/mocks/**,**/*.pb.go,**/*.yml,**/*.yaml,**/*.json,**/*.xml,**/*.toml,**/mocks_*/*,**/mock_*.go,**/agglayer/**,**/cmd/** sonar.issue.enforceSemantic=true # ===================================================== diff --git a/test/Makefile b/test/Makefile index d173c423..a864cf82 100644 --- a/test/Makefile +++ b/test/Makefile @@ -1,8 +1,8 @@ .PHONY: generate-mocks generate-mocks: generate-mocks-bridgesync generate-mocks-reorgdetector generate-mocks-sequencesender \ generate-mocks-da generate-mocks-l1infotreesync generate-mocks-helpers \ - generate-mocks-sync generate-mocks-l1infotreesync generate-mocks-aggregator - + generate-mocks-sync generate-mocks-l1infotreesync generate-mocks-aggregator \ + generate-mocks-aggsender generate-mocks-agglayer generate-mocks-bridgesync .PHONY: generate-mocks-bridgesync generate-mocks-bridgesync: ## Generates mocks for bridgesync, using mockery tool @@ -53,13 +53,30 @@ generate-mocks-aggregator: ## Generates mocks for aggregator, using mockery tool export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=ProverInterface --dir=../aggregator --output=../aggregator/mocks --outpkg=mocks --structname=ProverInterfaceMock --filename=mock_prover.go export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=Etherman --dir=../aggregator --output=../aggregator/mocks --outpkg=mocks --structname=EthermanMock --filename=mock_etherman.go export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=StateInterface --dir=../aggregator --output=../aggregator/mocks --outpkg=mocks --structname=StateInterfaceMock --filename=mock_state.go - export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=AgglayerClientInterface --dir=../aggregator/agglayer --output=../aggregator/mocks 
--outpkg=mocks --structname=AgglayerClientInterfaceMock --filename=mock_agglayer_client.go export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=Synchronizer --srcpkg=github.com/0xPolygonHermez/zkevm-synchronizer-l1/synchronizer --output=../aggregator/mocks --outpkg=mocks --structname=SynchronizerInterfaceMock --filename=mock_synchronizer.go export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=EthTxManagerClient --dir=../aggregator --output=../aggregator/mocks --outpkg=mocks --structname=EthTxManagerClientMock --filename=mock_eth_tx_manager.go export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=Tx --srcpkg=github.com/jackc/pgx/v4 --output=../aggregator/mocks --outpkg=mocks --structname=DbTxMock --filename=mock_dbtx.go export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=RPCInterface --dir=../aggregator --output=../aggregator/mocks --outpkg=mocks --structname=RPCInterfaceMock --filename=mock_rpc.go +.PHONY: generate-mocks-aggsender +generate-mocks-aggsender: ## Generates mocks for aggsender, using mockery tool + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=L1InfoTreeSyncer --dir=../aggsender/types --output=../aggsender/mocks --outpkg=mocks --structname=L1InfoTreeSyncerMock --filename=mock_l1infotree_syncer.go ${COMMON_MOCKERY_PARAMS} + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=L2BridgeSyncer --dir=../aggsender/types --output=../aggsender/mocks --outpkg=mocks --structname=L2BridgeSyncerMock --filename=mock_l2bridge_syncer.go ${COMMON_MOCKERY_PARAMS} + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=Logger --dir=../aggsender/types --output=../aggsender/mocks --outpkg=mocks --structname=LoggerMock --filename=mock_logger.go ${COMMON_MOCKERY_PARAMS} + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=AggSenderStorage --dir=../aggsender/db --output=../aggsender/mocks 
--outpkg=mocks --structname=AggSenderStorageMock --filename=mock_aggsender_storage.go ${COMMON_MOCKERY_PARAMS} + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=EthClient --dir=../aggsender/types --output=../aggsender/mocks --outpkg=mocks --structname=EthClientMock --filename=mock_eth_client.go ${COMMON_MOCKERY_PARAMS} + +.PHONY: generate-mocks-agglayer +generate-mocks-agglayer: ## Generates mocks for agglayer, using mockery tool + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=AgglayerClientInterface --dir=../agglayer --output=../agglayer --outpkg=agglayer --inpackage --structname=AgglayerClientMock --filename=mock_agglayer_client.go + +.PHONY: generate-mocks-bridgesync +generate-mocks-bridgesync: ## Generates mocks for bridgesync, using mockery tool + rm -Rf ../bridgesync/mocks + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --all --case snake --dir ../bridgesync --output ../bridgesync/mocks --outpkg mocks_bridgesync ${COMMON_MOCKERY_PARAMS} + + .PHONY: test-e2e-fork9-validium test-e2e-fork9-validium: stop ./run-e2e.sh fork9 cdk-validium diff --git a/test/config/kurtosis-cdk-node-config.toml.template b/test/config/kurtosis-cdk-node-config.toml.template index fa01b528..68f6ec97 100644 --- a/test/config/kurtosis-cdk-node-config.toml.template +++ b/test/config/kurtosis-cdk-node-config.toml.template @@ -1,8 +1,9 @@ -PathRWData = "/data/" +PathRWData = "{{.path_rw_data}}/" L1URL="{{.l1_rpc_url}}" L2URL="http://{{.l2_rpc_name}}{{.deployment_suffix}}:{{.zkevm_rpc_http_port}}" L1AggOracleURL = "http://test-aggoracle-l1:8545" L2AggOracleURL = "http://test-aggoracle-l2:8545" +AggLayerURL="{{.agglayer_url}}" ForkId = {{.zkevm_rollup_fork_id}} IsValidiumMode = {{.is_cdk_validium}} @@ -19,13 +20,11 @@ SequencerPrivateKeyPassword = "{{.zkevm_l2_keystore_password}}" AggregatorPrivateKeyPath = "{{or .zkevm_l2_aggregator_keystore_file "/etc/cdk/aggregator.keystore"}}" AggregatorPrivateKeyPassword = 
"{{.zkevm_l2_keystore_password}}" SenderProofToL1Addr = "{{.zkevm_l2_agglayer_address}}" - +polygonBridgeAddr = "{{.zkevm_bridge_address}}" RPCURL = "http://{{.l2_rpc_name}}{{.deployment_suffix}}:{{.zkevm_rpc_http_port}}" WitnessURL = "http://{{.l2_rpc_name}}{{.deployment_suffix}}:{{.zkevm_rpc_http_port}}" -AggLayerURL = "http://agglayer:{{.agglayer_port}}" - # This values can be override directly from genesis.json @@ -38,8 +37,7 @@ genesisBlockNumber = "{{.zkevm_rollup_manager_block_number}}" polygonRollupManagerAddress = "{{.zkevm_rollup_manager_address}}" polTokenAddress = "{{.pol_token_address}}" polygonZkEVMAddress = "{{.zkevm_rollup_address}}" - polygonBridgeAddr = "0x0000000000000000000000000000000000000000" - + [L2Config] GlobalExitRootAddr = "{{.zkevm_global_exit_root_address}}" @@ -58,4 +56,7 @@ Outputs = ["stderr"] Host = "{{.aggregator_db.hostname}}" Port = "{{.aggregator_db.port}}" EnableLog = false - MaxConns = 200 \ No newline at end of file + MaxConns = 200 + +[AggSender] +SequencerPrivateKey = {Path = "{{or .zkevm_l2_agglayer_keystore_file "/pk/sequencer.keystore"}}", Password = "{{.zkevm_l2_agglayer_keystore_password}}"} diff --git a/test/helpers/lxly-bridge-test.bash b/test/helpers/lxly-bridge-test.bash index c753393a..7b3cb008 100644 --- a/test/helpers/lxly-bridge-test.bash +++ b/test/helpers/lxly-bridge-test.bash @@ -38,7 +38,6 @@ function claim() { echo "Getting full list of deposits" >&3 curl -s "$bridge_api_url/bridges/$destination_addr?limit=100&offset=0" | jq '.' | tee $bridge_deposit_file - echo "Looking for claimable deposits" >&3 jq '[.deposits[] | select(.ready_for_claim == true and .claim_tx_hash == "" and .dest_net == '$destination_net')]' $bridge_deposit_file | tee $claimable_deposit_file readonly claimable_count=$(jq '. 
| length' $claimable_deposit_file) diff --git a/tree/tree.go b/tree/tree.go index 0e3a0c69..6abb9e3d 100644 --- a/tree/tree.go +++ b/tree/tree.go @@ -7,8 +7,10 @@ import ( "fmt" "github.com/0xPolygon/cdk/db" + "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/tree/types" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" "github.com/russross/meddler" "golang.org/x/crypto/sha3" ) @@ -112,7 +114,8 @@ func (t *Tree) GetProof(ctx context.Context, index uint32, root common.Hash) (ty return types.Proof{}, err } if isErrNotFound { - return types.Proof{}, db.ErrNotFound + // TODO: Validate it. It returns a proof of a tree with missing leafs + log.Warnf("getSiblings returned proof with zero hashes for index %d and root %s", index, root.String()) } return siblings, nil } @@ -122,7 +125,7 @@ func (t *Tree) getRHTNode(tx db.Querier, nodeHash common.Hash) (*types.TreeNode, err := meddler.QueryRow( tx, node, fmt.Sprintf(`select * from %s where hash = $1`, t.rhtTable), - nodeHash.Hex(), + nodeHash.String(), ) if err != nil { if errors.Is(err, sql.ErrNoRows) { @@ -250,5 +253,20 @@ func (t *Tree) Reorg(tx db.Txer, firstReorgedBlock uint64) error { firstReorgedBlock, ) return err - // NOTE: rht is not cleaned, this could be done in the future as optimization +} + +// CalculateRoot calculates the Merkle Root based on the leaf and proof of inclusion +func CalculateRoot(leafHash common.Hash, proof [types.DefaultHeight]common.Hash, index uint32) common.Hash { + node := leafHash + + // Compute the Merkle root + for height := uint8(0); height < types.DefaultHeight; height++ { + if (index>>height)&1 == 1 { + node = crypto.Keccak256Hash(proof[height].Bytes(), node.Bytes()) + } else { + node = crypto.Keccak256Hash(node.Bytes(), proof[height].Bytes()) + } + } + + return node } From 7a84588d65b739a9408a5a2b16775e9daa84c5de Mon Sep 17 00:00:00 2001 From: Goran Rojovic <100121253+goran-ethernal@users.noreply.github.com> Date: Thu, 31 Oct 2024 16:47:11 
+0100 Subject: [PATCH 06/33] feat: Add `metadata` field on the certificate (#151) * feat: use metadata field on certificate * fix: lint and UT * fix: comments --- agglayer/types.go | 21 ++++++++++++- agglayer/types_test.go | 4 +-- aggsender/aggsender.go | 20 ++++++++++-- aggsender/aggsender_test.go | 5 ++- common/common.go | 17 +++++++++++ common/common_test.go | 61 +++++++++++++++++++++++++++++++++++++ 6 files changed, 121 insertions(+), 7 deletions(-) create mode 100644 common/common_test.go diff --git a/agglayer/types.go b/agglayer/types.go index e8bdb254..825c9db2 100644 --- a/agglayer/types.go +++ b/agglayer/types.go @@ -83,6 +83,7 @@ type Certificate struct { NewLocalExitRoot [32]byte `json:"new_local_exit_root"` BridgeExits []*BridgeExit `json:"bridge_exits"` ImportedBridgeExits []*ImportedBridgeExit `json:"imported_bridge_exits"` + Metadata common.Hash `json:"metadata"` } // Hash returns a hash that uniquely identifies the certificate @@ -110,6 +111,20 @@ func (c *Certificate) Hash() common.Hash { ) } +// HashToSign is the actual hash that needs to be signed by the aggsender +// as expected by the agglayer +func (c *Certificate) HashToSign() common.Hash { + globalIndexHashes := make([][]byte, len(c.ImportedBridgeExits)) + for i, importedBridgeExit := range c.ImportedBridgeExits { + globalIndexHashes[i] = importedBridgeExit.GlobalIndex.Hash().Bytes() + } + + return crypto.Keccak256Hash( + c.NewLocalExitRoot[:], + crypto.Keccak256Hash(globalIndexHashes...).Bytes(), + ) +} + // SignedCertificate is the struct that contains the certificate and the signature of the signer type SignedCertificate struct { *Certificate @@ -138,7 +153,10 @@ type GlobalIndex struct { func (g *GlobalIndex) Hash() common.Hash { return crypto.Keccak256Hash( - bridgesync.GenerateGlobalIndex(g.MainnetFlag, g.RollupIndex, g.LeafIndex).Bytes()) + cdkcommon.BigIntToLittleEndianBytes( + bridgesync.GenerateGlobalIndex(g.MainnetFlag, g.RollupIndex, g.LeafIndex), + ), + ) } // BridgeExit 
represents a token bridge exit @@ -379,6 +397,7 @@ type CertificateHeader struct { CertificateID common.Hash `json:"certificate_id"` NewLocalExitRoot common.Hash `json:"new_local_exit_root"` Status CertificateStatus `json:"status"` + Metadata common.Hash `json:"metadata"` } func (c CertificateHeader) String() string { diff --git a/agglayer/types_test.go b/agglayer/types_test.go index 1df1f20f..325c0b88 100644 --- a/agglayer/types_test.go +++ b/agglayer/types_test.go @@ -11,8 +11,8 @@ import ( ) const ( - expectedSignedCertificateEmptyMetadataJSON = `{"network_id":1,"height":1,"prev_local_exit_root":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"new_local_exit_root":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"bridge_exits":[{"leaf_type":"Transfer","token_info":null,"dest_network":0,"dest_address":"0x0000000000000000000000000000000000000000","amount":"1","metadata":[]}],"imported_bridge_exits":[{"bridge_exit":{"leaf_type":"Transfer","token_info":null,"dest_network":0,"dest_address":"0x0000000000000000000000000000000000000000","amount":"1","metadata":[]},"claim_data":null,"global_index":{"mainnet_flag":false,"rollup_index":1,"leaf_index":1}}],"signature":{"r":"0x0000000000000000000000000000000000000000000000000000000000000000","s":"0x0000000000000000000000000000000000000000000000000000000000000000","odd_y_parity":false}}` - expectedSignedCertificateyMetadataJSON = 
`{"network_id":1,"height":1,"prev_local_exit_root":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"new_local_exit_root":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"bridge_exits":[{"leaf_type":"Transfer","token_info":null,"dest_network":0,"dest_address":"0x0000000000000000000000000000000000000000","amount":"1","metadata":[1,2,3]}],"imported_bridge_exits":[{"bridge_exit":{"leaf_type":"Transfer","token_info":null,"dest_network":0,"dest_address":"0x0000000000000000000000000000000000000000","amount":"1","metadata":[]},"claim_data":null,"global_index":{"mainnet_flag":false,"rollup_index":1,"leaf_index":1}}],"signature":{"r":"0x0000000000000000000000000000000000000000000000000000000000000000","s":"0x0000000000000000000000000000000000000000000000000000000000000000","odd_y_parity":false}}` + expectedSignedCertificateEmptyMetadataJSON = `{"network_id":1,"height":1,"prev_local_exit_root":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"new_local_exit_root":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"bridge_exits":[{"leaf_type":"Transfer","token_info":null,"dest_network":0,"dest_address":"0x0000000000000000000000000000000000000000","amount":"1","metadata":[]}],"imported_bridge_exits":[{"bridge_exit":{"leaf_type":"Transfer","token_info":null,"dest_network":0,"dest_address":"0x0000000000000000000000000000000000000000","amount":"1","metadata":[]},"claim_data":null,"global_index":{"mainnet_flag":false,"rollup_index":1,"leaf_index":1}}],"metadata":"0x0000000000000000000000000000000000000000000000000000000000000000","signature":{"r":"0x0000000000000000000000000000000000000000000000000000000000000000","s":"0x0000000000000000000000000000000000000000000000000000000000000000","odd_y_parity":false}}` + expectedSignedCertificateyMetadataJSON = 
`{"network_id":1,"height":1,"prev_local_exit_root":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"new_local_exit_root":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"bridge_exits":[{"leaf_type":"Transfer","token_info":null,"dest_network":0,"dest_address":"0x0000000000000000000000000000000000000000","amount":"1","metadata":[1,2,3]}],"imported_bridge_exits":[{"bridge_exit":{"leaf_type":"Transfer","token_info":null,"dest_network":0,"dest_address":"0x0000000000000000000000000000000000000000","amount":"1","metadata":[]},"claim_data":null,"global_index":{"mainnet_flag":false,"rollup_index":1,"leaf_index":1}}],"metadata":"0x0000000000000000000000000000000000000000000000000000000000000000","signature":{"r":"0x0000000000000000000000000000000000000000000000000000000000000000","s":"0x0000000000000000000000000000000000000000000000000000000000000000","odd_y_parity":false}}` ) func TestMarshalJSON(t *testing.T) { diff --git a/aggsender/aggsender.go b/aggsender/aggsender.go index a228e1a9..f1df20ff 100644 --- a/aggsender/aggsender.go +++ b/aggsender/aggsender.go @@ -6,6 +6,7 @@ import ( "encoding/json" "errors" "fmt" + "math/big" "os" "time" @@ -153,7 +154,7 @@ func (a *AggSender) sendCertificate(ctx context.Context) error { a.log.Infof("building certificate for block: %d to block: %d", fromBlock, toBlock) - certificate, err := a.buildCertificate(ctx, bridges, claims, lastSentCertificateInfo) + certificate, err := a.buildCertificate(ctx, bridges, claims, lastSentCertificateInfo, toBlock) if err != nil { return fmt.Errorf("error building certificate: %w", err) } @@ -209,7 +210,8 @@ func (a *AggSender) saveCertificateToFile(signedCertificate *agglayer.SignedCert func (a *AggSender) buildCertificate(ctx context.Context, bridges []bridgesync.Bridge, claims []bridgesync.Claim, - lastSentCertificateInfo aggsendertypes.CertificateInfo) (*agglayer.Certificate, error) { + lastSentCertificateInfo aggsendertypes.CertificateInfo, + toBlock uint64) 
(*agglayer.Certificate, error) { if len(bridges) == 0 && len(claims) == 0 { return nil, errNoBridgesAndClaims } @@ -245,6 +247,7 @@ func (a *AggSender) buildCertificate(ctx context.Context, BridgeExits: bridgeExits, ImportedBridgeExits: importedBridgeExits, Height: height, + Metadata: createCertificateMetadata(toBlock), }, nil } @@ -412,13 +415,19 @@ func (a *AggSender) getImportedBridgeExits( // signCertificate signs a certificate with the sequencer key func (a *AggSender) signCertificate(certificate *agglayer.Certificate) (*agglayer.SignedCertificate, error) { - hashToSign := certificate.Hash() + hashToSign := certificate.HashToSign() sig, err := crypto.Sign(hashToSign.Bytes(), a.sequencerKey) if err != nil { return nil, err } + a.log.Infof("Signed certificate. sequencer address: %s. New local exit root: %s Hash signed: %s", + crypto.PubkeyToAddress(a.sequencerKey.PublicKey).String(), + common.BytesToHash(certificate.NewLocalExitRoot[:]).String(), + hashToSign.String(), + ) + r, s, isOddParity, err := extractSignatureData(sig) if err != nil { return nil, err @@ -500,3 +509,8 @@ func extractSignatureData(signature []byte) (r, s common.Hash, isOddParity bool, return } + +// createCertificateMetadata creates a certificate metadata from given input +func createCertificateMetadata(toBlock uint64) common.Hash { + return common.BigToHash(new(big.Int).SetUint64(toBlock)) +} diff --git a/aggsender/aggsender_test.go b/aggsender/aggsender_test.go index 69dc6ed1..71878679 100644 --- a/aggsender/aggsender_test.go +++ b/aggsender/aggsender_test.go @@ -493,6 +493,7 @@ func TestBuildCertificate(t *testing.T) { bridges []bridgesync.Bridge claims []bridgesync.Claim lastSentCertificateInfo aggsendertypes.CertificateInfo + toBlock uint64 mockFn func() expectedCert *agglayer.Certificate expectedError bool @@ -532,10 +533,12 @@ func TestBuildCertificate(t *testing.T) { NewLocalExitRoot: common.HexToHash("0x123"), Height: 1, }, + toBlock: 10, expectedCert: &agglayer.Certificate{ 
NetworkID: 1, PrevLocalExitRoot: common.HexToHash("0x123"), NewLocalExitRoot: common.HexToHash("0x789"), + Metadata: createCertificateMetadata(10), BridgeExits: []*agglayer.BridgeExit{ { LeafType: agglayer.LeafTypeAsset, @@ -686,7 +689,7 @@ func TestBuildCertificate(t *testing.T) { l1infoTreeSyncer: mockL1InfoTreeSyncer, log: log.WithFields("test", "unittest"), } - cert, err := aggSender.buildCertificate(context.Background(), tt.bridges, tt.claims, tt.lastSentCertificateInfo) + cert, err := aggSender.buildCertificate(context.Background(), tt.bridges, tt.claims, tt.lastSentCertificateInfo, tt.toBlock) if tt.expectedError { require.Error(t, err) diff --git a/common/common.go b/common/common.go index c74f56e4..f8b92d16 100644 --- a/common/common.go +++ b/common/common.go @@ -109,3 +109,20 @@ func NewKeyFromKeystore(cfg types.KeystoreFileConfig) (*ecdsa.PrivateKey, error) } return key.PrivateKey, nil } + +// BigIntToLittleEndianBytes converts a big.Int to a 32-byte little-endian representation. 
+// big.Int is capped to 32 bytes +func BigIntToLittleEndianBytes(n *big.Int) []byte { + // Get the absolute value in big-endian byte slice + beBytes := n.Bytes() + + // Initialize a 32-byte array for the result + leBytes := make([]byte, common.HashLength) + + // Fill the array in reverse order to convert to little-endian + for i := 0; i < len(beBytes) && i < common.HashLength; i++ { + leBytes[i] = beBytes[len(beBytes)-1-i] + } + + return leBytes +} diff --git a/common/common_test.go b/common/common_test.go new file mode 100644 index 00000000..b6b99c5f --- /dev/null +++ b/common/common_test.go @@ -0,0 +1,61 @@ +package common + +import ( + "fmt" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func TestAsLittleEndianSlice(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + input *big.Int + expected []byte + }{ + { + name: "Zero value", + input: big.NewInt(0), + expected: make([]byte, 32), + }, + { + name: "Positive value", + input: big.NewInt(123456789), + expected: append([]byte{21, 205, 91, 7}, make([]byte, 28)...), + }, + { + name: "Negative value", + input: big.NewInt(-123456789), + expected: append([]byte{21, 205, 91, 7}, make([]byte, 28)...), + }, + { + name: "Large positive value", + input: new(big.Int).SetBytes([]byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}), + expected: []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + result := BigIntToLittleEndianBytes(tt.input) + require.Len(t, result, common.HashLength) + + for i := range result { + require.Equal(t, 
tt.expected[i], result[i], + fmt.Sprintf("expected byte at index %d to be %x, got %x", i, tt.expected[i], result[i])) + } + }) + } +} From 8481a35579e6aacc91b15cfddee6b79077fc72c4 Mon Sep 17 00:00:00 2001 From: Arnau Bennassar Date: Thu, 31 Oct 2024 18:28:13 +0100 Subject: [PATCH 07/33] minor improvements on the config (#149) --- config/default.go | 12 +++++------- logerror | 1 + test/config/kurtosis-cdk-node-config.toml.template | 2 -- 3 files changed, 6 insertions(+), 9 deletions(-) create mode 100644 logerror diff --git a/config/default.go b/config/default.go index 7f2ae8b6..096d98de 100644 --- a/config/default.go +++ b/config/default.go @@ -5,8 +5,6 @@ package config const DefaultMandatoryVars = ` L1URL = "http://localhost:8545" L2URL = "http://localhost:8123" -L1AggOracleURL = "http://test-aggoracle-l1:8545" -L2AggOracleURL = "http://test-aggoracle-l2:8545" AggLayerURL = "https://agglayer-dev.polygon.technology" ForkId = 9 @@ -219,18 +217,18 @@ GlobalExitRootAddr="{{NetworkConfig.L1.GlobalExitRootManagerAddr}}" RollupManagerAddr = "{{NetworkConfig.L1.RollupManagerAddr}}" SyncBlockChunkSize=10 BlockFinality="LatestBlock" -URLRPCL1="{{L1AggOracleURL}}" +URLRPCL1="{{L1URL}}" WaitForNewBlocksPeriod="100ms" InitialBlock=0 [AggOracle] TargetChainType="EVM" -URLRPCL1="{{L1AggOracleURL}}" +URLRPCL1="{{L1URL}}" BlockFinality="FinalizedBlock" WaitPeriodNextGER="100ms" [AggOracle.EVMSender] GlobalExitRootL2="{{L2Config.GlobalExitRootAddr}}" - URLRPCL2="{{L2AggOracleURL}}" + URLRPCL2="{{L2URL}}" ChainIDL2=1337 GasOffset=0 WaitPeriodMonitorTx="100ms" @@ -251,7 +249,7 @@ WaitPeriodNextGER="100ms" SafeStatusL1NumberOfBlocks = 5 FinalizedStatusL1NumberOfBlocks = 10 [AggOracle.EVMSender.EthTxManager.Etherman] - URL = "{{L2AggOracleURL}}" + URL = "{{L2URL}}" MultiGasProvider = false L1ChainID = {{NetworkConfig.L1.L1ChainID}} HTTPHeaders = [] @@ -290,7 +288,7 @@ GasOffset = 0 SafeStatusL1NumberOfBlocks = 5 FinalizedStatusL1NumberOfBlocks = 10 [ClaimSponsor.EthTxManager.Etherman] 
- URL = "{{L2AggOracleURL}}" + URL = "{{L2URL}}" MultiGasProvider = false L1ChainID = {{NetworkConfig.L1.L1ChainID}} HTTPHeaders = [] diff --git a/logerror b/logerror new file mode 100644 index 00000000..cf3e44c1 --- /dev/null +++ b/logerror @@ -0,0 +1 @@ +ok github.com/0xPolygon/cdk/l1infotreesync 2.438s diff --git a/test/config/kurtosis-cdk-node-config.toml.template b/test/config/kurtosis-cdk-node-config.toml.template index 68f6ec97..1d70226d 100644 --- a/test/config/kurtosis-cdk-node-config.toml.template +++ b/test/config/kurtosis-cdk-node-config.toml.template @@ -1,8 +1,6 @@ PathRWData = "{{.path_rw_data}}/" L1URL="{{.l1_rpc_url}}" L2URL="http://{{.l2_rpc_name}}{{.deployment_suffix}}:{{.zkevm_rpc_http_port}}" -L1AggOracleURL = "http://test-aggoracle-l1:8545" -L2AggOracleURL = "http://test-aggoracle-l2:8545" AggLayerURL="{{.agglayer_url}}" ForkId = {{.zkevm_rollup_fork_id}} From faa2a749675c528ee77c96e56700aceb426a372e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toni=20Ram=C3=ADrez?= <58293609+ToniRamirezM@users.noreply.github.com> Date: Fri, 1 Nov 2024 12:17:20 +0100 Subject: [PATCH 08/33] feat: update zkevm-ethtx-manager to v0.2.1 (#153) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index ae03382e..4a3a983e 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( github.com/0xPolygon/cdk-contracts-tooling v0.0.0-20240826154954-f6182d2b17a2 github.com/0xPolygon/cdk-data-availability v0.0.10 github.com/0xPolygon/cdk-rpc v0.0.0-20241004114257-6c3cb6eebfb6 - github.com/0xPolygon/zkevm-ethtx-manager v0.2.0 + github.com/0xPolygon/zkevm-ethtx-manager v0.2.1 github.com/0xPolygonHermez/zkevm-synchronizer-l1 v1.0.5 github.com/ethereum/go-ethereum v1.14.8 github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3 diff --git a/go.sum b/go.sum index 96f2dc93..28771a51 100644 --- a/go.sum +++ b/go.sum @@ -4,8 +4,8 @@ github.com/0xPolygon/cdk-data-availability v0.0.10 
h1:pVcke2I7GuPH7JeRLKokEOHffP github.com/0xPolygon/cdk-data-availability v0.0.10/go.mod h1:nn5RmnkzOiugAxizSbaYnA+em79YLLLoR25i0UlKc5Q= github.com/0xPolygon/cdk-rpc v0.0.0-20241004114257-6c3cb6eebfb6 h1:FXL/rcO7/GtZ3kRFw+C7J6vmGnl8gcazg+Gh/NVmnas= github.com/0xPolygon/cdk-rpc v0.0.0-20241004114257-6c3cb6eebfb6/go.mod h1:2scWqMMufrQXu7TikDgQ3BsyaKoX8qP26D6E262vSOg= -github.com/0xPolygon/zkevm-ethtx-manager v0.2.0 h1:QWE6nKBBHkMEiza723hJk0+oZbLSdQZTX4I48jWw15I= -github.com/0xPolygon/zkevm-ethtx-manager v0.2.0/go.mod h1:lqQmzSo2OXEZItD0R4Cd+lqKFxphXEWgqHefVcGDZZc= +github.com/0xPolygon/zkevm-ethtx-manager v0.2.1 h1:2Yb+KdJFMpVrS9LIkd658XiWuN+MCTs7SgeWaopXScg= +github.com/0xPolygon/zkevm-ethtx-manager v0.2.1/go.mod h1:lqQmzSo2OXEZItD0R4Cd+lqKFxphXEWgqHefVcGDZZc= github.com/0xPolygonHermez/zkevm-synchronizer-l1 v1.0.5 h1:YmnhuCl349MoNASN0fMeGKU1o9HqJhiZkfMsA/1cTRA= github.com/0xPolygonHermez/zkevm-synchronizer-l1 v1.0.5/go.mod h1:X4Su/M/+hSISqdl9yomKlRsbTyuZHsRohporyHsP8gg= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= From 34e2887a2d1d1be09c57809f8b6e197e190b0dbb Mon Sep 17 00:00:00 2001 From: Victor Castell <0x@vcastellm.xyz> Date: Tue, 5 Nov 2024 15:11:21 +0000 Subject: [PATCH 09/33] refactor: retrieve and parse versions at buildtime Use input_parser.star from kurtosis --- Cargo.lock | 58 +++++++++++++++++++++++------------ crates/cdk/Cargo.toml | 14 +++++---- crates/cdk/build.rs | 62 ++++++++++++++++++++++++++++++++++++++ crates/cdk/src/versions.rs | 30 +++++++----------- crates/cdk/versions.json | 15 +++++++++ 5 files changed, 134 insertions(+), 45 deletions(-) create mode 100644 crates/cdk/versions.json diff --git a/Cargo.lock b/Cargo.lock index b9956840..07f6da60 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -61,9 +61,9 @@ dependencies = [ [[package]] name = "alloy-json-rpc" -version = "0.4.2" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f8fa8a1a3c4cbd221f2b8e3693aeb328fca79a757fe556ed08e47bbbc2a70db7" +checksum = "af5979e0d5a7bf9c7eb79749121e8256e59021af611322aee56e77e20776b4b3" dependencies = [ "alloy-primitives", "alloy-sol-types", @@ -112,9 +112,9 @@ dependencies = [ [[package]] name = "alloy-rpc-client" -version = "0.4.2" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "370143ed581aace6e663342d21d209c6b2e34ee6142f7d6675adb518deeaf0dc" +checksum = "7fc2bd1e7403463a5f2c61e955bcc9d3072b63aa177442b0f9aa6a6d22a941e3" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -130,6 +130,7 @@ dependencies = [ "tower 0.5.1", "tracing", "url", + "wasmtimer", ] [[package]] @@ -192,9 +193,9 @@ dependencies = [ [[package]] name = "alloy-transport" -version = "0.4.2" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ac3e97dad3d31770db0fc89bd6a63b789fbae78963086733f960cf32c483904" +checksum = "be77579633ebbc1266ae6fd7694f75c408beb1aeb6865d0b18f22893c265a061" dependencies = [ "alloy-json-rpc", "base64 0.22.1", @@ -207,13 +208,14 @@ dependencies = [ "tower 0.5.1", "tracing", "url", + "wasmtimer", ] [[package]] name = "alloy-transport-http" -version = "0.4.2" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b367dcccada5b28987c2296717ee04b9a5637aacd78eacb1726ef211678b5212" +checksum = "91fd1a5d0827939847983b46f2f79510361f901dc82f8e3c38ac7397af142c6e" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -691,6 +693,7 @@ dependencies = [ "colored", "dotenvy", "execute", + "regex", "reqwest 0.12.8", "serde", "serde_json", @@ -2685,7 +2688,7 @@ dependencies = [ "lalrpop-util", "petgraph", "regex", - "regex-syntax 0.8.4", + "regex-syntax 0.8.5", "string_cache", "term", "tiny-keccak", @@ -2699,7 +2702,7 @@ version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "507460a910eb7b32ee961886ff48539633b788a36b65692b95f225b844c82553" 
dependencies = [ - "regex-automata 0.4.7", + "regex-automata 0.4.8", ] [[package]] @@ -3334,7 +3337,7 @@ dependencies = [ "rand", "rand_chacha", "rand_xorshift", - "regex-syntax 0.8.4", + "regex-syntax 0.8.5", "rusty-fork", "tempfile", "unarray", @@ -3443,14 +3446,14 @@ dependencies = [ [[package]] name = "regex" -version = "1.10.5" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b91213439dad192326a0d7c6ee3955910425f441d7038e0d6933b0aec5c4517f" +checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.7", - "regex-syntax 0.8.4", + "regex-automata 0.4.8", + "regex-syntax 0.8.5", ] [[package]] @@ -3464,13 +3467,13 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df" +checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.8.4", + "regex-syntax 0.8.5", ] [[package]] @@ -3481,9 +3484,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.8.4" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "reqwest" @@ -3535,6 +3538,7 @@ dependencies = [ "base64 0.22.1", "bytes", "encoding_rs", + "futures-channel", "futures-core", "futures-util", "h2 0.4.5", @@ -5066,6 +5070,20 @@ version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" +[[package]] +name = "wasmtimer" +version = "0.2.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7ed9d8b15c7fb594d72bfb4b5a276f3d2029333cd93a932f376f5937f6f80ee" +dependencies = [ + "futures", + "js-sys", + "parking_lot", + "pin-utils", + "slab", + "wasm-bindgen", +] + [[package]] name = "web-sys" version = "0.3.69" diff --git a/crates/cdk/Cargo.toml b/crates/cdk/Cargo.toml index 0c1f8274..e6e9723b 100644 --- a/crates/cdk/Cargo.toml +++ b/crates/cdk/Cargo.toml @@ -15,14 +15,16 @@ tracing.workspace = true tracing-subscriber = { workspace = true, features = ["env-filter", "json"] } url = { workspace = true, features = ["serde"] } colored = "2.0" - - cdk-config = { path = "../cdk-config" } serde.workspace = true serde_json.workspace = true tempfile = "3.12.0" -alloy-rpc-client = "0.4.2" -alloy-transport-http = "0.4.2" +alloy-rpc-client = "0.5.4" +alloy-transport-http = "0.5.4" tokio = "1.40.0" -reqwest = "0.12.8" -alloy-json-rpc = "0.4.2" +alloy-json-rpc = "0.5.4" + +[build-dependencies] +reqwest = {version = "0.12.8", features = ["blocking"]} +serde_json.workspace = true +regex = "1.11.1" diff --git a/crates/cdk/build.rs b/crates/cdk/build.rs index 59fffda7..802b68c0 100644 --- a/crates/cdk/build.rs +++ b/crates/cdk/build.rs @@ -1,8 +1,15 @@ +use regex::Regex; +use reqwest::blocking::get; use std::env; +use std::fs::File; +use std::io::Write; +use std::path::Path; use std::path::PathBuf; use std::process::Command; fn main() { + let _ = build_versions(); + let build_script_disabled = env::var("BUILD_SCRIPT_DISABLED") .map(|v| v == "1") .unwrap_or(false); // run by default @@ -46,3 +53,58 @@ fn main() { // only when a specific file changes: // println!("cargo:rerun-if-changed=path/to/file"); } + +// build_versions retrieves the versions from the Starlark file and embeds them in the binary. 
+fn build_versions() -> std::io::Result<()> { + // Retrieve the contents of the file from the URL + let url = "https://raw.githubusercontent.com/0xPolygon/kurtosis-cdk/refs/heads/main/input_parser.star"; + let response = get(url).expect("Failed to send request"); + let content = response.text().expect("Failed to read response text"); + + // Write the contents to a file + let out_dir = std::env::var("OUT_DIR").unwrap(); + let dest_path = Path::new(&out_dir).join("input_parser.star"); + let mut file = File::create(&dest_path)?; + file.write_all(content.as_bytes())?; + + // Get lines 28 to 40 from the contents of the starlark file + let versions = content + .lines() + .skip(30) + .take(15) + .collect::>() + .join("\n"); + + // Replace the string DEFAULT_IMAGES = from the versions string + let versions = versions.replace("DEFAULT_IMAGES = ", ""); + + // Remove all comments to the end of the line using a regexp + let re = Regex::new(r"\s#\s.*\n").unwrap(); + let versions = re.replace_all(&versions, ""); + // Replace the trailing comma on the last line + let versions = versions.replace(", }", " }"); + + print!("{}", versions); + + // The versions string is a JSON object we can parse + let versions_json: serde_json::Value = serde_json::from_str(&versions).unwrap(); + + // Write the versions to a file + let dest_path = Path::new(".").join("versions.json"); + let mut file = File::create(&dest_path)?; + file.write_all( + serde_json::to_string_pretty(&versions_json) + .unwrap() + .as_bytes(), + )?; + + // Optionally, print the output of the make command + println!("cargo:rerun-if-changed=build.rs"); + + // Here you can also add additional commands to inform Cargo about + // how to rerun the build script. 
For example, to rerun this script + // only when a specific file changes: + // println!("cargo:rerun-if-changed=path/to/file"); + + Ok(()) +} diff --git a/crates/cdk/src/versions.rs b/crates/cdk/src/versions.rs index 77581452..3b148787 100644 --- a/crates/cdk/src/versions.rs +++ b/crates/cdk/src/versions.rs @@ -14,34 +14,26 @@ fn version() -> Result { } pub(crate) fn versions() { + // Load the versions from the versions.json file in the crate directory + // and parse it using serde_json. + let versions = include_str!("../versions.json"); + let versions_json: serde_json::Value = serde_json::from_str(versions).unwrap(); + + // Convert the JSON object to a HashMap. + let versions_map = versions_json.as_object().unwrap(); + // Get the version of the cdk-node binary. let output = version().unwrap(); let version = String::from_utf8(output.stdout).unwrap(); println!("{}", format!("{}", version.trim()).green()); - let versions = vec![ - ( - "zkEVM Contracts", - "https://github.com/0xPolygonHermez/zkevm-contracts/releases/tag/v8.0.0-rc.4-fork.12", - ), - ("zkEVM Prover", "v8.0.0-RC12"), - ("CDK Erigon", "hermeznetwork/cdk-erigon:0948e33"), - ( - "zkEVM Pool Manager", - "hermeznetwork/zkevm-pool-manager:v0.1.1", - ), - ( - "CDK Data Availability Node", - "0xpolygon/cdk-data-availability:0.0.10", - ), - ]; - // Multi-line string to print the versions with colors. 
- let formatted_versions: Vec = versions + let formatted_versions: Vec = versions_map .iter() - .map(|(key, value)| format!("{}: {}", key.green(), value.blue())) + .map(|(key, value)| format!("{}: {}", key.green(), value.to_string().blue())) .collect(); + println!("{}", "Supported up to fork12".yellow()); println!("{}", formatted_versions.join("\n")); } diff --git a/crates/cdk/versions.json b/crates/cdk/versions.json new file mode 100644 index 00000000..7cbe12dd --- /dev/null +++ b/crates/cdk/versions.json @@ -0,0 +1,15 @@ +{ + "agglayer_image": "ghcr.io/agglayer/agglayer:feature-storage-adding-epoch-packing", + "cdk_erigon_node_image": "hermeznetwork/cdk-erigon:v2.1.1", + "cdk_node_image": "ghcr.io/0xpolygon/cdk:0.4.0-beta1", + "cdk_validium_node_image": "0xpolygon/cdk-validium-node:0.7.0-cdk", + "zkevm_bridge_proxy_image": "haproxy:3.0-bookworm", + "zkevm_bridge_service_image": "hermeznetwork/zkevm-bridge-service:v0.6.0-RC1", + "zkevm_bridge_ui_image": "leovct/zkevm-bridge-ui:multi-network-2", + "zkevm_contracts_image": "leovct/zkevm-contracts:v8.0.0-rc.4-fork.12", + "zkevm_da_image": "0xpolygon/cdk-data-availability:0.0.10", + "zkevm_node_image": "hermeznetwork/zkevm-node:v0.7.3", + "zkevm_pool_manager_image": "hermeznetwork/zkevm-pool-manager:v0.1.1", + "zkevm_prover_image": "hermeznetwork/zkevm-prover:v8.0.0-RC14-fork.12", + "zkevm_sequence_sender_image": "hermeznetwork/zkevm-sequence-sender:v0.2.4" +} \ No newline at end of file From 6d8dd74342278a31d2611e4c50e33c107e80643f Mon Sep 17 00:00:00 2001 From: Arnau Bennassar Date: Tue, 5 Nov 2024 12:42:17 -0600 Subject: [PATCH 10/33] feat: use sqlite on lastgersync (#150) * feat use sqlite on lastgersync * apply requests * rm tree migrations * Update lastgersync/processor.go Co-authored-by: Goran Rojovic <100121253+goran-ethernal@users.noreply.github.com> --------- Co-authored-by: Goran Rojovic <100121253+goran-ethernal@users.noreply.github.com> --- lastgersync/e2e_test.go | 9 +- lastgersync/evmdownloader.go | 18 
+- lastgersync/lastgersync.go | 4 +- lastgersync/migrations/lastgersync0001.sql | 14 + lastgersync/migrations/migrations.go | 21 ++ lastgersync/processor.go | 300 +++++---------------- rpc/bridge.go | 4 +- rpc/bridge_interfaces.go | 3 +- rpc/mocks/last_ge_rer.go | 36 +-- 9 files changed, 141 insertions(+), 268 deletions(-) create mode 100644 lastgersync/migrations/lastgersync0001.sql create mode 100644 lastgersync/migrations/migrations.go diff --git a/lastgersync/e2e_test.go b/lastgersync/e2e_test.go index e4d5e407..9b9a6f36 100644 --- a/lastgersync/e2e_test.go +++ b/lastgersync/e2e_test.go @@ -3,6 +3,7 @@ package lastgersync_test import ( "context" "fmt" + "path" "strconv" "testing" "time" @@ -18,7 +19,7 @@ import ( func TestE2E(t *testing.T) { ctx := context.Background() env := aggoraclehelpers.SetupAggoracleWithEVMChain(t) - dbPathSyncer := t.TempDir() + dbPathSyncer := path.Join(t.TempDir(), "file::memory:?cache=shared") syncer, err := lastgersync.New( ctx, dbPathSyncer, @@ -65,8 +66,8 @@ func TestE2E(t *testing.T) { } require.True(t, syncerUpToDate, errMsg) - _, actualGER, err := syncer.GetFirstGERAfterL1InfoTreeIndex(ctx, uint32(i)) - require.NoError(t, err) - require.Equal(t, common.Hash(expectedGER), actualGER) + e, err := syncer.GetFirstGERAfterL1InfoTreeIndex(ctx, uint32(i)) + require.NoError(t, err, fmt.Sprint("iteration: ", i)) + require.Equal(t, common.Hash(expectedGER), e.GlobalExitRoot, fmt.Sprint("iteration: ", i)) } } diff --git a/lastgersync/evmdownloader.go b/lastgersync/evmdownloader.go index e76bb578..bf9a236f 100644 --- a/lastgersync/evmdownloader.go +++ b/lastgersync/evmdownloader.go @@ -62,13 +62,13 @@ func newDownloader( func (d *downloader) Download(ctx context.Context, fromBlock uint64, downloadedCh chan sync.EVMBlock) { var ( attempts int - lastIndex uint32 + nextIndex uint32 err error ) for { - lastIndex, err = d.processor.getLastIndex(ctx) + lastIndex, err := d.processor.getLastIndex() if errors.Is(err, db.ErrNotFound) { - lastIndex = 
0 + nextIndex = 0 } else if err != nil { log.Errorf("error getting last indes: %v", err) attempts++ @@ -76,7 +76,9 @@ func (d *downloader) Download(ctx context.Context, fromBlock uint64, downloadedC continue } - + if lastIndex > 0 { + nextIndex = lastIndex + 1 + } break } for { @@ -88,12 +90,12 @@ func (d *downloader) Download(ctx context.Context, fromBlock uint64, downloadedC return default: } - lastBlock := d.WaitForNewBlocks(ctx, fromBlock) + fromBlock = d.WaitForNewBlocks(ctx, fromBlock) attempts = 0 var gers []Event for { - gers, err = d.getGERsFromIndex(ctx, lastIndex) + gers, err = d.getGERsFromIndex(ctx, nextIndex) if err != nil { log.Errorf("error getting GERs: %v", err) attempts++ @@ -105,7 +107,7 @@ func (d *downloader) Download(ctx context.Context, fromBlock uint64, downloadedC break } - blockHeader, isCanceled := d.GetBlockHeader(ctx, lastBlock) + blockHeader, isCanceled := d.GetBlockHeader(ctx, fromBlock) if isCanceled { return } @@ -126,7 +128,7 @@ func (d *downloader) Download(ctx context.Context, fromBlock uint64, downloadedC if !ok { log.Errorf("unexpected type %T in events", block.Events[0]) } - lastIndex = event.L1InfoTreeIndex + nextIndex = event.L1InfoTreeIndex + 1 } } } diff --git a/lastgersync/lastgersync.go b/lastgersync/lastgersync.go index 1b40bfcf..c6689293 100644 --- a/lastgersync/lastgersync.go +++ b/lastgersync/lastgersync.go @@ -32,7 +32,7 @@ func New( waitForNewBlocksPeriod time.Duration, downloadBufferSize int, ) (*LastGERSync, error) { - processor, err := newProcessor(dbPath) + processor, err := newProcessor(dbPath, "lastGERSync") if err != nil { return nil, err } @@ -75,7 +75,7 @@ func (s *LastGERSync) Start(ctx context.Context) { func (s *LastGERSync) GetFirstGERAfterL1InfoTreeIndex( ctx context.Context, atOrAfterL1InfoTreeIndex uint32, -) (injectedL1InfoTreeIndex uint32, ger common.Hash, err error) { +) (Event, error) { return s.processor.GetFirstGERAfterL1InfoTreeIndex(ctx, atOrAfterL1InfoTreeIndex) } diff --git 
a/lastgersync/migrations/lastgersync0001.sql b/lastgersync/migrations/lastgersync0001.sql new file mode 100644 index 00000000..88021fa1 --- /dev/null +++ b/lastgersync/migrations/lastgersync0001.sql @@ -0,0 +1,14 @@ +-- +migrate Down +DROP TABLE IF EXISTS block; +DROP TABLE IF EXISTS global_exit_root; + +-- +migrate Up +CREATE TABLE block ( + num BIGINT PRIMARY KEY +); + +CREATE TABLE imported_global_exit_root ( + block_num INTEGER PRIMARY KEY REFERENCES block(num) ON DELETE CASCADE, + global_exit_root VARCHAR NOT NULL, + l1_info_tree_index INTEGER NOT NULL +); \ No newline at end of file diff --git a/lastgersync/migrations/migrations.go b/lastgersync/migrations/migrations.go new file mode 100644 index 00000000..d55dd449 --- /dev/null +++ b/lastgersync/migrations/migrations.go @@ -0,0 +1,21 @@ +package migrations + +import ( + _ "embed" + + "github.com/0xPolygon/cdk/db" + "github.com/0xPolygon/cdk/db/types" +) + +//go:embed lastgersync0001.sql +var mig001 string + +func RunMigrations(dbPath string) error { + migrations := []types.Migration{ + { + ID: "lastgersync0001", + SQL: mig001, + }, + } + return db.RunMigrations(dbPath, migrations) +} diff --git a/lastgersync/processor.go b/lastgersync/processor.go index 45104f09..dd86482f 100644 --- a/lastgersync/processor.go +++ b/lastgersync/processor.go @@ -2,292 +2,136 @@ package lastgersync import ( "context" + "database/sql" "errors" - "fmt" - "math" - "github.com/0xPolygon/cdk/common" "github.com/0xPolygon/cdk/db" + "github.com/0xPolygon/cdk/lastgersync/migrations" "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/sync" ethCommon "github.com/ethereum/go-ethereum/common" - "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon-lib/kv/mdbx" -) - -const ( - lastProcessedTable = "lastgersync-lastProcessed" - gerTable = "lastgersync-ger" - blockTable = "lastgersync-block" -) - -var ( - lastProcessedKey = []byte("lp") + "github.com/russross/meddler" ) type Event struct { - GlobalExitRoot 
ethCommon.Hash - L1InfoTreeIndex uint32 + GlobalExitRoot ethCommon.Hash `meddler:"global_exit_root,hash"` + L1InfoTreeIndex uint32 `meddler:"l1_info_tree_index"` } -type blockWithGERs struct { - // inclusive - FirstIndex uint32 - // not inclusive - LastIndex uint32 -} - -func (b *blockWithGERs) MarshalBinary() ([]byte, error) { - return append(common.Uint32ToBytes(b.FirstIndex), common.Uint32ToBytes(b.LastIndex)...), nil -} - -func (b *blockWithGERs) UnmarshalBinary(data []byte) error { - const expectedDataLength = 8 - if len(data) != expectedDataLength { - return fmt.Errorf("expected len %d, actual len %d", expectedDataLength, len(data)) - } - b.FirstIndex = common.BytesToUint32(data[:4]) - b.LastIndex = common.BytesToUint32(data[4:]) - - return nil +type eventWithBlockNum struct { + GlobalExitRoot ethCommon.Hash `meddler:"global_exit_root,hash"` + L1InfoTreeIndex uint32 `meddler:"l1_info_tree_index"` + BlockNum uint64 `meddler:"block_num"` } type processor struct { - db kv.RwDB + db *sql.DB + log *log.Logger } -func newProcessor(dbPath string) (*processor, error) { - tableCfgFunc := func(defaultBuckets kv.TableCfg) kv.TableCfg { - cfg := kv.TableCfg{ - lastProcessedTable: {}, - gerTable: {}, - blockTable: {}, - } - - return cfg +func newProcessor(dbPath string, loggerPrefix string) (*processor, error) { + err := migrations.RunMigrations(dbPath) + if err != nil { + return nil, err } - db, err := mdbx.NewMDBX(nil). - Path(dbPath). - WithTableCfg(tableCfgFunc). 
- Open() + db, err := db.NewSQLiteDB(dbPath) if err != nil { return nil, err } - + logger := log.WithFields("lastger-syncer", loggerPrefix) return &processor{ - db: db, + db: db, + log: logger, }, nil } -// GetLastProcessedBlockAndL1InfoTreeIndex returns the last processed block oby the processor, including blocks +// GetLastProcessedBlock returns the last processed block by the processor, including blocks // that don't have events func (p *processor) GetLastProcessedBlock(ctx context.Context) (uint64, error) { - tx, err := p.db.BeginRo(ctx) - if err != nil { - return 0, err - } - defer tx.Rollback() - - return p.getLastProcessedBlockWithTx(tx) -} - -func (p *processor) getLastIndex(ctx context.Context) (uint32, error) { - tx, err := p.db.BeginRo(ctx) - if err != nil { - return 0, err - } - defer tx.Rollback() - - return p.getLastIndexWithTx(tx) -} - -func (p *processor) getLastIndexWithTx(tx kv.Tx) (uint32, error) { - iter, err := tx.RangeDescend(gerTable, common.Uint32ToBytes(math.MaxUint32), common.Uint32ToBytes(0), 1) - if err != nil { - return 0, err - } - k, _, err := iter.Next() - if err != nil { - return 0, err - } - if k == nil { - return 0, db.ErrNotFound + var lastProcessedBlock uint64 + row := p.db.QueryRow("SELECT num FROM BLOCK ORDER BY num DESC LIMIT 1;") + err := row.Scan(&lastProcessedBlock) + if errors.Is(err, sql.ErrNoRows) { + return 0, nil } - - return common.BytesToUint32(k), nil + return lastProcessedBlock, err } -func (p *processor) getLastProcessedBlockWithTx(tx kv.Tx) (uint64, error) { - if lastProcessedBytes, err := tx.GetOne(lastProcessedTable, lastProcessedKey); err != nil { - return 0, err - } else if lastProcessedBytes == nil { +func (p *processor) getLastIndex() (uint32, error) { + var lastIndex uint32 + row := p.db.QueryRow(` + SELECT l1_info_tree_index + FROM imported_global_exit_root + ORDER BY l1_info_tree_index DESC LIMIT 1; + `) + err := row.Scan(&lastIndex) + if errors.Is(err, sql.ErrNoRows) { return 0, nil - } else { - return 
common.BytesToUint64(lastProcessedBytes), nil } -} - -func (p *processor) updateLastProcessedBlockWithTx(tx kv.RwTx, blockNum uint64) error { - return tx.Put(lastProcessedTable, lastProcessedKey, common.Uint64ToBytes(blockNum)) + return lastIndex, err } func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { - tx, err := p.db.BeginRw(ctx) + tx, err := db.NewTx(ctx, p.db) if err != nil { return err } - - lenEvents := len(block.Events) - var lastIndex int64 - if lenEvents > 0 { - li, err := p.getLastIndexWithTx(tx) - switch { - case errors.Is(err, db.ErrNotFound): - lastIndex = -1 - - case err != nil: - tx.Rollback() - return err - - default: - lastIndex = int64(li) + shouldRollback := true + defer func() { + if shouldRollback { + if errRollback := tx.Rollback(); errRollback != nil { + log.Errorf("error while rolling back tx %v", errRollback) + } } - } + }() + if _, err := tx.Exec(`INSERT INTO block (num) VALUES ($1)`, block.Num); err != nil { + return err + } for _, e := range block.Events { event, ok := e.(Event) if !ok { - log.Errorf("unexpected type %T in events", e) - } - if int64(event.L1InfoTreeIndex) < lastIndex { - continue - } - lastIndex = int64(event.L1InfoTreeIndex) - if err := tx.Put( - gerTable, - common.Uint32ToBytes(event.L1InfoTreeIndex), - event.GlobalExitRoot[:], - ); err != nil { - tx.Rollback() - - return err - } - } - - if lenEvents > 0 { - firstEvent, ok := block.Events[0].(Event) - if !ok { - log.Errorf("unexpected type %T in events", block.Events[0]) - tx.Rollback() - - return fmt.Errorf("unexpected type %T in events", block.Events[0]) - } - - lastEvent, ok := block.Events[lenEvents-1].(Event) - if !ok { - log.Errorf("unexpected type %T in events", block.Events[lenEvents-1]) - tx.Rollback() - - return fmt.Errorf("unexpected type %T in events", block.Events[lenEvents-1]) - } - - bwg := blockWithGERs{ - FirstIndex: firstEvent.L1InfoTreeIndex, - LastIndex: lastEvent.L1InfoTreeIndex + 1, + return errors.New("failed to 
convert sync.Block.Event to Event") } - - data, err := bwg.MarshalBinary() - if err != nil { - tx.Rollback() - - return err - } - if err = tx.Put(blockTable, common.Uint64ToBytes(block.Num), data); err != nil { - tx.Rollback() - + if err = meddler.Insert(tx, "imported_global_exit_root", &eventWithBlockNum{ + GlobalExitRoot: event.GlobalExitRoot, + L1InfoTreeIndex: event.L1InfoTreeIndex, + BlockNum: block.Num, + }); err != nil { return err } } - if err := p.updateLastProcessedBlockWithTx(tx, block.Num); err != nil { - tx.Rollback() - + if err := tx.Commit(); err != nil { return err } - - return tx.Commit() + shouldRollback = false + p.log.Debugf("processed %d events until block %d", len(block.Events), block.Num) + return nil } func (p *processor) Reorg(ctx context.Context, firstReorgedBlock uint64) error { - tx, err := p.db.BeginRw(ctx) - if err != nil { - return err - } - - iter, err := tx.Range(blockTable, common.Uint64ToBytes(firstReorgedBlock), nil) - if err != nil { - tx.Rollback() - - return err - } - for bNumBytes, bWithGERBytes, err := iter.Next(); bNumBytes != nil; bNumBytes, bWithGERBytes, err = iter.Next() { - if err != nil { - tx.Rollback() - - return err - } - if err := tx.Delete(blockTable, bNumBytes); err != nil { - tx.Rollback() - - return err - } - - bWithGER := &blockWithGERs{} - if err := bWithGER.UnmarshalBinary(bWithGERBytes); err != nil { - tx.Rollback() - - return err - } - for i := bWithGER.FirstIndex; i < bWithGER.LastIndex; i++ { - if err := tx.Delete(gerTable, common.Uint32ToBytes(i)); err != nil { - tx.Rollback() - - return err - } - } - } - - if err := p.updateLastProcessedBlockWithTx(tx, firstReorgedBlock-1); err != nil { - tx.Rollback() - - return err - } - - return tx.Commit() + _, err := p.db.Exec(`DELETE FROM block WHERE num >= $1;`, firstReorgedBlock) + return err } // GetFirstGERAfterL1InfoTreeIndex returns the first GER injected on the chain that is related to l1InfoTreeIndex // or greater func (p *processor) 
GetFirstGERAfterL1InfoTreeIndex( ctx context.Context, l1InfoTreeIndex uint32, -) (uint32, ethCommon.Hash, error) { - tx, err := p.db.BeginRo(ctx) - if err != nil { - return 0, ethCommon.Hash{}, err - } - defer tx.Rollback() - - iter, err := tx.Range(gerTable, common.Uint32ToBytes(l1InfoTreeIndex), nil) - if err != nil { - return 0, ethCommon.Hash{}, err - } - l1InfoIndexBytes, ger, err := iter.Next() +) (Event, error) { + e := Event{} + err := meddler.QueryRow(p.db, &e, ` + SELECT l1_info_tree_index, global_exit_root + FROM imported_global_exit_root + WHERE l1_info_tree_index >= $1 + ORDER BY l1_info_tree_index ASC LIMIT 1; + `, l1InfoTreeIndex) if err != nil { - return 0, ethCommon.Hash{}, err - } - if l1InfoIndexBytes == nil { - return 0, ethCommon.Hash{}, db.ErrNotFound + if errors.Is(err, sql.ErrNoRows) { + return e, db.ErrNotFound + } + return e, err } - - return common.BytesToUint32(l1InfoIndexBytes), ethCommon.BytesToHash(ger), nil + return e, nil } diff --git a/rpc/bridge.go b/rpc/bridge.go index 96394a4f..e9865108 100644 --- a/rpc/bridge.go +++ b/rpc/bridge.go @@ -132,11 +132,11 @@ func (b *BridgeEndpoints) InjectedInfoAfterIndex(networkID uint32, l1InfoTreeInd return info, nil } if networkID == b.networkID { - injectedL1InfoTreeIndex, _, err := b.injectedGERs.GetFirstGERAfterL1InfoTreeIndex(ctx, l1InfoTreeIndex) + e, err := b.injectedGERs.GetFirstGERAfterL1InfoTreeIndex(ctx, l1InfoTreeIndex) if err != nil { return zeroHex, rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get global exit root, error: %s", err)) } - info, err := b.l1InfoTree.GetInfoByIndex(ctx, injectedL1InfoTreeIndex) + info, err := b.l1InfoTree.GetInfoByIndex(ctx, e.L1InfoTreeIndex) if err != nil { return zeroHex, rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get global exit root, error: %s", err)) } diff --git a/rpc/bridge_interfaces.go b/rpc/bridge_interfaces.go index 84292e22..89929531 100644 --- a/rpc/bridge_interfaces.go +++ b/rpc/bridge_interfaces.go @@ 
-6,6 +6,7 @@ import ( "github.com/0xPolygon/cdk/claimsponsor" "github.com/0xPolygon/cdk/l1infotreesync" + "github.com/0xPolygon/cdk/lastgersync" tree "github.com/0xPolygon/cdk/tree/types" "github.com/ethereum/go-ethereum/common" ) @@ -18,7 +19,7 @@ type Bridger interface { type LastGERer interface { GetFirstGERAfterL1InfoTreeIndex( ctx context.Context, atOrAfterL1InfoTreeIndex uint32, - ) (injectedL1InfoTreeIndex uint32, ger common.Hash, err error) + ) (lastgersync.Event, error) } type L1InfoTreer interface { diff --git a/rpc/mocks/last_ge_rer.go b/rpc/mocks/last_ge_rer.go index d2e3068a..7b338e2e 100644 --- a/rpc/mocks/last_ge_rer.go +++ b/rpc/mocks/last_ge_rer.go @@ -5,8 +5,7 @@ package mocks import ( context "context" - common "github.com/ethereum/go-ethereum/common" - + lastgersync "github.com/0xPolygon/cdk/lastgersync" mock "github.com/stretchr/testify/mock" ) @@ -24,40 +23,31 @@ func (_m *LastGERer) EXPECT() *LastGERer_Expecter { } // GetFirstGERAfterL1InfoTreeIndex provides a mock function with given fields: ctx, atOrAfterL1InfoTreeIndex -func (_m *LastGERer) GetFirstGERAfterL1InfoTreeIndex(ctx context.Context, atOrAfterL1InfoTreeIndex uint32) (uint32, common.Hash, error) { +func (_m *LastGERer) GetFirstGERAfterL1InfoTreeIndex(ctx context.Context, atOrAfterL1InfoTreeIndex uint32) (lastgersync.Event, error) { ret := _m.Called(ctx, atOrAfterL1InfoTreeIndex) if len(ret) == 0 { panic("no return value specified for GetFirstGERAfterL1InfoTreeIndex") } - var r0 uint32 - var r1 common.Hash - var r2 error - if rf, ok := ret.Get(0).(func(context.Context, uint32) (uint32, common.Hash, error)); ok { + var r0 lastgersync.Event + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint32) (lastgersync.Event, error)); ok { return rf(ctx, atOrAfterL1InfoTreeIndex) } - if rf, ok := ret.Get(0).(func(context.Context, uint32) uint32); ok { + if rf, ok := ret.Get(0).(func(context.Context, uint32) lastgersync.Event); ok { r0 = rf(ctx, atOrAfterL1InfoTreeIndex) } else { 
- r0 = ret.Get(0).(uint32) + r0 = ret.Get(0).(lastgersync.Event) } - if rf, ok := ret.Get(1).(func(context.Context, uint32) common.Hash); ok { + if rf, ok := ret.Get(1).(func(context.Context, uint32) error); ok { r1 = rf(ctx, atOrAfterL1InfoTreeIndex) } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(common.Hash) - } - } - - if rf, ok := ret.Get(2).(func(context.Context, uint32) error); ok { - r2 = rf(ctx, atOrAfterL1InfoTreeIndex) - } else { - r2 = ret.Error(2) + r1 = ret.Error(1) } - return r0, r1, r2 + return r0, r1 } // LastGERer_GetFirstGERAfterL1InfoTreeIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetFirstGERAfterL1InfoTreeIndex' @@ -79,12 +69,12 @@ func (_c *LastGERer_GetFirstGERAfterL1InfoTreeIndex_Call) Run(run func(ctx conte return _c } -func (_c *LastGERer_GetFirstGERAfterL1InfoTreeIndex_Call) Return(injectedL1InfoTreeIndex uint32, ger common.Hash, err error) *LastGERer_GetFirstGERAfterL1InfoTreeIndex_Call { - _c.Call.Return(injectedL1InfoTreeIndex, ger, err) +func (_c *LastGERer_GetFirstGERAfterL1InfoTreeIndex_Call) Return(_a0 lastgersync.Event, _a1 error) *LastGERer_GetFirstGERAfterL1InfoTreeIndex_Call { + _c.Call.Return(_a0, _a1) return _c } -func (_c *LastGERer_GetFirstGERAfterL1InfoTreeIndex_Call) RunAndReturn(run func(context.Context, uint32) (uint32, common.Hash, error)) *LastGERer_GetFirstGERAfterL1InfoTreeIndex_Call { +func (_c *LastGERer_GetFirstGERAfterL1InfoTreeIndex_Call) RunAndReturn(run func(context.Context, uint32) (lastgersync.Event, error)) *LastGERer_GetFirstGERAfterL1InfoTreeIndex_Call { _c.Call.Return(run) return _c } From 0f3f691dbebb37556887681171ce04048b530f90 Mon Sep 17 00:00:00 2001 From: Arnau Bennassar Date: Tue, 5 Nov 2024 12:42:35 -0600 Subject: [PATCH 11/33] feat: use sqlite on claimsponsor (#157) * feat use sqlite on claimsponsor * wip * pass UTs * fix identation * fix identation * rm cover.out * rm tree migrations * make err a var --- claimsponsor/claimsponsor.go | 
378 ++++++------------- claimsponsor/e2e_test.go | 6 +- claimsponsor/evmclaimsponsor.go | 2 +- claimsponsor/migrations/claimsponsor0001.sql | 20 + claimsponsor/migrations/migrations.go | 21 ++ rpc/bridge.go | 4 +- rpc/bridge_interfaces.go | 4 +- rpc/mocks/claim_sponsorer.go | 52 ++- 8 files changed, 192 insertions(+), 295 deletions(-) create mode 100644 claimsponsor/migrations/claimsponsor0001.sql create mode 100644 claimsponsor/migrations/migrations.go diff --git a/claimsponsor/claimsponsor.go b/claimsponsor/claimsponsor.go index c9df6561..32483789 100644 --- a/claimsponsor/claimsponsor.go +++ b/claimsponsor/claimsponsor.go @@ -2,56 +2,51 @@ package claimsponsor import ( "context" - "encoding/json" + "database/sql" "errors" - "math" + "fmt" "math/big" "time" - dbCommon "github.com/0xPolygon/cdk/common" + "github.com/0xPolygon/cdk/claimsponsor/migrations" "github.com/0xPolygon/cdk/db" "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/sync" tree "github.com/0xPolygon/cdk/tree/types" "github.com/ethereum/go-ethereum/common" - "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon-lib/kv/iter" - "github.com/ledgerwatch/erigon-lib/kv/mdbx" + "github.com/russross/meddler" ) type ClaimStatus string const ( - PendingClaimStatus = "pending" - WIPStatus = "work in progress" - SuccessClaimStatus = "success" - FailedClaimStatus = "failed" - - claimTable = "claimsponsor-tx" - queueTable = "claimsponsor-queue" + PendingClaimStatus ClaimStatus = "pending" + WIPClaimStatus ClaimStatus = "work in progress" + SuccessClaimStatus ClaimStatus = "success" + FailedClaimStatus ClaimStatus = "failed" ) var ( - ErrInvalidClaim = errors.New("invalid claim") + ErrInvalidClaim = errors.New("invalid claim") + ErrClaimDoesntExist = errors.New("the claim requested to be updated does not exist") ) // Claim representation of a claim event type Claim struct { - LeafType uint8 - ProofLocalExitRoot tree.Proof - ProofRollupExitRoot tree.Proof - GlobalIndex *big.Int - 
MainnetExitRoot common.Hash - RollupExitRoot common.Hash - OriginNetwork uint32 - OriginTokenAddress common.Address - DestinationNetwork uint32 - DestinationAddress common.Address - Amount *big.Int - Metadata []byte - - Status ClaimStatus - TxID string + LeafType uint8 `meddler:"leaf_type"` + ProofLocalExitRoot tree.Proof `meddler:"proof_local_exit_root,merkleproof"` + ProofRollupExitRoot tree.Proof `meddler:"proof_rollup_exit_root,merkleproof"` + GlobalIndex *big.Int `meddler:"global_index,bigint"` + MainnetExitRoot common.Hash `meddler:"mainnet_exit_root,hash"` + RollupExitRoot common.Hash `meddler:"rollup_exit_root,hash"` + OriginNetwork uint32 `meddler:"origin_network"` + OriginTokenAddress common.Address `meddler:"origin_token_address,address"` + DestinationNetwork uint32 `meddler:"destination_network"` + DestinationAddress common.Address `meddler:"destination_address,address"` + Amount *big.Int `meddler:"amount,bigint"` + Metadata []byte `meddler:"metadata"` + Status ClaimStatus `meddler:"status"` + TxID string `meddler:"tx_id"` } func (c *Claim) Key() []byte { @@ -66,7 +61,7 @@ type ClaimSender interface { type ClaimSponsor struct { logger *log.Logger - db kv.RwDB + db *sql.DB sender ClaimSender rh *sync.RetryHandler waitTxToBeMinedPeriod time.Duration @@ -82,18 +77,11 @@ func newClaimSponsor( waitTxToBeMinedPeriod time.Duration, waitOnEmptyQueue time.Duration, ) (*ClaimSponsor, error) { - tableCfgFunc := func(defaultBuckets kv.TableCfg) kv.TableCfg { - cfg := kv.TableCfg{ - claimTable: {}, - queueTable: {}, - } - - return cfg + err := migrations.RunMigrations(dbPath) + if err != nil { + return nil, err } - db, err := mdbx.NewMDBX(nil). - Path(dbPath). - WithTableCfg(tableCfgFunc). 
- Open() + db, err := db.NewSQLiteDB(dbPath) if err != nil { return nil, err } @@ -115,264 +103,136 @@ func newClaimSponsor( func (c *ClaimSponsor) Start(ctx context.Context) { var ( attempts int - err error ) for { + err := c.claim(ctx) if err != nil { attempts++ + c.logger.Error(err) c.rh.Handle("claimsponsor main loop", attempts) + } else { + attempts = 0 } - tx, err2 := c.db.BeginRw(ctx) - if err2 != nil { - err = err2 - c.logger.Errorf("error calling BeginRw: %v", err) - continue - } - queueIndex, globalIndex, err2 := getFirstQueueIndex(tx) - if err2 != nil { - err = err2 - tx.Rollback() - if errors.Is(err, db.ErrNotFound) { - c.logger.Debugf("queue is empty") - err = nil - time.Sleep(c.waitOnEmptyQueue) - - continue - } - c.logger.Errorf("error calling getFirstQueueIndex: %v", err) - continue - } - claim, err2 := getClaim(tx, globalIndex) - if err2 != nil { - err = err2 - tx.Rollback() - c.logger.Errorf("error calling getClaim with globalIndex %s: %v", globalIndex.String(), err) - continue - } - if claim.TxID == "" { - txID, err2 := c.sender.sendClaim(ctx, claim) - if err2 != nil { - err = err2 - tx.Rollback() - c.logger.Errorf("error calling sendClaim with globalIndex %s: %v", globalIndex.String(), err) - continue - } - claim.TxID = txID - claim.Status = WIPStatus - err2 = putClaim(tx, claim) - if err2 != nil { - err = err2 - tx.Rollback() - c.logger.Errorf("error calling putClaim with globalIndex %s: %v", globalIndex.String(), err) - continue - } - } - err2 = tx.Commit() - if err2 != nil { - err = err2 - c.logger.Errorf("error calling tx.Commit after putting claim: %v", err) - continue - } - - c.logger.Infof("waiting for tx %s with global index %s to succeed or fail", claim.TxID, globalIndex.String()) - status, err2 := c.waitTxToBeSuccessOrFail(ctx, claim.TxID) - if err2 != nil { - err = err2 - c.logger.Errorf("error calling waitTxToBeSuccessOrFail for tx %s: %v", claim.TxID, err) - continue - } - c.logger.Infof("tx %s with global index %s concluded with 
status: %s", claim.TxID, globalIndex.String(), status) - tx, err2 = c.db.BeginRw(ctx) - if err2 != nil { - err = err2 - c.logger.Errorf("error calling BeginRw: %v", err) - continue - } - claim.Status = status - err2 = putClaim(tx, claim) - if err2 != nil { - err = err2 - tx.Rollback() - c.logger.Errorf("error calling putClaim with globalIndex %s: %v", globalIndex.String(), err) - continue - } - err2 = tx.Delete(queueTable, dbCommon.Uint64ToBytes(queueIndex)) - if err2 != nil { - err = err2 - tx.Rollback() - c.logger.Errorf("error calling delete on the queue table with index %d: %v", queueIndex, err) - continue - } - err2 = tx.Commit() - if err2 != nil { - err = err2 - c.logger.Errorf("error calling tx.Commit after putting claim: %v", err) - continue - } - - attempts = 0 } } -func (c *ClaimSponsor) waitTxToBeSuccessOrFail(ctx context.Context, txID string) (ClaimStatus, error) { - t := time.NewTicker(c.waitTxToBeMinedPeriod) - for { - select { - case <-ctx.Done(): - return "", errors.New("context cancelled") - case <-t.C: - status, err := c.sender.claimStatus(ctx, txID) - if err != nil { - return "", err - } - if status == FailedClaimStatus || status == SuccessClaimStatus { - return status, nil +func (c *ClaimSponsor) claim(ctx context.Context) error { + claim, err := c.getWIPClaim() + if err != nil && !errors.Is(err, db.ErrNotFound) { + return fmt.Errorf("error getting WIP claim: %w", err) + } + if errors.Is(err, db.ErrNotFound) || claim == nil { + // there is no WIP claim, go for the next pending claim + claim, err = c.getFirstPendingClaim() + if err != nil { + if errors.Is(err, db.ErrNotFound) { + c.logger.Debugf("queue is empty") + time.Sleep(c.waitOnEmptyQueue) + return nil } + return fmt.Errorf("error calling getClaim with globalIndex %s: %w", claim.GlobalIndex.String(), err) } - } -} - -func (c *ClaimSponsor) AddClaimToQueue(ctx context.Context, claim *Claim) error { - if claim.GlobalIndex == nil { - return ErrInvalidClaim - } - claim.Status = 
PendingClaimStatus - tx, err := c.db.BeginRw(ctx) - if err != nil { - return err - } - - _, err = getClaim(tx, claim.GlobalIndex) - if !errors.Is(err, db.ErrNotFound) { + txID, err := c.sender.sendClaim(ctx, claim) if err != nil { - tx.Rollback() - - return err - } else { - tx.Rollback() - - return errors.New("claim already added") + return fmt.Errorf("error getting sending claim: %w", err) + } + if err := c.updateClaimTxID(claim.GlobalIndex, txID); err != nil { + return fmt.Errorf("error updating claim txID: %w", err) } } - err = putClaim(tx, claim) - if err != nil { - tx.Rollback() - - return err - } - - var queuePosition uint64 - lastQueuePosition, _, err := getLastQueueIndex(tx) - switch { - case errors.Is(err, db.ErrNotFound): - queuePosition = 0 - - case err != nil: - tx.Rollback() - - return err - - default: - queuePosition = lastQueuePosition + 1 - } - - err = tx.Put(queueTable, dbCommon.Uint64ToBytes(queuePosition), claim.Key()) + c.logger.Infof("waiting for tx %s with global index %s to succeed or fail", claim.TxID, claim.GlobalIndex.String()) + status, err := c.waitTxToBeSuccessOrFail(ctx, claim.TxID) if err != nil { - tx.Rollback() - - return err + return fmt.Errorf("error calling waitTxToBeSuccessOrFail for tx %s: %w", claim.TxID, err) } - - return tx.Commit() + c.logger.Infof("tx %s with global index %s concluded with status: %s", claim.TxID, claim.GlobalIndex.String(), status) + return c.updateClaimStatus(claim.GlobalIndex, status) } -func putClaim(tx kv.RwTx, claim *Claim) error { - value, err := json.Marshal(claim) - if err != nil { - return err - } +func (c *ClaimSponsor) getWIPClaim() (*Claim, error) { + claim := &Claim{} + err := meddler.QueryRow( + c.db, claim, + `SELECT * FROM claim WHERE status = $1 ORDER BY rowid ASC LIMIT 1;`, + WIPClaimStatus, + ) + return claim, db.ReturnErrNotFound(err) +} - return tx.Put(claimTable, claim.Key(), value) +func (c *ClaimSponsor) getFirstPendingClaim() (*Claim, error) { + claim := &Claim{} + err := 
meddler.QueryRow( + c.db, claim, + `SELECT * FROM claim WHERE status = $1 ORDER BY rowid ASC LIMIT 1;`, + PendingClaimStatus, + ) + return claim, db.ReturnErrNotFound(err) } -func (c *ClaimSponsor) getClaimByQueueIndex(ctx context.Context, queueIndex uint64) (*Claim, error) { - tx, err := c.db.BeginRo(ctx) +func (c *ClaimSponsor) updateClaimTxID(globalIndex *big.Int, txID string) error { + res, err := c.db.Exec( + `UPDATE claim SET tx_id = $1 WHERE global_index = $2`, + txID, globalIndex.String(), + ) if err != nil { - return nil, err + return fmt.Errorf("error updating claim status: %w", err) } - defer tx.Rollback() - - globalIndexBytes, err := tx.GetOne(queueTable, dbCommon.Uint64ToBytes(queueIndex)) + rowsAff, err := res.RowsAffected() if err != nil { - return nil, err + return fmt.Errorf("error getting rows affected: %w", err) } - if globalIndexBytes == nil { - return nil, db.ErrNotFound + if rowsAff == 0 { + return ErrClaimDoesntExist } - - return getClaim(tx, new(big.Int).SetBytes(globalIndexBytes)) + return nil } -func getLastQueueIndex(tx kv.Tx) (uint64, *big.Int, error) { - iter, err := tx.RangeDescend( - queueTable, - dbCommon.Uint64ToBytes(math.MaxUint64), - dbCommon.Uint64ToBytes(0), 1, +func (c *ClaimSponsor) updateClaimStatus(globalIndex *big.Int, status ClaimStatus) error { + res, err := c.db.Exec( + `UPDATE claim SET status = $1 WHERE global_index = $2`, + status, globalIndex.String(), ) if err != nil { - return 0, nil, err + return fmt.Errorf("error updating claim status: %w", err) } - - return getIndex(iter) -} - -func getFirstQueueIndex(tx kv.Tx) (uint64, *big.Int, error) { - iter, err := tx.RangeAscend( - queueTable, - dbCommon.Uint64ToBytes(0), - nil, 1, - ) + rowsAff, err := res.RowsAffected() if err != nil { - return 0, nil, err + return fmt.Errorf("error getting rows affected: %w", err) } - - return getIndex(iter) -} - -func getIndex(iter iter.KV) (uint64, *big.Int, error) { - k, v, err := iter.Next() - if err != nil { - return 0, nil, err - 
} - if k == nil { - return 0, nil, db.ErrNotFound + if rowsAff == 0 { + return ErrClaimDoesntExist } - globalIndex := new(big.Int).SetBytes(v) - - return dbCommon.BytesToUint64(k), globalIndex, nil + return nil } -func (c *ClaimSponsor) GetClaim(ctx context.Context, globalIndex *big.Int) (*Claim, error) { - tx, err := c.db.BeginRo(ctx) - if err != nil { - return nil, err +func (c *ClaimSponsor) waitTxToBeSuccessOrFail(ctx context.Context, txID string) (ClaimStatus, error) { + t := time.NewTicker(c.waitTxToBeMinedPeriod) + for { + select { + case <-ctx.Done(): + return "", errors.New("context cancelled") + case <-t.C: + status, err := c.sender.claimStatus(ctx, txID) + if err != nil { + return "", err + } + if status == FailedClaimStatus || status == SuccessClaimStatus { + return status, nil + } + } } - defer tx.Rollback() +} - return getClaim(tx, globalIndex) +func (c *ClaimSponsor) AddClaimToQueue(claim *Claim) error { + claim.Status = PendingClaimStatus + return meddler.Insert(c.db, "claim", claim) } -func getClaim(tx kv.Tx, globalIndex *big.Int) (*Claim, error) { - claimBytes, err := tx.GetOne(claimTable, globalIndex.Bytes()) - if err != nil { - return nil, err - } - if claimBytes == nil { - return nil, db.ErrNotFound - } +func (c *ClaimSponsor) GetClaim(globalIndex *big.Int) (*Claim, error) { claim := &Claim{} - err = json.Unmarshal(claimBytes, claim) - - return claim, err + err := meddler.QueryRow( + c.db, claim, `SELECT * FROM claim WHERE global_index = $1`, globalIndex.String(), + ) + return claim, db.ReturnErrNotFound(err) } diff --git a/claimsponsor/e2e_test.go b/claimsponsor/e2e_test.go index 426d7b3e..dc61416e 100644 --- a/claimsponsor/e2e_test.go +++ b/claimsponsor/e2e_test.go @@ -31,7 +31,7 @@ func TestE2EL1toEVML2(t *testing.T) { go bridgeSyncL1.Start(ctx) // start claim sponsor - dbPathClaimSponsor := t.TempDir() + dbPathClaimSponsor := path.Join(t.TempDir(), "file::memory:?cache=shared") claimer, err := claimsponsor.NewEVMClaimSponsor( 
log.GetDefaultLogger(), dbPathClaimSponsor, @@ -71,7 +71,7 @@ func TestE2EL1toEVML2(t *testing.T) { // Request to sponsor claim globalIndex := bridgesync.GenerateGlobalIndex(true, 0, uint32(i)) - err = claimer.AddClaimToQueue(ctx, &claimsponsor.Claim{ + err = claimer.AddClaimToQueue(&claimsponsor.Claim{ LeafType: 0, ProofLocalExitRoot: localProof, ProofRollupExitRoot: rollupProof, @@ -90,7 +90,7 @@ func TestE2EL1toEVML2(t *testing.T) { // Wait until success succeed := false for i := 0; i < 10; i++ { - claim, err := claimer.GetClaim(ctx, globalIndex) + claim, err := claimer.GetClaim(globalIndex) require.NoError(t, err) if claim.Status == claimsponsor.FailedClaimStatus { require.NoError(t, errors.New("claim failed")) diff --git a/claimsponsor/evmclaimsponsor.go b/claimsponsor/evmclaimsponsor.go index 12d0c4ca..6f315d94 100644 --- a/claimsponsor/evmclaimsponsor.go +++ b/claimsponsor/evmclaimsponsor.go @@ -168,7 +168,7 @@ func (c *EVMClaimSponsor) claimStatus(ctx context.Context, id string) (ClaimStat switch res.Status { case ethtxtypes.MonitoredTxStatusCreated, ethtxtypes.MonitoredTxStatusSent: - return WIPStatus, nil + return WIPClaimStatus, nil case ethtxtypes.MonitoredTxStatusFailed: return FailedClaimStatus, nil case ethtxtypes.MonitoredTxStatusMined, diff --git a/claimsponsor/migrations/claimsponsor0001.sql b/claimsponsor/migrations/claimsponsor0001.sql new file mode 100644 index 00000000..9e4586ea --- /dev/null +++ b/claimsponsor/migrations/claimsponsor0001.sql @@ -0,0 +1,20 @@ +-- +migrate Down +DROP TABLE IF EXISTS claim; + +-- +migrate Up +CREATE TABLE claim ( + leaf_type INT NOT NULL, + proof_local_exit_root VARCHAR NOT NULL, + proof_rollup_exit_root VARCHAR NOT NULL, + global_index VARCHAR NOT NULL, + mainnet_exit_root VARCHAR NOT NULL, + rollup_exit_root VARCHAR NOT NULL, + origin_network INT NOT NULL, + origin_token_address VARCHAR NOT NULL, + destination_network INT NOT NULL, + destination_address VARCHAR NOT NULL, + amount VARCHAR NOT NULL, + metadata 
VARCHAR, + status VARCHAR NOT NULL, + tx_id VARCHAR NOT NULL +); \ No newline at end of file diff --git a/claimsponsor/migrations/migrations.go b/claimsponsor/migrations/migrations.go new file mode 100644 index 00000000..9166b5b3 --- /dev/null +++ b/claimsponsor/migrations/migrations.go @@ -0,0 +1,21 @@ +package migrations + +import ( + _ "embed" + + "github.com/0xPolygon/cdk/db" + "github.com/0xPolygon/cdk/db/types" +) + +//go:embed claimsponsor0001.sql +var mig001 string + +func RunMigrations(dbPath string) error { + migrations := []types.Migration{ + { + ID: "claimsponsor0001", + SQL: mig001, + }, + } + return db.RunMigrations(dbPath, migrations) +} diff --git a/rpc/bridge.go b/rpc/bridge.go index e9865108..7b52ed73 100644 --- a/rpc/bridge.go +++ b/rpc/bridge.go @@ -229,7 +229,7 @@ func (b *BridgeEndpoints) SponsorClaim(claim claimsponsor.Claim) (interface{}, r fmt.Sprintf("this client only sponsors claims for network %d", b.networkID), ) } - if err := b.sponsor.AddClaimToQueue(ctx, &claim); err != nil { + if err := b.sponsor.AddClaimToQueue(&claim); err != nil { return zeroHex, rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("error adding claim to the queue %s", err)) } return nil, nil @@ -250,7 +250,7 @@ func (b *BridgeEndpoints) GetSponsoredClaimStatus(globalIndex *big.Int) (interfa if b.sponsor == nil { return zeroHex, rpc.NewRPCError(rpc.DefaultErrorCode, "this client does not support claim sponsoring") } - claim, err := b.sponsor.GetClaim(ctx, globalIndex) + claim, err := b.sponsor.GetClaim(globalIndex) if err != nil { return zeroHex, rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get claim status, error: %s", err)) } diff --git a/rpc/bridge_interfaces.go b/rpc/bridge_interfaces.go index 89929531..bf6721ea 100644 --- a/rpc/bridge_interfaces.go +++ b/rpc/bridge_interfaces.go @@ -36,6 +36,6 @@ type L1InfoTreer interface { } type ClaimSponsorer interface { - AddClaimToQueue(ctx context.Context, claim *claimsponsor.Claim) error - GetClaim(ctx 
context.Context, globalIndex *big.Int) (*claimsponsor.Claim, error) + AddClaimToQueue(claim *claimsponsor.Claim) error + GetClaim(globalIndex *big.Int) (*claimsponsor.Claim, error) } diff --git a/rpc/mocks/claim_sponsorer.go b/rpc/mocks/claim_sponsorer.go index 59530955..9a9ef9b5 100644 --- a/rpc/mocks/claim_sponsorer.go +++ b/rpc/mocks/claim_sponsorer.go @@ -3,11 +3,9 @@ package mocks import ( - context "context" big "math/big" claimsponsor "github.com/0xPolygon/cdk/claimsponsor" - mock "github.com/stretchr/testify/mock" ) @@ -24,17 +22,17 @@ func (_m *ClaimSponsorer) EXPECT() *ClaimSponsorer_Expecter { return &ClaimSponsorer_Expecter{mock: &_m.Mock} } -// AddClaimToQueue provides a mock function with given fields: ctx, claim -func (_m *ClaimSponsorer) AddClaimToQueue(ctx context.Context, claim *claimsponsor.Claim) error { - ret := _m.Called(ctx, claim) +// AddClaimToQueue provides a mock function with given fields: claim +func (_m *ClaimSponsorer) AddClaimToQueue(claim *claimsponsor.Claim) error { + ret := _m.Called(claim) if len(ret) == 0 { panic("no return value specified for AddClaimToQueue") } var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *claimsponsor.Claim) error); ok { - r0 = rf(ctx, claim) + if rf, ok := ret.Get(0).(func(*claimsponsor.Claim) error); ok { + r0 = rf(claim) } else { r0 = ret.Error(0) } @@ -48,15 +46,14 @@ type ClaimSponsorer_AddClaimToQueue_Call struct { } // AddClaimToQueue is a helper method to define mock.On call -// - ctx context.Context // - claim *claimsponsor.Claim -func (_e *ClaimSponsorer_Expecter) AddClaimToQueue(ctx interface{}, claim interface{}) *ClaimSponsorer_AddClaimToQueue_Call { - return &ClaimSponsorer_AddClaimToQueue_Call{Call: _e.mock.On("AddClaimToQueue", ctx, claim)} +func (_e *ClaimSponsorer_Expecter) AddClaimToQueue(claim interface{}) *ClaimSponsorer_AddClaimToQueue_Call { + return &ClaimSponsorer_AddClaimToQueue_Call{Call: _e.mock.On("AddClaimToQueue", claim)} } -func (_c 
*ClaimSponsorer_AddClaimToQueue_Call) Run(run func(ctx context.Context, claim *claimsponsor.Claim)) *ClaimSponsorer_AddClaimToQueue_Call { +func (_c *ClaimSponsorer_AddClaimToQueue_Call) Run(run func(claim *claimsponsor.Claim)) *ClaimSponsorer_AddClaimToQueue_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(*claimsponsor.Claim)) + run(args[0].(*claimsponsor.Claim)) }) return _c } @@ -66,14 +63,14 @@ func (_c *ClaimSponsorer_AddClaimToQueue_Call) Return(_a0 error) *ClaimSponsorer return _c } -func (_c *ClaimSponsorer_AddClaimToQueue_Call) RunAndReturn(run func(context.Context, *claimsponsor.Claim) error) *ClaimSponsorer_AddClaimToQueue_Call { +func (_c *ClaimSponsorer_AddClaimToQueue_Call) RunAndReturn(run func(*claimsponsor.Claim) error) *ClaimSponsorer_AddClaimToQueue_Call { _c.Call.Return(run) return _c } -// GetClaim provides a mock function with given fields: ctx, globalIndex -func (_m *ClaimSponsorer) GetClaim(ctx context.Context, globalIndex *big.Int) (*claimsponsor.Claim, error) { - ret := _m.Called(ctx, globalIndex) +// GetClaim provides a mock function with given fields: globalIndex +func (_m *ClaimSponsorer) GetClaim(globalIndex *big.Int) (*claimsponsor.Claim, error) { + ret := _m.Called(globalIndex) if len(ret) == 0 { panic("no return value specified for GetClaim") @@ -81,19 +78,19 @@ func (_m *ClaimSponsorer) GetClaim(ctx context.Context, globalIndex *big.Int) (* var r0 *claimsponsor.Claim var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *big.Int) (*claimsponsor.Claim, error)); ok { - return rf(ctx, globalIndex) + if rf, ok := ret.Get(0).(func(*big.Int) (*claimsponsor.Claim, error)); ok { + return rf(globalIndex) } - if rf, ok := ret.Get(0).(func(context.Context, *big.Int) *claimsponsor.Claim); ok { - r0 = rf(ctx, globalIndex) + if rf, ok := ret.Get(0).(func(*big.Int) *claimsponsor.Claim); ok { + r0 = rf(globalIndex) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*claimsponsor.Claim) } } - if rf, ok 
:= ret.Get(1).(func(context.Context, *big.Int) error); ok { - r1 = rf(ctx, globalIndex) + if rf, ok := ret.Get(1).(func(*big.Int) error); ok { + r1 = rf(globalIndex) } else { r1 = ret.Error(1) } @@ -107,15 +104,14 @@ type ClaimSponsorer_GetClaim_Call struct { } // GetClaim is a helper method to define mock.On call -// - ctx context.Context // - globalIndex *big.Int -func (_e *ClaimSponsorer_Expecter) GetClaim(ctx interface{}, globalIndex interface{}) *ClaimSponsorer_GetClaim_Call { - return &ClaimSponsorer_GetClaim_Call{Call: _e.mock.On("GetClaim", ctx, globalIndex)} +func (_e *ClaimSponsorer_Expecter) GetClaim(globalIndex interface{}) *ClaimSponsorer_GetClaim_Call { + return &ClaimSponsorer_GetClaim_Call{Call: _e.mock.On("GetClaim", globalIndex)} } -func (_c *ClaimSponsorer_GetClaim_Call) Run(run func(ctx context.Context, globalIndex *big.Int)) *ClaimSponsorer_GetClaim_Call { +func (_c *ClaimSponsorer_GetClaim_Call) Run(run func(globalIndex *big.Int)) *ClaimSponsorer_GetClaim_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(*big.Int)) + run(args[0].(*big.Int)) }) return _c } @@ -125,7 +121,7 @@ func (_c *ClaimSponsorer_GetClaim_Call) Return(_a0 *claimsponsor.Claim, _a1 erro return _c } -func (_c *ClaimSponsorer_GetClaim_Call) RunAndReturn(run func(context.Context, *big.Int) (*claimsponsor.Claim, error)) *ClaimSponsorer_GetClaim_Call { +func (_c *ClaimSponsorer_GetClaim_Call) RunAndReturn(run func(*big.Int) (*claimsponsor.Claim, error)) *ClaimSponsorer_GetClaim_Call { _c.Call.Return(run) return _c } From effc267b047ab814f1a4a0e32c4f48a7fe01a5f8 Mon Sep 17 00:00:00 2001 From: Victor Castell <0x@vcastellm.xyz> Date: Wed, 6 Nov 2024 07:37:41 +0000 Subject: [PATCH 12/33] chore: update versions --- crates/cdk/versions.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/cdk/versions.json b/crates/cdk/versions.json index 7cbe12dd..0ee84361 100644 --- a/crates/cdk/versions.json +++ b/crates/cdk/versions.json 
@@ -1,5 +1,5 @@ { - "agglayer_image": "ghcr.io/agglayer/agglayer:feature-storage-adding-epoch-packing", + "agglayer_image": "ghcr.io/agglayer/agglayer:0.2.0", "cdk_erigon_node_image": "hermeznetwork/cdk-erigon:v2.1.1", "cdk_node_image": "ghcr.io/0xpolygon/cdk:0.4.0-beta1", "cdk_validium_node_image": "0xpolygon/cdk-validium-node:0.7.0-cdk", From a5422d2de3afc45de6f933208fa72162f98387ac Mon Sep 17 00:00:00 2001 From: Victor Castell <0x@vcastellm.xyz> Date: Wed, 6 Nov 2024 07:42:54 +0000 Subject: [PATCH 13/33] chore: bump cdk-erigon to v2.1.2 --- test/combinations/fork11-rollup.yml | 2 +- test/combinations/fork12-cdk-validium.yml | 2 +- test/combinations/fork12-rollup.yml | 2 +- test/combinations/fork9-cdk-validium.yml | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/test/combinations/fork11-rollup.yml b/test/combinations/fork11-rollup.yml index 1afd8f79..fb941760 100644 --- a/test/combinations/fork11-rollup.yml +++ b/test/combinations/fork11-rollup.yml @@ -1,7 +1,7 @@ args: zkevm_contracts_image: leovct/zkevm-contracts:v7.0.0-rc.2-fork.11 zkevm_prover_image: hermeznetwork/zkevm-prover:v7.0.2-fork.11 - cdk_erigon_node_image: hermeznetwork/cdk-erigon:v2.1.0 + cdk_erigon_node_image: hermeznetwork/cdk-erigon:v2.1.2 zkevm_node_image: hermeznetwork/zkevm-node:v0.7.0-fork11-RC1 cdk_node_image: cdk zkevm_use_gas_token_contract: true diff --git a/test/combinations/fork12-cdk-validium.yml b/test/combinations/fork12-cdk-validium.yml index ed618754..9619b0f9 100644 --- a/test/combinations/fork12-cdk-validium.yml +++ b/test/combinations/fork12-cdk-validium.yml @@ -1,7 +1,7 @@ args: zkevm_contracts_image: leovct/zkevm-contracts:v8.0.0-rc.4-fork.12 zkevm_prover_image: hermeznetwork/zkevm-prover:v8.0.0-RC12-fork.12 - cdk_erigon_node_image: hermeznetwork/cdk-erigon:v2.1.0 + cdk_erigon_node_image: hermeznetwork/cdk-erigon:v2.1.2 cdk_node_image: cdk zkevm_use_gas_token_contract: true data_availability_mode: cdk-validium diff --git 
a/test/combinations/fork12-rollup.yml b/test/combinations/fork12-rollup.yml index c97a25cf..95a5111a 100644 --- a/test/combinations/fork12-rollup.yml +++ b/test/combinations/fork12-rollup.yml @@ -1,7 +1,7 @@ args: zkevm_contracts_image: leovct/zkevm-contracts:v8.0.0-rc.4-fork.12 zkevm_prover_image: hermeznetwork/zkevm-prover:v8.0.0-RC12-fork.12 - cdk_erigon_node_image: hermeznetwork/cdk-erigon:v2.1.0 + cdk_erigon_node_image: hermeznetwork/cdk-erigon:v2.1.2 cdk_node_image: cdk zkevm_use_gas_token_contract: true data_availability_mode: rollup diff --git a/test/combinations/fork9-cdk-validium.yml b/test/combinations/fork9-cdk-validium.yml index c28b2c49..e0543654 100644 --- a/test/combinations/fork9-cdk-validium.yml +++ b/test/combinations/fork9-cdk-validium.yml @@ -1,7 +1,7 @@ args: zkevm_contracts_image: leovct/zkevm-contracts:v6.0.0-rc.1-fork.9 zkevm_prover_image: hermeznetwork/zkevm-prover:v6.0.6 - cdk_erigon_node_image: hermeznetwork/cdk-erigon:v2.1.0 + cdk_erigon_node_image: hermeznetwork/cdk-erigon:v2.1.2 zkevm_node_image: hermeznetwork/zkevm-node:v0.7.3-RC1 cdk_validium_node_image: 0xpolygon/cdk-validium-node:0.7.0-cdk cdk_node_image: cdk From 910b23bbac1c45002eaeaae8ef51f13f00e49a20 Mon Sep 17 00:00:00 2001 From: Victor Castell <0x@vcastellm.xyz> Date: Wed, 6 Nov 2024 07:45:55 +0000 Subject: [PATCH 14/33] Revert "chore: bump cdk-erigon to v2.1.2" This reverts commit a5422d2de3afc45de6f933208fa72162f98387ac. 
--- test/combinations/fork11-rollup.yml | 2 +- test/combinations/fork12-cdk-validium.yml | 2 +- test/combinations/fork12-rollup.yml | 2 +- test/combinations/fork9-cdk-validium.yml | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/test/combinations/fork11-rollup.yml b/test/combinations/fork11-rollup.yml index fb941760..1afd8f79 100644 --- a/test/combinations/fork11-rollup.yml +++ b/test/combinations/fork11-rollup.yml @@ -1,7 +1,7 @@ args: zkevm_contracts_image: leovct/zkevm-contracts:v7.0.0-rc.2-fork.11 zkevm_prover_image: hermeznetwork/zkevm-prover:v7.0.2-fork.11 - cdk_erigon_node_image: hermeznetwork/cdk-erigon:v2.1.2 + cdk_erigon_node_image: hermeznetwork/cdk-erigon:v2.1.0 zkevm_node_image: hermeznetwork/zkevm-node:v0.7.0-fork11-RC1 cdk_node_image: cdk zkevm_use_gas_token_contract: true diff --git a/test/combinations/fork12-cdk-validium.yml b/test/combinations/fork12-cdk-validium.yml index 9619b0f9..ed618754 100644 --- a/test/combinations/fork12-cdk-validium.yml +++ b/test/combinations/fork12-cdk-validium.yml @@ -1,7 +1,7 @@ args: zkevm_contracts_image: leovct/zkevm-contracts:v8.0.0-rc.4-fork.12 zkevm_prover_image: hermeznetwork/zkevm-prover:v8.0.0-RC12-fork.12 - cdk_erigon_node_image: hermeznetwork/cdk-erigon:v2.1.2 + cdk_erigon_node_image: hermeznetwork/cdk-erigon:v2.1.0 cdk_node_image: cdk zkevm_use_gas_token_contract: true data_availability_mode: cdk-validium diff --git a/test/combinations/fork12-rollup.yml b/test/combinations/fork12-rollup.yml index 95a5111a..c97a25cf 100644 --- a/test/combinations/fork12-rollup.yml +++ b/test/combinations/fork12-rollup.yml @@ -1,7 +1,7 @@ args: zkevm_contracts_image: leovct/zkevm-contracts:v8.0.0-rc.4-fork.12 zkevm_prover_image: hermeznetwork/zkevm-prover:v8.0.0-RC12-fork.12 - cdk_erigon_node_image: hermeznetwork/cdk-erigon:v2.1.2 + cdk_erigon_node_image: hermeznetwork/cdk-erigon:v2.1.0 cdk_node_image: cdk zkevm_use_gas_token_contract: true data_availability_mode: rollup diff --git 
a/test/combinations/fork9-cdk-validium.yml b/test/combinations/fork9-cdk-validium.yml index e0543654..c28b2c49 100644 --- a/test/combinations/fork9-cdk-validium.yml +++ b/test/combinations/fork9-cdk-validium.yml @@ -1,7 +1,7 @@ args: zkevm_contracts_image: leovct/zkevm-contracts:v6.0.0-rc.1-fork.9 zkevm_prover_image: hermeznetwork/zkevm-prover:v6.0.6 - cdk_erigon_node_image: hermeznetwork/cdk-erigon:v2.1.2 + cdk_erigon_node_image: hermeznetwork/cdk-erigon:v2.1.0 zkevm_node_image: hermeznetwork/zkevm-node:v0.7.3-RC1 cdk_validium_node_image: 0xpolygon/cdk-validium-node:0.7.0-cdk cdk_node_image: cdk From 85c5735ee0ca0b873725b63c957ded958228187f Mon Sep 17 00:00:00 2001 From: Victor Castell <0x@vcastellm.xyz> Date: Wed, 6 Nov 2024 15:35:04 +0000 Subject: [PATCH 15/33] apply feedback --- crates/cdk/build.rs | 20 ++++++-------------- crates/cdk/versions.json | 2 +- 2 files changed, 7 insertions(+), 15 deletions(-) diff --git a/crates/cdk/build.rs b/crates/cdk/build.rs index 802b68c0..cceff95c 100644 --- a/crates/cdk/build.rs +++ b/crates/cdk/build.rs @@ -67,7 +67,7 @@ fn build_versions() -> std::io::Result<()> { let mut file = File::create(&dest_path)?; file.write_all(content.as_bytes())?; - // Get lines 28 to 40 from the contents of the starlark file + // Get the corresponding lines from the contents of the starlark file let versions = content .lines() .skip(30) @@ -84,8 +84,6 @@ fn build_versions() -> std::io::Result<()> { // Replace the trailing comma on the last line let versions = versions.replace(", }", " }"); - print!("{}", versions); - // The versions string is a JSON object we can parse let versions_json: serde_json::Value = serde_json::from_str(&versions).unwrap(); @@ -93,18 +91,12 @@ fn build_versions() -> std::io::Result<()> { let dest_path = Path::new(".").join("versions.json"); let mut file = File::create(&dest_path)?; file.write_all( - serde_json::to_string_pretty(&versions_json) - .unwrap() - .as_bytes(), + format!( + "{}\n", + 
serde_json::to_string_pretty(&versions_json).unwrap() + ) + .as_bytes(), )?; - // Optionally, print the output of the make command - println!("cargo:rerun-if-changed=build.rs"); - - // Here you can also add additional commands to inform Cargo about - // how to rerun the build script. For example, to rerun this script - // only when a specific file changes: - // println!("cargo:rerun-if-changed=path/to/file"); - Ok(()) } diff --git a/crates/cdk/versions.json b/crates/cdk/versions.json index 0ee84361..13e1c430 100644 --- a/crates/cdk/versions.json +++ b/crates/cdk/versions.json @@ -12,4 +12,4 @@ "zkevm_pool_manager_image": "hermeznetwork/zkevm-pool-manager:v0.1.1", "zkevm_prover_image": "hermeznetwork/zkevm-prover:v8.0.0-RC14-fork.12", "zkevm_sequence_sender_image": "hermeznetwork/zkevm-sequence-sender:v0.2.4" -} \ No newline at end of file +} From 5bee873df5d06971d1c7c9545dfc2649b140bc5d Mon Sep 17 00:00:00 2001 From: Victor Castell Date: Wed, 6 Nov 2024 16:36:58 +0100 Subject: [PATCH 16/33] Reapply "chore: bump cdk-erigon to v2.1.2" (#162) This reverts commit 910b23bbac1c45002eaeaae8ef51f13f00e49a20. 
--- test/combinations/fork11-rollup.yml | 2 +- test/combinations/fork12-cdk-validium.yml | 2 +- test/combinations/fork12-rollup.yml | 2 +- test/combinations/fork9-cdk-validium.yml | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/test/combinations/fork11-rollup.yml b/test/combinations/fork11-rollup.yml index 1afd8f79..fb941760 100644 --- a/test/combinations/fork11-rollup.yml +++ b/test/combinations/fork11-rollup.yml @@ -1,7 +1,7 @@ args: zkevm_contracts_image: leovct/zkevm-contracts:v7.0.0-rc.2-fork.11 zkevm_prover_image: hermeznetwork/zkevm-prover:v7.0.2-fork.11 - cdk_erigon_node_image: hermeznetwork/cdk-erigon:v2.1.0 + cdk_erigon_node_image: hermeznetwork/cdk-erigon:v2.1.2 zkevm_node_image: hermeznetwork/zkevm-node:v0.7.0-fork11-RC1 cdk_node_image: cdk zkevm_use_gas_token_contract: true diff --git a/test/combinations/fork12-cdk-validium.yml b/test/combinations/fork12-cdk-validium.yml index ed618754..9619b0f9 100644 --- a/test/combinations/fork12-cdk-validium.yml +++ b/test/combinations/fork12-cdk-validium.yml @@ -1,7 +1,7 @@ args: zkevm_contracts_image: leovct/zkevm-contracts:v8.0.0-rc.4-fork.12 zkevm_prover_image: hermeznetwork/zkevm-prover:v8.0.0-RC12-fork.12 - cdk_erigon_node_image: hermeznetwork/cdk-erigon:v2.1.0 + cdk_erigon_node_image: hermeznetwork/cdk-erigon:v2.1.2 cdk_node_image: cdk zkevm_use_gas_token_contract: true data_availability_mode: cdk-validium diff --git a/test/combinations/fork12-rollup.yml b/test/combinations/fork12-rollup.yml index c97a25cf..95a5111a 100644 --- a/test/combinations/fork12-rollup.yml +++ b/test/combinations/fork12-rollup.yml @@ -1,7 +1,7 @@ args: zkevm_contracts_image: leovct/zkevm-contracts:v8.0.0-rc.4-fork.12 zkevm_prover_image: hermeznetwork/zkevm-prover:v8.0.0-RC12-fork.12 - cdk_erigon_node_image: hermeznetwork/cdk-erigon:v2.1.0 + cdk_erigon_node_image: hermeznetwork/cdk-erigon:v2.1.2 cdk_node_image: cdk zkevm_use_gas_token_contract: true data_availability_mode: rollup diff --git 
a/test/combinations/fork9-cdk-validium.yml b/test/combinations/fork9-cdk-validium.yml index c28b2c49..e0543654 100644 --- a/test/combinations/fork9-cdk-validium.yml +++ b/test/combinations/fork9-cdk-validium.yml @@ -1,7 +1,7 @@ args: zkevm_contracts_image: leovct/zkevm-contracts:v6.0.0-rc.1-fork.9 zkevm_prover_image: hermeznetwork/zkevm-prover:v6.0.6 - cdk_erigon_node_image: hermeznetwork/cdk-erigon:v2.1.0 + cdk_erigon_node_image: hermeznetwork/cdk-erigon:v2.1.2 zkevm_node_image: hermeznetwork/zkevm-node:v0.7.3-RC1 cdk_validium_node_image: 0xpolygon/cdk-validium-node:0.7.0-cdk cdk_node_image: cdk From c0724a0aa8365e2e87a9741301dcbf28b1ef7889 Mon Sep 17 00:00:00 2001 From: Victor Castell <0x@vcastellm.xyz> Date: Thu, 7 Nov 2024 07:50:55 +0000 Subject: [PATCH 17/33] bump versions --- crates/cdk/versions.json | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/cdk/versions.json b/crates/cdk/versions.json index 13e1c430..36f2af1f 100644 --- a/crates/cdk/versions.json +++ b/crates/cdk/versions.json @@ -1,7 +1,7 @@ { - "agglayer_image": "ghcr.io/agglayer/agglayer:0.2.0", - "cdk_erigon_node_image": "hermeznetwork/cdk-erigon:v2.1.1", - "cdk_node_image": "ghcr.io/0xpolygon/cdk:0.4.0-beta1", + "agglayer_image": "ghcr.io/agglayer/agglayer:0.2.0-rc.5", + "cdk_erigon_node_image": "hermeznetwork/cdk-erigon:v2.1.2", + "cdk_node_image": "ghcr.io/0xpolygon/cdk:0.4.0-beta4", "cdk_validium_node_image": "0xpolygon/cdk-validium-node:0.7.0-cdk", "zkevm_bridge_proxy_image": "haproxy:3.0-bookworm", "zkevm_bridge_service_image": "hermeznetwork/zkevm-bridge-service:v0.6.0-RC1", From d7d994783362b45d7876f9aca683479906678b61 Mon Sep 17 00:00:00 2001 From: rbpol Date: Thu, 7 Nov 2024 14:24:14 +0000 Subject: [PATCH 18/33] feat: Use ListOffChainData instead of GetOffChainData (#152) --- .../datacommittee/datacommittee.go | 57 +++++++++---------- 1 file changed, 26 insertions(+), 31 deletions(-) diff --git a/dataavailability/datacommittee/datacommittee.go 
b/dataavailability/datacommittee/datacommittee.go index 01b96a13..474c5934 100644 --- a/dataavailability/datacommittee/datacommittee.go +++ b/dataavailability/datacommittee/datacommittee.go @@ -107,28 +107,16 @@ func (d *Backend) Init() error { // GetSequence gets backend data one hash at a time. This should be optimized on the DAC side to get them all at once. func (d *Backend) GetSequence(_ context.Context, hashes []common.Hash, _ []byte) ([][]byte, error) { - // TODO: optimize this on the DAC side by implementing a multi batch retrieve api) - batchData := make([][]byte, 0, len(hashes)) - for _, h := range hashes { - data, err := d.GetBatchL2Data(h) - if err != nil { - return nil, err - } - batchData = append(batchData, data) - } - - return batchData, nil -} - -// GetBatchL2Data returns the data from the DAC. It checks that it matches with the expected hash -func (d *Backend) GetBatchL2Data(hash common.Hash) ([]byte, error) { intialMember := d.selectedCommitteeMember - found := false + + var found bool for !found && intialMember != -1 { member := d.committeeMembers[d.selectedCommitteeMember] d.logger.Infof("trying to get data from %s at %s", member.Addr.Hex(), member.URL) + c := d.dataCommitteeClientFactory.New(member.URL) - data, err := c.GetOffChainData(d.ctx, hash) + + dataMap, err := c.ListOffChainData(d.ctx, hashes) if err != nil { d.logger.Warnf( "error getting data from DAC node %s at %s: %s", @@ -141,25 +129,32 @@ func (d *Backend) GetBatchL2Data(hash common.Hash) ([]byte, error) { continue } - actualTransactionsHash := crypto.Keccak256Hash(data) - if actualTransactionsHash != hash { - unexpectedHash := fmt.Errorf( - unexpectedHashTemplate, hash, actualTransactionsHash, - ) - d.logger.Warnf( - "error getting data from DAC node %s at %s: %s", - member.Addr.Hex(), member.URL, unexpectedHash, - ) - d.selectedCommitteeMember = (d.selectedCommitteeMember + 1) % len(d.committeeMembers) - if d.selectedCommitteeMember == intialMember { - break + + batchData := 
make([][]byte, 0, len(hashes)) + for _, hash := range hashes { + actualTransactionsHash := crypto.Keccak256Hash(dataMap[hash]) + if actualTransactionsHash != hash { + unexpectedHash := fmt.Errorf( + unexpectedHashTemplate, hash, actualTransactionsHash, + ) + d.logger.Warnf( + "error getting data from DAC node %s at %s: %s", + member.Addr.Hex(), member.URL, unexpectedHash, + ) + d.selectedCommitteeMember = (d.selectedCommitteeMember + 1) % len(d.committeeMembers) + if d.selectedCommitteeMember == intialMember { + break + } + + continue } - continue + batchData = append(batchData, dataMap[hash]) } - return data, nil + return batchData, nil } + if err := d.Init(); err != nil { return nil, fmt.Errorf("error loading data committee: %w", err) } From d9aa92a15eec307545b35d3f5d5be78202a7b588 Mon Sep 17 00:00:00 2001 From: Arnau Bennassar Date: Thu, 7 Nov 2024 09:11:19 -0600 Subject: [PATCH 19/33] feat: sqlite reorgdetector (#160) * wip * implementation * fix tests * wip * mdbx is gone * increase coverage * remove ifElseChain from golangci * remove ifElseChain from golangci * remove ifElseChain from golangci * increase coverage * increase coverage * identation * identation * identation * fix kurtosis config --- .golangci.yml | 2 + aggregator/aggregator.go | 1 - bridgesync/claimcalldata_test.go | 32 +++++ bridgesync/e2e_test.go | 2 +- config/default.go | 5 +- go.mod | 9 -- go.sum | 21 --- l1infotreesync/e2e_test.go | 4 +- reorgdetector/migrations/migrations.go | 21 +++ .../migrations/reorgdetector0001.sql | 11 ++ reorgdetector/reorgdetector.go | 43 ++++--- reorgdetector/reorgdetector_db.go | 120 +++++++----------- reorgdetector/reorgdetector_test.go | 68 +++++++++- reorgdetector/types.go | 10 +- rpc/bridge.go | 2 - test/aggoraclehelpers/aggoracle_e2e.go | 2 +- .../kurtosis-cdk-node-config.toml.template | 2 +- 17 files changed, 219 insertions(+), 136 deletions(-) create mode 100644 reorgdetector/migrations/migrations.go create mode 100644 
reorgdetector/migrations/reorgdetector0001.sql diff --git a/.golangci.yml b/.golangci.yml index 98197d74..00f17235 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -49,6 +49,8 @@ linters-settings: gocritic: enabled-checks: - ruleguard + disabled-checks: + - ifElseChain revive: rules: - name: exported diff --git a/aggregator/aggregator.go b/aggregator/aggregator.go index 8aa78011..3541aaaf 100644 --- a/aggregator/aggregator.go +++ b/aggregator/aggregator.go @@ -711,7 +711,6 @@ func (a *Aggregator) validateEligibleFinalProof( batchNumberToVerify := lastVerifiedBatchNum + 1 if proof.BatchNumber != batchNumberToVerify { - //nolint:gocritic if proof.BatchNumber < batchNumberToVerify && proof.BatchNumberFinal >= batchNumberToVerify { // We have a proof that contains some batches below the last batch verified, anyway can be eligible as final proof diff --git a/bridgesync/claimcalldata_test.go b/bridgesync/claimcalldata_test.go index a4ab49de..ef2d60bd 100644 --- a/bridgesync/claimcalldata_test.go +++ b/bridgesync/claimcalldata_test.go @@ -127,6 +127,7 @@ func TestClaimCalldata(t *testing.T) { require.NoError(t, err) time.Sleep(1 * time.Second) r, err := client.TransactionReceipt(ctx, tx.Hash()) + require.NoError(t, err) testCases = append(testCases, testCase{ description: "direct call to claim asset", bridgeAddr: bridgeAddr, @@ -155,6 +156,7 @@ func TestClaimCalldata(t *testing.T) { require.NoError(t, err) time.Sleep(1 * time.Second) r, err = client.TransactionReceipt(ctx, tx.Hash()) + require.NoError(t, err) testCases = append(testCases, testCase{ description: "indirect call to claim asset", bridgeAddr: bridgeAddr, @@ -188,6 +190,7 @@ func TestClaimCalldata(t *testing.T) { require.NoError(t, err) time.Sleep(1 * time.Second) r, err = client.TransactionReceipt(ctx, tx.Hash()) + require.NoError(t, err) testCases = append(testCases, testCase{ description: "indirect call to claim asset bytes", bridgeAddr: bridgeAddr, @@ -215,6 +218,7 @@ func TestClaimCalldata(t *testing.T) { 
require.NoError(t, err) time.Sleep(1 * time.Second) r, err = client.TransactionReceipt(ctx, tx.Hash()) + require.NoError(t, err) testCases = append(testCases, testCase{ description: "direct call to claim message", bridgeAddr: bridgeAddr, @@ -243,6 +247,7 @@ func TestClaimCalldata(t *testing.T) { require.NoError(t, err) time.Sleep(1 * time.Second) r, err = client.TransactionReceipt(ctx, tx.Hash()) + require.NoError(t, err) testCases = append(testCases, testCase{ description: "indirect call to claim message", bridgeAddr: bridgeAddr, @@ -276,6 +281,7 @@ func TestClaimCalldata(t *testing.T) { require.NoError(t, err) time.Sleep(1 * time.Second) r, err = client.TransactionReceipt(ctx, tx.Hash()) + require.NoError(t, err) testCases = append(testCases, testCase{ description: "indirect call to claim message bytes", bridgeAddr: bridgeAddr, @@ -309,6 +315,7 @@ func TestClaimCalldata(t *testing.T) { require.NoError(t, err) time.Sleep(1 * time.Second) r, err = client.TransactionReceipt(ctx, tx.Hash()) + require.NoError(t, err) log.Infof("%+v", r.Logs) reverted := [2]bool{false, false} @@ -357,6 +364,7 @@ func TestClaimCalldata(t *testing.T) { require.NoError(t, err) time.Sleep(1 * time.Second) r, err = client.TransactionReceipt(ctx, tx.Hash()) + require.NoError(t, err) testCases = append(testCases, testCase{ description: "2 indirect call claim message 1 (same globalIndex)", bridgeAddr: bridgeAddr, @@ -414,6 +422,7 @@ func TestClaimCalldata(t *testing.T) { require.NoError(t, err) time.Sleep(1 * time.Second) r, err = client.TransactionReceipt(ctx, tx.Hash()) + require.NoError(t, err) testCases = append(testCases, testCase{ description: "2 indirect call claim message 1 (diff globalIndex)", bridgeAddr: bridgeAddr, @@ -473,6 +482,7 @@ func TestClaimCalldata(t *testing.T) { require.NoError(t, err) time.Sleep(1 * time.Second) r, err = client.TransactionReceipt(ctx, tx.Hash()) + require.NoError(t, err) testCases = append(testCases, testCase{ description: "2 indirect call claim message 
(same globalIndex) (1 ok, 1 reverted)", bridgeAddr: bridgeAddr, @@ -524,6 +534,7 @@ func TestClaimCalldata(t *testing.T) { require.NoError(t, err) time.Sleep(1 * time.Second) r, err = client.TransactionReceipt(ctx, tx.Hash()) + require.NoError(t, err) testCases = append(testCases, testCase{ description: "2 indirect call claim message (diff globalIndex) (1 ok, 1 reverted)", bridgeAddr: bridgeAddr, @@ -577,6 +588,7 @@ func TestClaimCalldata(t *testing.T) { require.NoError(t, err) time.Sleep(1 * time.Second) r, err = client.TransactionReceipt(ctx, tx.Hash()) + require.NoError(t, err) testCases = append(testCases, testCase{ description: "2 indirect call claim message (same globalIndex) (reverted,ok)", bridgeAddr: bridgeAddr, @@ -628,6 +640,7 @@ func TestClaimCalldata(t *testing.T) { require.NoError(t, err) time.Sleep(1 * time.Second) r, err = client.TransactionReceipt(ctx, tx.Hash()) + require.NoError(t, err) testCases = append(testCases, testCase{ description: "2 indirect call claim message (diff globalIndex) (reverted,ok)", bridgeAddr: bridgeAddr, @@ -681,6 +694,7 @@ func TestClaimCalldata(t *testing.T) { require.NoError(t, err) time.Sleep(1 * time.Second) r, err = client.TransactionReceipt(ctx, tx.Hash()) + require.NoError(t, err) testCases = append(testCases, testCase{ description: "2 indirect call claim asset 1 (same globalIndex)", bridgeAddr: bridgeAddr, @@ -738,6 +752,7 @@ func TestClaimCalldata(t *testing.T) { require.NoError(t, err) time.Sleep(1 * time.Second) r, err = client.TransactionReceipt(ctx, tx.Hash()) + require.NoError(t, err) testCases = append(testCases, testCase{ description: "2 indirect call claim asset 1 (diff globalIndex)", bridgeAddr: bridgeAddr, @@ -797,6 +812,7 @@ func TestClaimCalldata(t *testing.T) { require.NoError(t, err) time.Sleep(1 * time.Second) r, err = client.TransactionReceipt(ctx, tx.Hash()) + require.NoError(t, err) testCases = append(testCases, testCase{ description: "2 indirect call claim asset (same globalIndex) (1 ok, 1 
reverted)", bridgeAddr: bridgeAddr, @@ -848,6 +864,7 @@ func TestClaimCalldata(t *testing.T) { require.NoError(t, err) time.Sleep(1 * time.Second) r, err = client.TransactionReceipt(ctx, tx.Hash()) + require.NoError(t, err) testCases = append(testCases, testCase{ description: "2 indirect call claim asset (diff globalIndex) (1 ok, 1 reverted)", bridgeAddr: bridgeAddr, @@ -901,6 +918,7 @@ func TestClaimCalldata(t *testing.T) { require.NoError(t, err) time.Sleep(1 * time.Second) r, err = client.TransactionReceipt(ctx, tx.Hash()) + require.NoError(t, err) testCases = append(testCases, testCase{ description: "2 indirect call claim asset (same globalIndex) (reverted,ok)", bridgeAddr: bridgeAddr, @@ -952,6 +970,7 @@ func TestClaimCalldata(t *testing.T) { require.NoError(t, err) time.Sleep(1 * time.Second) r, err = client.TransactionReceipt(ctx, tx.Hash()) + require.NoError(t, err) testCases = append(testCases, testCase{ description: "2 indirect call claim asset (diff globalIndex) (reverted,ok)", bridgeAddr: bridgeAddr, @@ -985,6 +1004,7 @@ func TestClaimCalldata(t *testing.T) { require.NoError(t, err) time.Sleep(1 * time.Second) r, err = client.TransactionReceipt(ctx, tx.Hash()) + require.NoError(t, err) testCases = append(testCases, testCase{ description: "indirect + indirect call to claim message bytes", bridgeAddr: bridgeAddr, @@ -1038,6 +1058,7 @@ func TestClaimCalldata(t *testing.T) { require.NoError(t, err) time.Sleep(1 * time.Second) r, err = client.TransactionReceipt(ctx, tx.Hash()) + require.NoError(t, err) testCases = append(testCases, testCase{ description: "2 indirect + indirect call claim message 1 (same globalIndex)", bridgeAddr: bridgeAddr, @@ -1115,6 +1136,7 @@ func TestClaimCalldata(t *testing.T) { require.NoError(t, err) time.Sleep(1 * time.Second) r, err = client.TransactionReceipt(ctx, tx.Hash()) + require.NoError(t, err) testCases = append(testCases, testCase{ description: "3 ok (indirectx2, indirect, indirectx2) call claim message 1 (same 
globalIndex)", bridgeAddr: bridgeAddr, @@ -1196,6 +1218,7 @@ func TestClaimCalldata(t *testing.T) { require.NoError(t, err) time.Sleep(1 * time.Second) r, err = client.TransactionReceipt(ctx, tx.Hash()) + require.NoError(t, err) testCases = append(testCases, testCase{ description: "3 ok (indirectx2, indirect, indirectx2) call claim message 1 (diff globalIndex)", bridgeAddr: bridgeAddr, @@ -1279,6 +1302,7 @@ func TestClaimCalldata(t *testing.T) { require.NoError(t, err) time.Sleep(1 * time.Second) r, err = client.TransactionReceipt(ctx, tx.Hash()) + require.NoError(t, err) testCases = append(testCases, testCase{ description: "1 ko 2 ok (indirectx2, indirect, indirectx2) call claim message 1 (diff globalIndex)", bridgeAddr: bridgeAddr, @@ -1356,6 +1380,7 @@ func TestClaimCalldata(t *testing.T) { require.NoError(t, err) time.Sleep(1 * time.Second) r, err = client.TransactionReceipt(ctx, tx.Hash()) + require.NoError(t, err) testCases = append(testCases, testCase{ description: "1 ko 2 ok (indirectx2, indirect, indirectx2) call claim message 1 (diff globalIndex)", bridgeAddr: bridgeAddr, @@ -1433,6 +1458,7 @@ func TestClaimCalldata(t *testing.T) { require.NoError(t, err) time.Sleep(1 * time.Second) r, err = client.TransactionReceipt(ctx, tx.Hash()) + require.NoError(t, err) testCases = append(testCases, testCase{ description: "1 ok 1 ok 1 ko (indirectx2, indirect, indirectx2) call claim message 1 (diff globalIndex)", bridgeAddr: bridgeAddr, @@ -1510,6 +1536,7 @@ func TestClaimCalldata(t *testing.T) { require.NoError(t, err) time.Sleep(1 * time.Second) r, err = client.TransactionReceipt(ctx, tx.Hash()) + require.NoError(t, err) testCases = append(testCases, testCase{ description: "1 ko 2 ok (indirectx2, indirect, indirectx2) call claim message 1 (same globalIndex)", bridgeAddr: bridgeAddr, @@ -1587,6 +1614,7 @@ func TestClaimCalldata(t *testing.T) { require.NoError(t, err) time.Sleep(1 * time.Second) r, err = client.TransactionReceipt(ctx, tx.Hash()) + require.NoError(t, 
err) testCases = append(testCases, testCase{ description: "1 ko 2 ok (indirectx2, indirect, indirectx2) call claim message 1 (same globalIndex)", bridgeAddr: bridgeAddr, @@ -1664,6 +1692,7 @@ func TestClaimCalldata(t *testing.T) { require.NoError(t, err) time.Sleep(1 * time.Second) r, err = client.TransactionReceipt(ctx, tx.Hash()) + require.NoError(t, err) testCases = append(testCases, testCase{ description: "1 ok 1 ok 1 ko (indirectx2, indirect, indirectx2) call claim message 1 (same globalIndex)", bridgeAddr: bridgeAddr, @@ -1741,6 +1770,7 @@ func TestClaimCalldata(t *testing.T) { require.NoError(t, err) time.Sleep(1 * time.Second) r, err = client.TransactionReceipt(ctx, tx.Hash()) + require.NoError(t, err) testCases = append(testCases, testCase{ description: "2 ko 1 ok (indirectx2, indirect, indirectx2) call claim message 1 (same globalIndex)", bridgeAddr: bridgeAddr, @@ -1812,6 +1842,7 @@ func TestClaimCalldata(t *testing.T) { require.NoError(t, err) time.Sleep(1 * time.Second) r, err = client.TransactionReceipt(ctx, tx.Hash()) + require.NoError(t, err) testCases = append(testCases, testCase{ description: "1 ok 2 ko (indirectx2, indirect, indirectx2) call claim message 1 (same globalIndex)", bridgeAddr: bridgeAddr, @@ -1883,6 +1914,7 @@ func TestClaimCalldata(t *testing.T) { require.NoError(t, err) time.Sleep(1 * time.Second) r, err = client.TransactionReceipt(ctx, tx.Hash()) + require.NoError(t, err) testCases = append(testCases, testCase{ description: "1 ko 1 ok 1 ko (indirectx2, indirect, indirectx2) call claim message 1 (same globalIndex)", bridgeAddr: bridgeAddr, diff --git a/bridgesync/e2e_test.go b/bridgesync/e2e_test.go index a8868ce1..6f1e10c4 100644 --- a/bridgesync/e2e_test.go +++ b/bridgesync/e2e_test.go @@ -20,7 +20,7 @@ import ( func TestBridgeEventE2E(t *testing.T) { ctx := context.Background() dbPathSyncer := path.Join(t.TempDir(), "file::memory:?cache=shared") - dbPathReorg := t.TempDir() + dbPathReorg := path.Join(t.TempDir(), 
"file::memory:?cache=shared") client, setup := helpers.SimulatedBackend(t, nil, 0) rd, err := reorgdetector.New(client.Client(), reorgdetector.Config{DBPath: dbPathReorg}) diff --git a/config/default.go b/config/default.go index 096d98de..bbf4d2e0 100644 --- a/config/default.go +++ b/config/default.go @@ -206,10 +206,10 @@ SyncModeOnlyEnabled = false NumRequests = 1000 Interval = "1s" [ReorgDetectorL1] -DBPath = "{{PathRWData}}/reorgdetectorl1" +DBPath = "{{PathRWData}}/reorgdetectorl1.sqlite" [ReorgDetectorL2] -DBPath = "{{PathRWData}}/reorgdetectorl2" +DBPath = "{{PathRWData}}/reorgdetectorl2.sqlite" [L1InfoTreeSync] DBPath = "{{PathRWData}}/L1InfoTreeSync.sqlite" @@ -316,7 +316,6 @@ WaitForNewBlocksPeriod = "3s" OriginNetwork=1 [LastGERSync] -# MDBX database path DBPath = "{{PathRWData}}/lastgersync.sqlite" BlockFinality = "LatestBlock" InitialBlockNum = 0 diff --git a/go.mod b/go.mod index c51772c1..0061c72f 100644 --- a/go.mod +++ b/go.mod @@ -19,7 +19,6 @@ require ( github.com/knadh/koanf/parsers/toml v0.1.0 github.com/knadh/koanf/providers/rawbytes v0.1.0 github.com/knadh/koanf/v2 v2.1.1 - github.com/ledgerwatch/erigon-lib v1.0.0 github.com/mattn/go-sqlite3 v1.14.23 github.com/mitchellh/mapstructure v1.5.0 github.com/pelletier/go-toml/v2 v2.2.2 @@ -46,13 +45,11 @@ require ( github.com/Microsoft/go-winio v0.6.2 // indirect github.com/StackExchange/wmi v1.2.1 // indirect github.com/VictoriaMetrics/fastcache v1.12.2 // indirect - github.com/VictoriaMetrics/metrics v1.23.1 // indirect github.com/bahlo/generic-list-go v0.2.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bits-and-blooms/bitset v1.14.2 // indirect github.com/btcsuite/btcd/btcec/v2 v2.3.4 // indirect github.com/buger/jsonparser v1.1.1 // indirect - github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cockroachdb/errors v1.11.3 // indirect github.com/cockroachdb/fifo v0.0.0-20240816210425-c5d0cb0b6fc0 
// indirect @@ -70,7 +67,6 @@ require ( github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect github.com/didip/tollbooth/v6 v6.1.2 // indirect github.com/dustin/go-humanize v1.0.1 // indirect - github.com/erigontech/mdbx-go v0.27.14 // indirect github.com/ethereum/c-kzg-4844 v1.0.3 // indirect github.com/ethereum/go-verkle v0.1.1-0.20240306133620-7d920df305f0 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect @@ -81,7 +77,6 @@ require ( github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.3.0 // indirect github.com/go-pkgz/expirable-cache v0.0.3 // indirect - github.com/go-stack/stack v1.8.1 // indirect github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 // indirect github.com/gofrs/flock v0.12.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect @@ -109,7 +104,6 @@ require ( github.com/knadh/koanf/maps v0.1.1 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect - github.com/ledgerwatch/log/v3 v3.9.0 // indirect github.com/logrusorgru/aurora v2.0.3+incompatible // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mailru/easyjson v0.7.7 // indirect @@ -125,7 +119,6 @@ require ( github.com/ncruces/go-strftime v0.1.9 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect github.com/onsi/gomega v1.27.10 // indirect - github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect @@ -154,8 +147,6 @@ require ( github.com/tklauser/numcpus v0.6.1 // indirect github.com/tyler-smith/go-bip39 v1.1.0 // indirect github.com/valyala/bytebufferpool v1.0.0 // indirect - github.com/valyala/fastrand v1.1.0 // indirect - github.com/valyala/histogram v1.2.0 // indirect github.com/wk8/go-ordered-map/v2 v2.1.8 // indirect github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect 
go.opentelemetry.io/otel/trace v1.24.0 // indirect diff --git a/go.sum b/go.sum index 28771a51..ceb905ac 100644 --- a/go.sum +++ b/go.sum @@ -20,8 +20,6 @@ github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDO github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI= github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI= -github.com/VictoriaMetrics/metrics v1.23.1 h1:/j8DzeJBxSpL2qSIdqnRFLvQQhbJyJbbEi22yMm7oL0= -github.com/VictoriaMetrics/metrics v1.23.1/go.mod h1:rAr/llLpEnAdTehiNlUxKgnjcOuROSzpw0GvjpEbvFc= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= github.com/allegro/bigcache v1.2.1 h1:hg1sY1raCwic3Vnsvje6TT7/pnZba83LeFck5NrFKSc= github.com/allegro/bigcache v1.2.1/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= @@ -37,8 +35,6 @@ github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOF github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= -github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b h1:6+ZFm0flnudZzdSE0JxlhR2hKnGPcNB35BjQf4RYQDY= -github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b/go.mod h1:S/7n9copUssQ56c7aAgHqftWO4LTf4xY6CGWt8Bc+3M= github.com/cespare/cp v1.1.1 h1:nCb6ZLdB7NRaqsm91JtQTAme2SKJzXVsdPIPkyJr1MU= github.com/cespare/cp v1.1.1/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -91,8 +87,6 @@ github.com/didip/tollbooth/v6 v6.1.2 h1:Kdqxmqw9YTv0uKajBUiWQg+GURL/k4vy9gmLCL01 github.com/didip/tollbooth/v6 
v6.1.2/go.mod h1:xjcse6CTHCLuOkzsWrEgdy9WPJFv+p/x6v+MyfP+O9s= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/erigontech/mdbx-go v0.27.14 h1:IVVeQVCAjZRpAR8bThlP2ISxrOwdV35NZdGwAgotaRw= -github.com/erigontech/mdbx-go v0.27.14/go.mod h1:FAMxbOgqOnRDx51j8HjuJZIgznbDwjX7LItd+/UWyA4= github.com/ethereum/c-kzg-4844 v1.0.3 h1:IEnbOHwjixW2cTvKRUlAAUOeleV7nNM/umJR+qy4WDs= github.com/ethereum/c-kzg-4844 v1.0.3/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0= github.com/ethereum/go-ethereum v1.14.8 h1:NgOWvXS+lauK+zFukEvi85UmmsS/OkV0N23UZ1VTIig= @@ -130,8 +124,6 @@ github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw= -github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 h1:TQcrn6Wq+sKGkpyPvppOz99zsMBaUOKXq6HSv655U1c= github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= @@ -174,8 +166,6 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 
h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= -github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE= github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= @@ -280,10 +270,6 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-lib v1.0.0 h1:2o7EfgB/6CyjXAaQ8+Dh7AmY5rWvwSKg0kGp/U9kwqE= -github.com/ledgerwatch/erigon-lib v1.0.0/go.mod h1:l1i6+H9MgizD+ObQ5cXsfA9S3egYTOCnnYGjbrJMqR4= -github.com/ledgerwatch/log/v3 v3.9.0 h1:iDwrXe0PVwBC68Dd94YSsHbMgQ3ufsgjzXtFNFVZFRk= -github.com/ledgerwatch/log/v3 v3.9.0/go.mod h1:EiAY6upmI/6LkNhOVxb4eVsmsP11HZCnZ3PlJMjYiqE= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= @@ -305,7 +291,6 @@ github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod 
h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= @@ -351,8 +336,6 @@ github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAl github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI= github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M= -github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= -github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= @@ -454,12 +437,8 @@ github.com/urfave/cli/v2 v2.27.4 h1:o1owoI+02Eb+K107p27wEX9Bb8eqIoZCfLXloLUSWJ8= github.com/urfave/cli/v2 v2.27.4/go.mod h1:m4QzxcD2qpra4z7WhzEGn74WZLViBnMpb1ToCAKdGRQ= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= -github.com/valyala/fastrand v1.1.0 h1:f+5HkLW4rsgzdNoleUOB69hyT9IlD2ZQh9GyDMfb5G8= -github.com/valyala/fastrand v1.1.0/go.mod h1:HWqCzkrkg6QXT8V2EXWvXCoow7vLwOFN002oeRzjapQ= github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQD0Loo= github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= -github.com/valyala/histogram v1.2.0 h1:wyYGAZZt3CpwUiIb9AU/Zbllg1llXyrtApRS815OLoQ= -github.com/valyala/histogram v1.2.0/go.mod h1:Hb4kBwb4UxsaNbbbh+RRz8ZR6pdodR57tzWUS3BUzXY= github.com/wk8/go-ordered-map/v2 v2.1.8 h1:5h/BUHu93oj4gIdvHHHGsScSTMijfx5PeYkE/fJgbpc= 
github.com/wk8/go-ordered-map/v2 v2.1.8/go.mod h1:5nJHM5DyteebpVlHnWMV0rPz6Zp7+xBAnxjb1X5vnTw= github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 h1:gEOO8jv9F4OT7lGCjxCBTO/36wtF6j2nSip77qHd4x4= diff --git a/l1infotreesync/e2e_test.go b/l1infotreesync/e2e_test.go index 70986cbf..94ec008c 100644 --- a/l1infotreesync/e2e_test.go +++ b/l1infotreesync/e2e_test.go @@ -154,7 +154,7 @@ func TestE2E(t *testing.T) { func TestWithReorgs(t *testing.T) { ctx := context.Background() dbPathSyncer := path.Join(t.TempDir(), "file::memory:?cache=shared") - dbPathReorg := t.TempDir() + dbPathReorg := path.Join(t.TempDir(), "file::memory:?cache=shared") client, auth, gerAddr, verifyAddr, gerSc, verifySC := newSimulatedClient(t) @@ -272,7 +272,7 @@ func TestStressAndReorgs(t *testing.T) { ctx := context.Background() dbPathSyncer := path.Join(t.TempDir(), "file:TestStressAndReorgs:memory:?cache=shared") - dbPathReorg := t.TempDir() + dbPathReorg := path.Join(t.TempDir(), "file::memory:?cache=shared") client, auth, gerAddr, verifyAddr, gerSc, verifySC := newSimulatedClient(t) diff --git a/reorgdetector/migrations/migrations.go b/reorgdetector/migrations/migrations.go new file mode 100644 index 00000000..ba619cde --- /dev/null +++ b/reorgdetector/migrations/migrations.go @@ -0,0 +1,21 @@ +package migrations + +import ( + _ "embed" + + "github.com/0xPolygon/cdk/db" + "github.com/0xPolygon/cdk/db/types" +) + +//go:embed reorgdetector0001.sql +var mig001 string + +func RunMigrations(dbPath string) error { + migrations := []types.Migration{ + { + ID: "reorgdetector0001", + SQL: mig001, + }, + } + return db.RunMigrations(dbPath, migrations) +} diff --git a/reorgdetector/migrations/reorgdetector0001.sql b/reorgdetector/migrations/reorgdetector0001.sql new file mode 100644 index 00000000..8b5092ba --- /dev/null +++ b/reorgdetector/migrations/reorgdetector0001.sql @@ -0,0 +1,11 @@ +-- +migrate Down +DROP TABLE IF EXISTS block; +DROP TABLE IF EXISTS claim; +DROP TABLE IF EXISTS bridge; + 
+-- +migrate Up +CREATE TABLE tracked_block ( + subscriber_id VARCHAR NOT NULL, + num BIGINT NOT NULL, + hash VARCHAR NOT NULL +); \ No newline at end of file diff --git a/reorgdetector/reorgdetector.go b/reorgdetector/reorgdetector.go index 496a844c..91d21354 100644 --- a/reorgdetector/reorgdetector.go +++ b/reorgdetector/reorgdetector.go @@ -2,18 +2,19 @@ package reorgdetector import ( "context" + "database/sql" "fmt" "math/big" "sync" "time" + "github.com/0xPolygon/cdk/db" "github.com/0xPolygon/cdk/log" + "github.com/0xPolygon/cdk/reorgdetector/migrations" "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/rpc" - "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon-lib/kv/mdbx" "golang.org/x/sync/errgroup" ) @@ -25,7 +26,7 @@ type EthClient interface { type ReorgDetector struct { client EthClient - db kv.RwDB + db *sql.DB checkReorgInterval time.Duration trackedBlocksLock sync.RWMutex @@ -36,12 +37,13 @@ type ReorgDetector struct { } func New(client EthClient, cfg Config) (*ReorgDetector, error) { - db, err := mdbx.NewMDBX(nil). - Path(cfg.DBPath). - WithTableCfg(tableCfgFunc). 
- Open() + err := migrations.RunMigrations(cfg.DBPath) if err != nil { - return nil, fmt.Errorf("failed to open db: %w", err) + return nil, err + } + db, err := db.NewSQLiteDB(cfg.DBPath) + if err != nil { + return nil, err } return &ReorgDetector{ @@ -56,7 +58,7 @@ func New(client EthClient, cfg Config) (*ReorgDetector, error) { // Start starts the reorg detector func (rd *ReorgDetector) Start(ctx context.Context) (err error) { // Load tracked blocks from the DB - if err = rd.loadTrackedHeaders(ctx); err != nil { + if err = rd.loadTrackedHeaders(); err != nil { return fmt.Errorf("failed to load tracked headers: %w", err) } @@ -96,7 +98,7 @@ func (rd *ReorgDetector) AddBlockToTrack(ctx context.Context, id string, num uin // Store the given header to the tracked list hdr := newHeader(num, hash) - if err := rd.saveTrackedBlock(ctx, id, hdr); err != nil { + if err := rd.saveTrackedBlock(id, hdr); err != nil { return fmt.Errorf("failed to save tracked block: %w", err) } @@ -157,6 +159,10 @@ func (rd *ReorgDetector) detectReorgInTrackedList(ctx context.Context) error { if hdr.Num <= lastFinalisedBlock.Number.Uint64() { hdrs.removeRange(hdr.Num, hdr.Num) } + if err := rd.removeTrackedBlockRange(id, hdr.Num, hdr.Num); err != nil { + return fmt.Errorf("error removing blocks from DB for subscriber %s between blocks %d and %d: %w", + id, hdr.Num, hdr.Num, err) + } continue } @@ -164,17 +170,16 @@ func (rd *ReorgDetector) detectReorgInTrackedList(ctx context.Context) error { // Notify the subscriber about the reorg rd.notifySubscriber(id, hdr) - // Remove the reorged block and all the following blocks + // Remove the reorged block and all the following blocks from DB + if err := rd.removeTrackedBlockRange(id, hdr.Num, headers[len(headers)-1].Num); err != nil { + return fmt.Errorf("error removing blocks from DB for subscriber %s between blocks %d and %d: %w", + id, hdr.Num, headers[len(headers)-1].Num, err) + } + // Remove the reorged block and all the following blocks from 
memory hdrs.removeRange(hdr.Num, headers[len(headers)-1].Num) break } - - // Update the tracked blocks in the DB - if err := rd.updateTrackedBlocksDB(ctx, id, hdrs); err != nil { - return fmt.Errorf("failed to update tracked blocks for subscriber %s: %w", id, err) - } - return nil }) } @@ -183,12 +188,12 @@ func (rd *ReorgDetector) detectReorgInTrackedList(ctx context.Context) error { } // loadTrackedHeaders loads tracked headers from the DB and stores them in memory -func (rd *ReorgDetector) loadTrackedHeaders(ctx context.Context) (err error) { +func (rd *ReorgDetector) loadTrackedHeaders() (err error) { rd.trackedBlocksLock.Lock() defer rd.trackedBlocksLock.Unlock() // Load tracked blocks for all subscribers from the DB - if rd.trackedBlocks, err = rd.getTrackedBlocks(ctx); err != nil { + if rd.trackedBlocks, err = rd.getTrackedBlocks(); err != nil { return fmt.Errorf("failed to get tracked blocks: %w", err) } diff --git a/reorgdetector/reorgdetector_db.go b/reorgdetector/reorgdetector_db.go index 79bd6cd4..3a066b7f 100644 --- a/reorgdetector/reorgdetector_db.go +++ b/reorgdetector/reorgdetector_db.go @@ -1,69 +1,57 @@ package reorgdetector import ( - "context" - "encoding/json" + "errors" + "fmt" - "github.com/ledgerwatch/erigon-lib/kv" + "github.com/0xPolygon/cdk/db" + "github.com/russross/meddler" ) -const ( - subscriberBlocks = "reorgdetector-subscriberBlocks" -) - -func tableCfgFunc(_ kv.TableCfg) kv.TableCfg { - return kv.TableCfg{ - subscriberBlocks: {}, - } -} - // getTrackedBlocks returns a list of tracked blocks for each subscriber from db -func (rd *ReorgDetector) getTrackedBlocks(ctx context.Context) (map[string]*headersList, error) { - tx, err := rd.db.BeginRo(ctx) +func (rd *ReorgDetector) getTrackedBlocks() (map[string]*headersList, error) { + trackedBlocks := make(map[string]*headersList, 0) + var headersWithID []*headerWithSubscriberID + err := meddler.QueryAll(rd.db, &headersWithID, "SELECT * FROM tracked_block ORDER BY subscriber_id;") if err 
!= nil { - return nil, err + if errors.Is(err, db.ErrNotFound) { + return trackedBlocks, nil + } + return nil, fmt.Errorf("error queryng tracked_block: %w", err) } - - defer tx.Rollback() - - cursor, err := tx.Cursor(subscriberBlocks) - if err != nil { - return nil, err + if len(headersWithID) == 0 { + return trackedBlocks, nil } - - defer cursor.Close() - - trackedBlocks := make(map[string]*headersList, 0) - - for k, v, err := cursor.First(); k != nil; k, v, err = cursor.Next() { - if err != nil { - return nil, err + currentID := headersWithID[0].SubscriberID + currentHeaders := []header{} + for i := 0; i < len(headersWithID); i++ { + if i == len(headersWithID)-1 { + currentHeaders = append(currentHeaders, header{ + Num: headersWithID[i].Num, + Hash: headersWithID[i].Hash, + }) + trackedBlocks[currentID] = newHeadersList(currentHeaders...) + } else if headersWithID[i].SubscriberID != currentID { + trackedBlocks[currentID] = newHeadersList(currentHeaders...) + currentHeaders = []header{{ + Num: headersWithID[i].Num, + Hash: headersWithID[i].Hash, + }} + currentID = headersWithID[i].SubscriberID + } else { + currentHeaders = append(currentHeaders, header{ + Num: headersWithID[i].Num, + Hash: headersWithID[i].Hash, + }) } - - var headers []header - if err := json.Unmarshal(v, &headers); err != nil { - return nil, err - } - - trackedBlocks[string(k)] = newHeadersList(headers...) 
} return trackedBlocks, nil } // saveTrackedBlock saves the tracked block for a subscriber in db and in memory -func (rd *ReorgDetector) saveTrackedBlock(ctx context.Context, id string, b header) error { +func (rd *ReorgDetector) saveTrackedBlock(id string, b header) error { rd.trackedBlocksLock.Lock() - - // this has to go after the lock, because of a possible deadlock - // between AddBlocksToTrack and detectReorgInTrackedList - tx, err := rd.db.BeginRw(ctx) - if err != nil { - return err - } - - defer tx.Rollback() - hdrs, ok := rd.trackedBlocks[id] if !ok || hdrs.isEmpty() { hdrs = newHeadersList(b) @@ -72,32 +60,18 @@ func (rd *ReorgDetector) saveTrackedBlock(ctx context.Context, id string, b head hdrs.add(b) } rd.trackedBlocksLock.Unlock() - - raw, err := json.Marshal(hdrs.getSorted()) - if err != nil { - return err - } - - return tx.Put(subscriberBlocks, []byte(id), raw) + return meddler.Insert(rd.db, "tracked_block", &headerWithSubscriberID{ + SubscriberID: id, + Num: b.Num, + Hash: b.Hash, + }) } // updateTrackedBlocksDB updates the tracked blocks for a subscriber in db -func (rd *ReorgDetector) updateTrackedBlocksDB(ctx context.Context, id string, blocks *headersList) error { - tx, err := rd.db.BeginRw(ctx) - if err != nil { - return err - } - - defer tx.Rollback() - - raw, err := json.Marshal(blocks.getSorted()) - if err != nil { - return err - } - - if err = tx.Put(subscriberBlocks, []byte(id), raw); err != nil { - return err - } - - return nil +func (rd *ReorgDetector) removeTrackedBlockRange(id string, fromBlock, toBlock uint64) error { + _, err := rd.db.Exec( + "DELETE FROM tracked_block WHERE num >= $1 AND NUM <= 2 AND subscriber_id = $3;", + fromBlock, toBlock, id, + ) + return err } diff --git a/reorgdetector/reorgdetector_test.go b/reorgdetector/reorgdetector_test.go index c99bb484..a496d33f 100644 --- a/reorgdetector/reorgdetector_test.go +++ b/reorgdetector/reorgdetector_test.go @@ -2,11 +2,14 @@ package reorgdetector import ( "context" + "path" 
+ "strings" "testing" "time" cdktypes "github.com/0xPolygon/cdk/config/types" "github.com/0xPolygon/cdk/test/helpers" + common "github.com/ethereum/go-ethereum/common" "github.com/stretchr/testify/require" ) @@ -19,7 +22,7 @@ func Test_ReorgDetector(t *testing.T) { clientL1, _ := helpers.SimulatedBackend(t, nil, 0) // Create test DB dir - testDir := t.TempDir() + testDir := path.Join(t.TempDir(), "file::memory:?cache=shared") reorgDetector, err := New(clientL1.Client(), Config{DBPath: testDir, CheckReorgsInterval: cdktypes.NewDuration(time.Millisecond * 100)}) require.NoError(t, err) @@ -69,3 +72,66 @@ func Test_ReorgDetector(t *testing.T) { require.Equal(t, 1, headersList.len()) // Only block 3 left require.Equal(t, remainingHeader.Hash(), headersList.get(4).Hash) } + +func TestGetTrackedBlocks(t *testing.T) { + clientL1, _ := helpers.SimulatedBackend(t, nil, 0) + testDir := path.Join(t.TempDir(), "file::memory:?cache=shared") + reorgDetector, err := New(clientL1.Client(), Config{DBPath: testDir, CheckReorgsInterval: cdktypes.NewDuration(time.Millisecond * 100)}) + require.NoError(t, err) + list, err := reorgDetector.getTrackedBlocks() + require.NoError(t, err) + require.Equal(t, len(list), 0) + + expectedList := make(map[string]*headersList) + headersMapFoo := make(map[uint64]header) + headerFoo2 := header{ + Num: 2, + Hash: common.HexToHash("foofoo"), + } + err = reorgDetector.saveTrackedBlock("foo", headerFoo2) + require.NoError(t, err) + headersMapFoo[2] = headerFoo2 + headerFoo3 := header{ + Num: 3, + Hash: common.HexToHash("foofoofoo"), + } + err = reorgDetector.saveTrackedBlock("foo", headerFoo3) + require.NoError(t, err) + headersMapFoo[3] = headerFoo3 + expectedList["foo"] = &headersList{ + headers: headersMapFoo, + } + list, err = reorgDetector.getTrackedBlocks() + require.NoError(t, err) + require.Equal(t, expectedList, list) + + headersMapBar := make(map[uint64]header) + headerBar2 := header{ + Num: 2, + Hash: common.HexToHash("BarBar"), + } + err = 
reorgDetector.saveTrackedBlock("Bar", headerBar2) + require.NoError(t, err) + headersMapBar[2] = headerBar2 + expectedList["Bar"] = &headersList{ + headers: headersMapBar, + } + list, err = reorgDetector.getTrackedBlocks() + require.NoError(t, err) + require.Equal(t, expectedList, list) + + require.NoError(t, reorgDetector.loadTrackedHeaders()) + _, ok := reorgDetector.subscriptions["foo"] + require.True(t, ok) + _, ok = reorgDetector.subscriptions["Bar"] + require.True(t, ok) +} + +func TestNotSubscribed(t *testing.T) { + clientL1, _ := helpers.SimulatedBackend(t, nil, 0) + testDir := path.Join(t.TempDir(), "file::memory:?cache=shared") + reorgDetector, err := New(clientL1.Client(), Config{DBPath: testDir, CheckReorgsInterval: cdktypes.NewDuration(time.Millisecond * 100)}) + require.NoError(t, err) + err = reorgDetector.AddBlockToTrack(context.Background(), "foo", 1, common.Hash{}) + require.True(t, strings.Contains(err.Error(), "is not subscribed")) +} diff --git a/reorgdetector/types.go b/reorgdetector/types.go index bee3eb44..20d4562c 100644 --- a/reorgdetector/types.go +++ b/reorgdetector/types.go @@ -8,8 +8,14 @@ import ( ) type header struct { - Num uint64 - Hash common.Hash + Num uint64 `meddler:"num"` + Hash common.Hash `meddler:"hash,hash"` +} + +type headerWithSubscriberID struct { + SubscriberID string `meddler:"subscriber_id"` + Num uint64 `meddler:"num"` + Hash common.Hash `meddler:"hash,hash"` } // newHeader returns a new instance of header diff --git a/rpc/bridge.go b/rpc/bridge.go index 7b52ed73..65d94971 100644 --- a/rpc/bridge.go +++ b/rpc/bridge.go @@ -292,7 +292,6 @@ func (b *BridgeEndpoints) getFirstL1InfoTreeIndexForL1Bridge(ctx context.Context if err != nil { return 0, err } - //nolint:gocritic // switch statement doesn't make sense here, I couldn't break if root.Index < depositCount { lowerLimit = targetBlock + 1 } else if root.Index == depositCount { @@ -346,7 +345,6 @@ func (b *BridgeEndpoints) getFirstL1InfoTreeIndexForL2Bridge(ctx 
context.Context if err != nil { return 0, err } - //nolint:gocritic // switch statement doesn't make sense here, I couldn't break if root.Index < depositCount { lowerLimit = targetBlock + 1 } else if root.Index == depositCount { diff --git a/test/aggoraclehelpers/aggoracle_e2e.go b/test/aggoraclehelpers/aggoracle_e2e.go index be362ccc..7830b941 100644 --- a/test/aggoraclehelpers/aggoracle_e2e.go +++ b/test/aggoraclehelpers/aggoracle_e2e.go @@ -105,7 +105,7 @@ func CommonSetup(t *testing.T) ( l1Client, authL1, gerL1Addr, gerL1Contract, bridgeL1Addr, bridgeL1Contract := newSimulatedL1(t) // Reorg detector - dbPathReorgDetector := t.TempDir() + dbPathReorgDetector := path.Join(t.TempDir(), "file::memory:?cache=shared") reorg, err := reorgdetector.New(l1Client.Client(), reorgdetector.Config{DBPath: dbPathReorgDetector}) require.NoError(t, err) diff --git a/test/config/kurtosis-cdk-node-config.toml.template b/test/config/kurtosis-cdk-node-config.toml.template index 1d70226d..5c885d5f 100644 --- a/test/config/kurtosis-cdk-node-config.toml.template +++ b/test/config/kurtosis-cdk-node-config.toml.template @@ -1,4 +1,4 @@ -PathRWData = "{{.path_rw_data}}/" +PathRWData = "/tmp/" L1URL="{{.l1_rpc_url}}" L2URL="http://{{.l2_rpc_name}}{{.deployment_suffix}}:{{.zkevm_rpc_http_port}}" AggLayerURL="{{.agglayer_url}}" From 61fe7f6b2ec042f5f7caafde7d6ba5a472aa18cb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toni=20Ram=C3=ADrez?= <58293609+ToniRamirezM@users.noreply.github.com> Date: Fri, 8 Nov 2024 13:59:17 +0100 Subject: [PATCH 20/33] feat: align Develop with changes in Release/0.4.0 (#174) * feat: calculate acc input hash locally (#154) --- aggregator/aggregator.go | 135 ++++++++++++++++++++++++++++------ aggregator/aggregator_test.go | 129 +++++++++++++++++++------------- common/common.go | 4 +- l1infotree/tree.go | 12 ++- l1infotree/tree_test.go | 54 -------------- scripts/local_config | 2 +- 6 files changed, 198 insertions(+), 138 deletions(-) diff --git a/aggregator/aggregator.go 
b/aggregator/aggregator.go index 3541aaaf..72c316be 100644 --- a/aggregator/aggregator.go +++ b/aggregator/aggregator.go @@ -6,6 +6,7 @@ import ( "encoding/json" "errors" "fmt" + "math" "math/big" "net" "strings" @@ -58,11 +59,13 @@ type Aggregator struct { cfg Config logger *log.Logger - state StateInterface - etherman Etherman - ethTxManager EthTxManagerClient - l1Syncr synchronizer.Synchronizer - halted atomic.Bool + state StateInterface + etherman Etherman + ethTxManager EthTxManagerClient + l1Syncr synchronizer.Synchronizer + halted atomic.Bool + accInputHashes map[uint64]common.Hash + accInputHashesMutex *sync.Mutex profitabilityChecker aggregatorTxProfitabilityChecker timeSendFinalProof time.Time @@ -155,6 +158,8 @@ func New( etherman: etherman, ethTxManager: ethTxManager, l1Syncr: l1Syncr, + accInputHashes: make(map[uint64]common.Hash), + accInputHashesMutex: &sync.Mutex{}, profitabilityChecker: profitabilityChecker, stateDBMutex: &sync.Mutex{}, timeSendFinalProofMutex: &sync.RWMutex{}, @@ -170,7 +175,7 @@ func New( a.ctx, a.exit = context.WithCancel(a.ctx) } - // Set function to handle the batches from the data stream + // Set function to handle events on L1 if !cfg.SyncModeOnlyEnabled { a.l1Syncr.SetCallbackOnReorgDone(a.handleReorg) a.l1Syncr.SetCallbackOnRollbackBatches(a.handleRollbackBatches) @@ -179,6 +184,26 @@ func New( return a, nil } +func (a *Aggregator) getAccInputHash(batchNumber uint64) common.Hash { + a.accInputHashesMutex.Lock() + defer a.accInputHashesMutex.Unlock() + return a.accInputHashes[batchNumber] +} + +func (a *Aggregator) setAccInputHash(batchNumber uint64, accInputHash common.Hash) { + a.accInputHashesMutex.Lock() + defer a.accInputHashesMutex.Unlock() + a.accInputHashes[batchNumber] = accInputHash +} + +func (a *Aggregator) removeAccInputHashes(firstBatch, lastBatch uint64) { + a.accInputHashesMutex.Lock() + defer a.accInputHashesMutex.Unlock() + for i := firstBatch; i <= lastBatch; i++ { + delete(a.accInputHashes, i) + } +} + 
func (a *Aggregator) handleReorg(reorgData synchronizer.ReorgExecutionResult) { a.logger.Warnf("Reorg detected, reorgData: %+v", reorgData) @@ -219,6 +244,7 @@ func (a *Aggregator) handleRollbackBatches(rollbackData synchronizer.RollbackBat a.logger.Warnf("Rollback batches event, rollbackBatchesData: %+v", rollbackData) var err error + var accInputHash *common.Hash // Get new last verified batch number from L1 lastVerifiedBatchNumber, err := a.etherman.GetLatestVerifiedBatchNum() @@ -226,6 +252,8 @@ func (a *Aggregator) handleRollbackBatches(rollbackData synchronizer.RollbackBat a.logger.Errorf("Error getting latest verified batch number: %v", err) } + a.logger.Infof("Last Verified Batch Number:%v", lastVerifiedBatchNumber) + // Check lastVerifiedBatchNumber makes sense if err == nil && lastVerifiedBatchNumber > rollbackData.LastBatchNumber { err = fmt.Errorf( @@ -234,6 +262,17 @@ func (a *Aggregator) handleRollbackBatches(rollbackData synchronizer.RollbackBat ) } + if err == nil { + accInputHash, err = a.getVerifiedBatchAccInputHash(a.ctx, lastVerifiedBatchNumber) + if err == nil { + a.accInputHashesMutex.Lock() + a.accInputHashes = make(map[uint64]common.Hash) + a.accInputHashesMutex.Unlock() + a.logger.Infof("Starting AccInputHash:%v", accInputHash.String()) + a.setAccInputHash(lastVerifiedBatchNumber, *accInputHash) + } + } + // Delete wip proofs if err == nil { err = a.state.DeleteUngeneratedProofs(a.ctx, nil) @@ -272,7 +311,6 @@ func (a *Aggregator) Start() error { err := a.l1Syncr.Sync(true) if err != nil { a.logger.Fatalf("Failed to synchronize from L1: %v", err) - return err } @@ -297,19 +335,27 @@ func (a *Aggregator) Start() error { healthService := newHealthChecker() grpchealth.RegisterHealthServer(a.srv, healthService) + // Delete ungenerated recursive proofs + err = a.state.DeleteUngeneratedProofs(a.ctx, nil) + if err != nil { + return fmt.Errorf("failed to initialize proofs cache %w", err) + } + // Get last verified batch number to set the starting 
point for verifications lastVerifiedBatchNumber, err := a.etherman.GetLatestVerifiedBatchNum() if err != nil { return err } - // Delete ungenerated recursive proofs - err = a.state.DeleteUngeneratedProofs(a.ctx, nil) + a.logger.Infof("Last Verified Batch Number:%v", lastVerifiedBatchNumber) + + accInputHash, err := a.getVerifiedBatchAccInputHash(a.ctx, lastVerifiedBatchNumber) if err != nil { - return fmt.Errorf("failed to initialize proofs cache %w", err) + return err } - a.logger.Infof("Last Verified Batch Number:%v", lastVerifiedBatchNumber) + a.logger.Infof("Starting AccInputHash:%v", accInputHash.String()) + a.setAccInputHash(lastVerifiedBatchNumber, *accInputHash) a.resetVerifyProofTime() @@ -1006,6 +1052,15 @@ func (a *Aggregator) tryAggregateProofs(ctx context.Context, prover ProverInterf return true, nil } +func (a *Aggregator) getVerifiedBatchAccInputHash(ctx context.Context, batchNumber uint64) (*common.Hash, error) { + accInputHash, err := a.etherman.GetBatchAccInputHash(ctx, batchNumber) + if err != nil { + return nil, err + } + + return &accInputHash, nil +} + func (a *Aggregator) getAndLockBatchToProve( ctx context.Context, prover ProverInterface, ) (*state.Batch, []byte, *state.Proof, error) { @@ -1039,6 +1094,22 @@ func (a *Aggregator) getAndLockBatchToProve( return nil, nil, nil, err } + + if proofExists { + accInputHash := a.getAccInputHash(batchNumberToVerify - 1) + if accInputHash == (common.Hash{}) && batchNumberToVerify > 1 { + tmpLogger.Warnf("AccInputHash for batch %d is not in memory, "+ + "deleting proofs to regenerate acc input hash chain in memory", batchNumberToVerify) + + err := a.state.CleanupGeneratedProofs(ctx, math.MaxInt, nil) + if err != nil { + tmpLogger.Infof("Error cleaning up generated proofs for batch %d", batchNumberToVerify) + return nil, nil, nil, err + } + batchNumberToVerify-- + break + } + } } // Check if the batch has been sequenced @@ -1092,15 +1163,37 @@ func (a *Aggregator) getAndLockBatchToProve( 
virtualBatch.L1InfoRoot = &l1InfoRoot } + // Calculate acc input hash as the RPC is not returning the correct one at the moment + accInputHash := cdkcommon.CalculateAccInputHash( + a.logger, + a.getAccInputHash(batchNumberToVerify-1), + virtualBatch.BatchL2Data, + *virtualBatch.L1InfoRoot, + uint64(sequence.Timestamp.Unix()), + rpcBatch.LastCoinbase(), + rpcBatch.ForcedBlockHashL1(), + ) + // Store the acc input hash + a.setAccInputHash(batchNumberToVerify, accInputHash) + + // Log params to calculate acc input hash + a.logger.Debugf("Calculated acc input hash for batch %d: %v", batchNumberToVerify, accInputHash) + a.logger.Debugf("L1InfoRoot: %v", virtualBatch.L1InfoRoot) + // a.logger.Debugf("LastL2BLockTimestamp: %v", rpcBatch.LastL2BLockTimestamp()) + a.logger.Debugf("TimestampLimit: %v", uint64(sequence.Timestamp.Unix())) + a.logger.Debugf("LastCoinbase: %v", rpcBatch.LastCoinbase()) + a.logger.Debugf("ForcedBlockHashL1: %v", rpcBatch.ForcedBlockHashL1()) + // Create state batch stateBatch := &state.Batch{ BatchNumber: rpcBatch.BatchNumber(), Coinbase: rpcBatch.LastCoinbase(), // Use L1 batch data - BatchL2Data: virtualBatch.BatchL2Data, - StateRoot: rpcBatch.StateRoot(), - LocalExitRoot: rpcBatch.LocalExitRoot(), - AccInputHash: rpcBatch.AccInputHash(), + BatchL2Data: virtualBatch.BatchL2Data, + StateRoot: rpcBatch.StateRoot(), + LocalExitRoot: rpcBatch.LocalExitRoot(), + // Use calculated acc input + AccInputHash: accInputHash, L1InfoTreeIndex: rpcBatch.L1InfoTreeIndex(), L1InfoRoot: *virtualBatch.L1InfoRoot, Timestamp: time.Unix(int64(rpcBatch.LastL2BLockTimestamp()), 0), @@ -1412,16 +1505,10 @@ func (a *Aggregator) buildInputProver( } } - // Get Old Acc Input Hash - rpcOldBatch, err := a.rpcClient.GetBatch(batchToVerify.BatchNumber - 1) - if err != nil { - return nil, err - } - inputProver := &prover.StatelessInputProver{ PublicInputs: &prover.StatelessPublicInputs{ Witness: witness, - OldAccInputHash: rpcOldBatch.AccInputHash().Bytes(), + OldAccInputHash: 
a.getAccInputHash(batchToVerify.BatchNumber - 1).Bytes(), OldBatchNum: batchToVerify.BatchNumber - 1, ChainId: batchToVerify.ChainID, ForkId: batchToVerify.ForkID, @@ -1521,6 +1608,10 @@ func (a *Aggregator) handleMonitoredTxResult(result ethtxtypes.MonitoredTxResult } mTxResultLogger.Debugf("deleted generated proofs from %d to %d", firstBatch, lastBatch) + + // Remove the acc input hashes from the map + // leaving the last batch acc input hash as it will be used as old acc input hash + a.removeAccInputHashes(firstBatch, lastBatch-1) } func (a *Aggregator) cleanupLockedProofs() { diff --git a/aggregator/aggregator_test.go b/aggregator/aggregator_test.go index fd03315f..506ce16c 100644 --- a/aggregator/aggregator_test.go +++ b/aggregator/aggregator_test.go @@ -83,6 +83,7 @@ func Test_Start(t *testing.T) { mockL1Syncr.On("Sync", mock.Anything).Return(nil) mockEtherman.On("GetLatestVerifiedBatchNum").Return(uint64(90), nil).Once() + mockEtherman.On("GetBatchAccInputHash", mock.Anything, uint64(90)).Return(common.Hash{}, nil).Once() mockState.On("DeleteUngeneratedProofs", mock.Anything, nil).Return(nil).Once() mockState.On("CleanupLockedProofs", mock.Anything, "", nil).Return(int64(0), nil) @@ -100,6 +101,8 @@ func Test_Start(t *testing.T) { stateDBMutex: &sync.Mutex{}, timeSendFinalProofMutex: &sync.RWMutex{}, timeCleanupLockedProofs: types.Duration{Duration: 5 * time.Second}, + accInputHashes: make(map[uint64]common.Hash), + accInputHashesMutex: &sync.Mutex{}, } go func() { err := a.Start() @@ -149,15 +152,18 @@ func Test_handleRollbackBatches(t *testing.T) { } mockEtherman.On("GetLatestVerifiedBatchNum").Return(uint64(90), nil).Once() + mockEtherman.On("GetBatchAccInputHash", mock.Anything, uint64(90)).Return(common.Hash{}, nil).Once() mockState.On("DeleteUngeneratedProofs", mock.Anything, mock.Anything).Return(nil).Once() mockState.On("DeleteGeneratedProofs", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Once() a := Aggregator{ - ctx: 
context.Background(), - etherman: mockEtherman, - state: mockState, - logger: log.GetDefaultLogger(), - halted: atomic.Bool{}, + ctx: context.Background(), + etherman: mockEtherman, + state: mockState, + logger: log.GetDefaultLogger(), + halted: atomic.Bool{}, + accInputHashes: make(map[uint64]common.Hash), + accInputHashesMutex: &sync.Mutex{}, } a.halted.Store(false) @@ -184,11 +190,13 @@ func Test_handleRollbackBatchesHalt(t *testing.T) { } a := Aggregator{ - ctx: context.Background(), - etherman: mockEtherman, - state: mockState, - logger: log.GetDefaultLogger(), - halted: atomic.Bool{}, + ctx: context.Background(), + etherman: mockEtherman, + state: mockState, + logger: log.GetDefaultLogger(), + halted: atomic.Bool{}, + accInputHashes: make(map[uint64]common.Hash), + accInputHashesMutex: &sync.Mutex{}, } a.halted.Store(false) @@ -213,11 +221,13 @@ func Test_handleRollbackBatchesError(t *testing.T) { } a := Aggregator{ - ctx: context.Background(), - etherman: mockEtherman, - state: mockState, - logger: log.GetDefaultLogger(), - halted: atomic.Bool{}, + ctx: context.Background(), + etherman: mockEtherman, + state: mockState, + logger: log.GetDefaultLogger(), + halted: atomic.Bool{}, + accInputHashes: make(map[uint64]common.Hash), + accInputHashesMutex: &sync.Mutex{}, } a.halted.Store(false) @@ -320,6 +330,8 @@ func Test_sendFinalProofSuccess(t *testing.T) { timeSendFinalProofMutex: &sync.RWMutex{}, sequencerPrivateKey: privateKey, rpcClient: rpcMock, + accInputHashes: make(map[uint64]common.Hash), + accInputHashesMutex: &sync.Mutex{}, } a.ctx, a.exit = context.WithCancel(context.Background()) @@ -509,6 +521,8 @@ func Test_sendFinalProofError(t *testing.T) { timeSendFinalProofMutex: &sync.RWMutex{}, sequencerPrivateKey: privateKey, rpcClient: rpcMock, + accInputHashes: make(map[uint64]common.Hash), + accInputHashesMutex: &sync.Mutex{}, } a.ctx, a.exit = context.WithCancel(context.Background()) @@ -625,7 +639,9 @@ func Test_buildFinalProof(t *testing.T) { cfg: 
Config{ SenderAddress: common.BytesToAddress([]byte("from")).Hex(), }, - rpcClient: rpcMock, + rpcClient: rpcMock, + accInputHashes: make(map[uint64]common.Hash), + accInputHashesMutex: &sync.Mutex{}, } tc.setup(m, &a) @@ -884,6 +900,8 @@ func Test_tryBuildFinalProof(t *testing.T) { timeSendFinalProofMutex: &sync.RWMutex{}, timeCleanupLockedProofs: cfg.CleanupLockedProofsInterval, finalProof: make(chan finalProofMsg), + accInputHashes: make(map[uint64]common.Hash), + accInputHashesMutex: &sync.Mutex{}, } aggregatorCtx := context.WithValue(context.Background(), "owner", ownerAggregator) //nolint:staticcheck @@ -1389,6 +1407,8 @@ func Test_tryAggregateProofs(t *testing.T) { timeSendFinalProofMutex: &sync.RWMutex{}, timeCleanupLockedProofs: cfg.CleanupLockedProofsInterval, finalProof: make(chan finalProofMsg), + accInputHashes: make(map[uint64]common.Hash), + accInputHashesMutex: &sync.Mutex{}, } aggregatorCtx := context.WithValue(context.Background(), "owner", ownerAggregator) //nolint:staticcheck a.ctx, a.exit = context.WithCancel(aggregatorCtx) @@ -1507,35 +1527,27 @@ func Test_tryGenerateBatchProof(t *testing.T) { batchL2Data, err := hex.DecodeString(codedL2Block1) require.NoError(err) l1InfoRoot := common.HexToHash("0x057e9950fbd39b002e323f37c2330d0c096e66919e24cc96fb4b2dfa8f4af782") - batch := state.Batch{ - BatchNumber: lastVerifiedBatchNum + 1, - BatchL2Data: batchL2Data, - L1InfoRoot: l1InfoRoot, - Timestamp: time.Now(), - Coinbase: common.Address{}, - ChainID: uint64(1), - ForkID: uint64(12), - } + virtualBatch := synchronizer.VirtualBatch{ BatchNumber: lastVerifiedBatchNum + 1, BatchL2Data: batchL2Data, L1InfoRoot: &l1InfoRoot, } - m.synchronizerMock.On("GetVirtualBatchByBatchNumber", mock.Anything, lastVerifiedBatchNum+1).Return(&virtualBatch, nil).Once() + m.synchronizerMock.On("GetVirtualBatchByBatchNumber", mock.Anything, lastVerifiedBatchNum).Return(&virtualBatch, nil).Once() m.etherman.On("GetLatestVerifiedBatchNum").Return(lastVerifiedBatchNum, 
nil).Once() - m.stateMock.On("CheckProofExistsForBatch", mock.MatchedBy(matchProverCtxFn), mock.AnythingOfType("uint64"), nil).Return(false, nil).Once() + m.stateMock.On("CheckProofExistsForBatch", mock.MatchedBy(matchProverCtxFn), lastVerifiedBatchNum+1, nil).Return(true, nil).Once() + m.stateMock.On("CleanupGeneratedProofs", mock.Anything, mock.Anything, mock.Anything).Return(nil).Once() sequence := synchronizer.SequencedBatches{ FromBatchNumber: uint64(10), ToBatchNumber: uint64(20), } - m.synchronizerMock.On("GetSequenceByBatchNumber", mock.MatchedBy(matchProverCtxFn), lastVerifiedBatchNum+1).Return(&sequence, nil).Once() + m.synchronizerMock.On("GetSequenceByBatchNumber", mock.MatchedBy(matchProverCtxFn), lastVerifiedBatchNum).Return(&sequence, nil).Once() rpcBatch := rpctypes.NewRPCBatch(lastVerifiedBatchNum+1, common.Hash{}, []string{}, batchL2Data, common.Hash{}, common.BytesToHash([]byte("mock LocalExitRoot")), common.BytesToHash([]byte("mock StateRoot")), common.Address{}, false) rpcBatch.SetLastL2BLockTimestamp(uint64(time.Now().Unix())) - m.rpcMock.On("GetBatch", lastVerifiedBatchNum+1).Return(rpcBatch, nil) - m.rpcMock.On("GetWitness", lastVerifiedBatchNum+1, false).Return([]byte("witness"), nil) - + m.rpcMock.On("GetWitness", lastVerifiedBatchNum, false).Return([]byte("witness"), nil) + m.rpcMock.On("GetBatch", lastVerifiedBatchNum).Return(rpcBatch, nil) m.stateMock.On("AddSequence", mock.MatchedBy(matchProverCtxFn), mock.Anything, nil).Return(nil).Once() m.stateMock.On("AddGeneratedProof", mock.MatchedBy(matchProverCtxFn), mock.Anything, nil).Run( func(args mock.Arguments) { @@ -1550,18 +1562,14 @@ func Test_tryGenerateBatchProof(t *testing.T) { assert.InDelta(time.Now().Unix(), proof.GeneratingSince.Unix(), float64(time.Second)) }, ).Return(nil).Once() - m.synchronizerMock.On("GetLeafsByL1InfoRoot", mock.Anything, l1InfoRoot).Return(l1InfoTreeLeaf, nil).Twice() + m.synchronizerMock.On("GetLeafsByL1InfoRoot", mock.Anything, 
l1InfoRoot).Return(l1InfoTreeLeaf, nil) m.synchronizerMock.On("GetL1InfoTreeLeaves", mock.Anything, mock.Anything).Return(map[uint32]synchronizer.L1InfoTreeLeaf{ 1: { BlockNumber: uint64(35), }, - }, nil).Twice() - - m.rpcMock.On("GetBatch", lastVerifiedBatchNum).Return(rpcBatch, nil) - expectedInputProver, err := a.buildInputProver(context.Background(), &batch, []byte("witness")) - require.NoError(err) + }, nil) - m.proverMock.On("BatchProof", expectedInputProver).Return(nil, errTest).Once() + m.proverMock.On("BatchProof", mock.Anything).Return(nil, errTest).Once() m.stateMock.On("DeleteGeneratedProofs", mock.MatchedBy(matchAggregatorCtxFn), batchToProve.BatchNumber, batchToProve.BatchNumber, nil).Return(nil).Once() }, asserts: func(result bool, a *Aggregator, err error) { @@ -1606,7 +1614,6 @@ func Test_tryGenerateBatchProof(t *testing.T) { m.synchronizerMock.On("GetSequenceByBatchNumber", mock.MatchedBy(matchProverCtxFn), lastVerifiedBatchNum+1).Return(&sequence, nil).Once() rpcBatch := rpctypes.NewRPCBatch(lastVerifiedBatchNum+1, common.Hash{}, []string{}, batchL2Data, common.Hash{}, common.BytesToHash([]byte("mock LocalExitRoot")), common.BytesToHash([]byte("mock StateRoot")), common.Address{}, false) rpcBatch.SetLastL2BLockTimestamp(uint64(time.Now().Unix())) - m.rpcMock.On("GetBatch", lastVerifiedBatchNum+1).Return(rpcBatch, nil) m.rpcMock.On("GetWitness", lastVerifiedBatchNum+1, false).Return([]byte("witness"), nil) m.stateMock.On("AddSequence", mock.MatchedBy(matchProverCtxFn), mock.Anything, nil).Return(nil).Once() m.stateMock.On("AddGeneratedProof", mock.MatchedBy(matchProverCtxFn), mock.Anything, nil).Run( @@ -1630,7 +1637,7 @@ func Test_tryGenerateBatchProof(t *testing.T) { }, }, nil).Twice() - m.rpcMock.On("GetBatch", lastVerifiedBatchNum).Return(rpcBatch, nil).Twice() + m.rpcMock.On("GetBatch", lastVerifiedBatchNum+1).Return(rpcBatch, nil) expectedInputProver, err := a.buildInputProver(context.Background(), &batch, []byte("witness")) 
require.NoError(err) @@ -1672,7 +1679,7 @@ func Test_tryGenerateBatchProof(t *testing.T) { m.synchronizerMock.On("GetSequenceByBatchNumber", mock.MatchedBy(matchProverCtxFn), lastVerifiedBatchNum+1).Return(&sequence, nil).Once() rpcBatch := rpctypes.NewRPCBatch(lastVerifiedBatchNum+1, common.Hash{}, []string{}, batchL2Data, common.Hash{}, common.BytesToHash([]byte("mock LocalExitRoot")), common.BytesToHash([]byte("mock StateRoot")), common.Address{}, false) rpcBatch.SetLastL2BLockTimestamp(uint64(time.Now().Unix())) - m.rpcMock.On("GetBatch", lastVerifiedBatchNum+1).Return(rpcBatch, nil).Once() + m.rpcMock.On("GetBatch", lastVerifiedBatchNum+1).Return(rpcBatch, nil) m.stateMock.On("AddSequence", mock.MatchedBy(matchProverCtxFn), mock.Anything, nil).Return(nil).Once() m.stateMock.On("AddGeneratedProof", mock.MatchedBy(matchProverCtxFn), mock.Anything, nil).Run( func(args mock.Arguments) { @@ -1695,7 +1702,6 @@ func Test_tryGenerateBatchProof(t *testing.T) { }, }, nil).Twice() - m.rpcMock.On("GetBatch", lastVerifiedBatchNum).Return(rpcBatch, nil).Twice() expectedInputProver, err := a.buildInputProver(context.Background(), &batch, []byte("witness")) require.NoError(err) @@ -1769,12 +1775,9 @@ func Test_tryGenerateBatchProof(t *testing.T) { }, }, nil).Twice() - rpcBatch := rpctypes.NewRPCBatch(lastVerifiedBatchNum, common.Hash{}, []string{}, batchL2Data, common.Hash{}, common.BytesToHash([]byte("mock LocalExitRoot")), common.BytesToHash([]byte("mock StateRoot")), common.Address{}, false) + rpcBatch := rpctypes.NewRPCBatch(lastVerifiedBatchNum+1, common.Hash{}, []string{}, batchL2Data, common.Hash{}, common.BytesToHash([]byte("mock LocalExitRoot")), common.BytesToHash([]byte("mock StateRoot")), common.Address{}, false) rpcBatch.SetLastL2BLockTimestamp(uint64(time.Now().Unix())) - rpcBatch2 := rpctypes.NewRPCBatch(lastVerifiedBatchNum+1, common.Hash{}, []string{}, batchL2Data, common.Hash{}, common.BytesToHash([]byte("mock LocalExitRoot")), 
common.BytesToHash([]byte("mock StateRoot")), common.Address{}, false) - rpcBatch2.SetLastL2BLockTimestamp(uint64(time.Now().Unix())) - m.rpcMock.On("GetBatch", lastVerifiedBatchNum).Return(rpcBatch, nil) - m.rpcMock.On("GetBatch", lastVerifiedBatchNum+1).Return(rpcBatch2, nil) + m.rpcMock.On("GetBatch", lastVerifiedBatchNum+1).Return(rpcBatch, nil) m.rpcMock.On("GetWitness", lastVerifiedBatchNum+1, false).Return([]byte("witness"), nil) virtualBatch := synchronizer.VirtualBatch{ @@ -1841,12 +1844,6 @@ func Test_tryGenerateBatchProof(t *testing.T) { } m.synchronizerMock.On("GetSequenceByBatchNumber", mock.MatchedBy(matchProverCtxFn), lastVerifiedBatchNum+1).Return(&sequence, nil).Once() - rpcBatch := rpctypes.NewRPCBatch(lastVerifiedBatchNum, common.Hash{}, []string{}, batchL2Data, common.Hash{}, common.BytesToHash([]byte("mock LocalExitRoot")), common.BytesToHash([]byte("mock StateRoot")), common.Address{}, false) - rpcBatch.SetLastL2BLockTimestamp(uint64(time.Now().Unix())) - rpcBatch2 := rpctypes.NewRPCBatch(lastVerifiedBatchNum+1, common.Hash{}, []string{}, batchL2Data, common.Hash{}, common.BytesToHash([]byte("mock LocalExitRoot")), common.BytesToHash([]byte("mock StateRoot")), common.Address{}, false) - rpcBatch2.SetLastL2BLockTimestamp(uint64(time.Now().Unix())) - m.rpcMock.On("GetBatch", lastVerifiedBatchNum).Return(rpcBatch, nil) - m.rpcMock.On("GetBatch", lastVerifiedBatchNum+1).Return(rpcBatch2, nil) m.rpcMock.On("GetWitness", lastVerifiedBatchNum+1, false).Return([]byte("witness"), nil) virtualBatch := synchronizer.VirtualBatch{ @@ -1858,6 +1855,9 @@ func Test_tryGenerateBatchProof(t *testing.T) { m.synchronizerMock.On("GetVirtualBatchByBatchNumber", mock.Anything, lastVerifiedBatchNum+1).Return(&virtualBatch, nil).Once() m.rpcMock.On("GetWitness", lastVerifiedBatchNum+1, false).Return([]byte("witness"), nil) + rpcBatch := rpctypes.NewRPCBatch(lastVerifiedBatchNum+1, common.Hash{}, []string{}, batchL2Data, common.Hash{}, common.BytesToHash([]byte("mock 
LocalExitRoot")), common.BytesToHash([]byte("mock StateRoot")), common.Address{}, false) + rpcBatch.SetLastL2BLockTimestamp(uint64(time.Now().Unix())) + m.rpcMock.On("GetBatch", lastVerifiedBatchNum+1).Return(rpcBatch, nil) m.stateMock.On("AddSequence", mock.MatchedBy(matchProverCtxFn), mock.Anything, nil).Return(nil).Once() m.stateMock.On("AddGeneratedProof", mock.MatchedBy(matchProverCtxFn), mock.Anything, nil).Run( @@ -1932,6 +1932,8 @@ func Test_tryGenerateBatchProof(t *testing.T) { profitabilityChecker: NewTxProfitabilityCheckerAcceptAll(stateMock, cfg.IntervalAfterWhichBatchConsolidateAnyway.Duration), l1Syncr: synchronizerMock, rpcClient: mockRPC, + accInputHashes: make(map[uint64]common.Hash), + accInputHashesMutex: &sync.Mutex{}, } aggregatorCtx := context.WithValue(context.Background(), "owner", ownerAggregator) //nolint:staticcheck a.ctx, a.exit = context.WithCancel(aggregatorCtx) @@ -1957,3 +1959,24 @@ func Test_tryGenerateBatchProof(t *testing.T) { }) } } + +func Test_accInputHashFunctions(t *testing.T) { + aggregator := Aggregator{ + accInputHashes: make(map[uint64]common.Hash), + accInputHashesMutex: &sync.Mutex{}, + } + + hash1 := common.BytesToHash([]byte("hash1")) + hash2 := common.BytesToHash([]byte("hash2")) + + aggregator.setAccInputHash(1, hash1) + aggregator.setAccInputHash(2, hash2) + + assert.Equal(t, 2, len(aggregator.accInputHashes)) + + hash3 := aggregator.getAccInputHash(1) + assert.Equal(t, hash1, hash3) + + aggregator.removeAccInputHashes(1, 2) + assert.Equal(t, 0, len(aggregator.accInputHashes)) +} diff --git a/common/common.go b/common/common.go index f8b92d16..15206902 100644 --- a/common/common.go +++ b/common/common.go @@ -83,6 +83,7 @@ func CalculateAccInputHash( } v2 = keccak256.Hash(v2) + calculatedAccInputHash := common.BytesToHash(keccak256.Hash(v1, v2, v3, v4, v5, v6)) logger.Debugf("OldAccInputHash: %v", oldAccInputHash) logger.Debugf("BatchHashData: %v", common.Bytes2Hex(v2)) @@ -90,8 +91,9 @@ func CalculateAccInputHash( 
logger.Debugf("TimeStampLimit: %v", timestampLimit) logger.Debugf("Sequencer Address: %v", sequencerAddr) logger.Debugf("Forced BlockHashL1: %v", forcedBlockhashL1) + logger.Debugf("CalculatedAccInputHash: %v", calculatedAccInputHash) - return common.BytesToHash(keccak256.Hash(v1, v2, v3, v4, v5, v6)) + return calculatedAccInputHash } // NewKeyFromKeystore creates a private key from a keystore file diff --git a/l1infotree/tree.go b/l1infotree/tree.go index 17258ba0..f3ad6d36 100644 --- a/l1infotree/tree.go +++ b/l1infotree/tree.go @@ -109,17 +109,15 @@ func (mt *L1InfoTree) ComputeMerkleProof(gerIndex uint32, leaves [][32]byte) ([] if len(leaves)%2 == 1 { leaves = append(leaves, mt.zeroHashes[h]) } - if index%2 == 1 { // If it is odd - siblings = append(siblings, leaves[index-1]) - } else if len(leaves) > 1 { // It is even - if index >= uint32(len(leaves)) { - // siblings = append(siblings, mt.zeroHashes[h]) + if index >= uint32(len(leaves)) { + siblings = append(siblings, mt.zeroHashes[h]) + } else { + if index%2 == 1 { // If it is odd siblings = append(siblings, leaves[index-1]) - } else { + } else { // It is even siblings = append(siblings, leaves[index+1]) } } - var ( nsi [][][]byte hashes [][32]byte diff --git a/l1infotree/tree_test.go b/l1infotree/tree_test.go index a0fe9b97..6af4b8b3 100644 --- a/l1infotree/tree_test.go +++ b/l1infotree/tree_test.go @@ -3,7 +3,6 @@ package l1infotree_test import ( "encoding/hex" "encoding/json" - "fmt" "os" "testing" @@ -130,56 +129,3 @@ func TestAddLeaf2(t *testing.T) { require.Equal(t, testVector.NewRoot, newRoot) } } - -func TestAddLeaf2TestLastLeaf(t *testing.T) { - mt, err := l1infotree.NewL1InfoTree(log.GetDefaultLogger(), uint8(32), [][32]byte{}) - require.NoError(t, err) - leaves := [][32]byte{ - common.HexToHash("0x6a617315ffc0a6831d2de6331f8d3e053889e9385696c13f11853fdcba50e123"), - common.HexToHash("0x1cff355b898cf285bcc3f84a8d6ed51c19fe87ab654f4146f2dc7723a59fc741"), - } - siblings, root, err := 
mt.ComputeMerkleProof(2, leaves) - require.NoError(t, err) - fmt.Printf("Root: %s\n", root.String()) - for i := 0; i < len(siblings); i++ { - hash := common.BytesToHash(siblings[i][:]) - fmt.Printf("Sibling %d: %s\n", i, hash.String()) - } - expectedProof := []string{ - "0x1cff355b898cf285bcc3f84a8d6ed51c19fe87ab654f4146f2dc7723a59fc741", - "0x7ae3eca221dee534b82adffb8003ad3826ddf116132e4ff55c681ff723bc7e42", - "0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30", - "0x21ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba85", - "0xe58769b32a1beaf1ea27375a44095a0d1fb664ce2dd358e7fcbfb78c26a19344", - "0x0eb01ebfc9ed27500cd4dfc979272d1f0913cc9f66540d7e8005811109e1cf2d", - "0x887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968", - "0xffd70157e48063fc33c97a050f7f640233bf646cc98d9524c6b92bcf3ab56f83", - "0x9867cc5f7f196b93bae1e27e6320742445d290f2263827498b54fec539f756af", - "0xcefad4e508c098b9a7e1d8feb19955fb02ba9675585078710969d3440f5054e0", - "0xf9dc3e7fe016e050eff260334f18a5d4fe391d82092319f5964f2e2eb7c1c3a5", - "0xf8b13a49e282f609c317a833fb8d976d11517c571d1221a265d25af778ecf892", - "0x3490c6ceeb450aecdc82e28293031d10c7d73bf85e57bf041a97360aa2c5d99c", - "0xc1df82d9c4b87413eae2ef048f94b4d3554cea73d92b0f7af96e0271c691e2bb", - "0x5c67add7c6caf302256adedf7ab114da0acfe870d449a3a489f781d659e8becc", - "0xda7bce9f4e8618b6bd2f4132ce798cdc7a60e7e1460a7299e3c6342a579626d2", - "0x2733e50f526ec2fa19a22b31e8ed50f23cd1fdf94c9154ed3a7609a2f1ff981f", - "0xe1d3b5c807b281e4683cc6d6315cf95b9ade8641defcb32372f1c126e398ef7a", - "0x5a2dce0a8a7f68bb74560f8f71837c2c2ebbcbf7fffb42ae1896f13f7c7479a0", - "0xb46a28b6f55540f89444f63de0378e3d121be09e06cc9ded1c20e65876d36aa0", - "0xc65e9645644786b620e2dd2ad648ddfcbf4a7e5b1a3a4ecfe7f64667a3f0b7e2", - "0xf4418588ed35a2458cffeb39b93d26f18d2ab13bdce6aee58e7b99359ec2dfd9", - "0x5a9c16dc00d6ef18b7933a6f8dc65ccb55667138776f7dea101070dc8796e377", - "0x4df84f40ae0c8229d0d6069e5c8f39a7c299677a09d367fc7b05e3bc380ee652", - 
"0xcdc72595f74c7b1043d0e1ffbab734648c838dfb0527d971b602bc216c9619ef", - "0x0abf5ac974a1ed57f4050aa510dd9c74f508277b39d7973bb2dfccc5eeb0618d", - "0xb8cd74046ff337f0a7bf2c8e03e10f642c1886798d71806ab1e888d9e5ee87d0", - "0x838c5655cb21c6cb83313b5a631175dff4963772cce9108188b34ac87c81c41e", - "0x662ee4dd2dd7b2bc707961b1e646c4047669dcb6584f0d8d770daf5d7e7deb2e", - "0x388ab20e2573d171a88108e79d820e98f26c0b84aa8b2f4aa4968dbb818ea322", - "0x93237c50ba75ee485f4c22adf2f741400bdf8d6a9cc7df7ecae576221665d735", - "0x8448818bb4ae4562849e949e17ac16e0be16688e156b5cf15e098c627c0056a9"} - for i := 0; i < len(siblings); i++ { - require.Equal(t, expectedProof[i], "0x"+hex.EncodeToString(siblings[i][:])) - } - require.Equal(t, "0xb85687d05a6bdccadcc1170a0e2bbba6855c35c984a0bc91697bc066bd38a338", root.String()) -} diff --git a/scripts/local_config b/scripts/local_config index d1a47b2c..b65210ac 100755 --- a/scripts/local_config +++ b/scripts/local_config @@ -206,7 +206,7 @@ function export_portnum_from_kurtosis_or_fail(){ ############################################################################### function export_ports_from_kurtosis(){ export_portnum_from_kurtosis_or_fail l1_rpc_port el-1-geth-lighthouse rpc - export_portnum_from_kurtosis_or_fail zkevm_rpc_http_port cdk-erigon-node-001 http-rpc rpc + export_portnum_from_kurtosis_or_fail zkevm_rpc_http_port cdk-erigon-rpc-001 http-rpc rpc export_portnum_from_kurtosis_or_fail zkevm_data_streamer_port cdk-erigon-sequencer-001 data-streamer export_portnum_from_kurtosis_or_fail aggregator_db_port postgres-001 postgres export_portnum_from_kurtosis_or_fail agglayer_port agglayer agglayer From d4cf2db2979e9701ade72a72e27dfdee163c3af2 Mon Sep 17 00:00:00 2001 From: Goran Rojovic <100121253+goran-ethernal@users.noreply.github.com> Date: Fri, 8 Nov 2024 14:27:16 +0100 Subject: [PATCH 21/33] fix: Various pessimistic proofs fixes and adaption to `kurtosis-cdk` pessimistic proof branch (#165) * fix: certificate with no importedBridges set '[]' 
instead of 'null' * fix: certificate with no importedBridges set '[]' instead of 'null' * feat: adapt to kurtosis-cdk pp * feat: change para SaveCertificatesToFiles to SaveCertificatesToFilesPath * fix: get candidate and proven certificates as well * fix: remove test * fix: small changes * fix: db tx rollback * fix: replace existing certificate * fix: lint and coverage * feat: check for nil fields in certificate * feat: no claims test * fix: comments * fix: lint * fix: shallow copy imported bridge exits and bridge exits * fix: local_config for debug * fix: cdk-erigon-node-001 rename to cdk-erigon-rpc-001 * feat: add logs to check cert * feat: store hash as text, add logs * fix: lint * fix: bump kurtosis-cdk version to 0.2.18 * fix: comments * fix: string conversion error on BridgeExit * fix: lint * fix: update minter key * fix: e2e * fix: e2e tests --------- Co-authored-by: joanestebanr <129153821+joanestebanr@users.noreply.github.com> Co-authored-by: Victor Castell <0x@vcastellm.xyz> --- .github/workflows/test-e2e.yml | 2 +- .github/workflows/test-resequence.yml | 2 +- agglayer/client.go | 4 +- agglayer/types.go | 127 +++++++++++++++ agglayer/types_test.go | 88 +++++++++++ aggsender/aggsender.go | 118 ++++++++------ aggsender/aggsender_test.go | 174 ++++++++++++++++++--- aggsender/config.go | 16 +- aggsender/db/aggsender_db_storage.go | 59 +++++-- aggsender/db/aggsender_db_storage_test.go | 107 +++++++++++-- aggsender/mocks/mock_aggsender_storage.go | 87 +++++------ aggsender/types/types.go | 4 +- scripts/local_config | 116 +++++++++++--- test/bridge-e2e.bats | 5 +- test/helpers/common-setup.bash | 2 +- test/scripts/batch_verification_monitor.sh | 2 +- test/scripts/env.sh | 2 +- 17 files changed, 746 insertions(+), 169 deletions(-) diff --git a/.github/workflows/test-e2e.yml b/.github/workflows/test-e2e.yml index 7fdb5a2b..980ad990 100644 --- a/.github/workflows/test-e2e.yml +++ b/.github/workflows/test-e2e.yml @@ -70,7 +70,7 @@ jobs: with: repository: 
0xPolygon/kurtosis-cdk path: "kurtosis-cdk" - ref: "v0.2.15" + ref: "v0.2.18" - name: Setup Bats and bats libs uses: bats-core/bats-action@2.0.0 diff --git a/.github/workflows/test-resequence.yml b/.github/workflows/test-resequence.yml index 71ebc7d7..23d73423 100644 --- a/.github/workflows/test-resequence.yml +++ b/.github/workflows/test-resequence.yml @@ -92,7 +92,7 @@ jobs: run: | mkdir -p ci_logs cd ci_logs - kurtosis service logs cdk-v1 cdk-erigon-node-001 --all > cdk-erigon-node-001.log + kurtosis service logs cdk-v1 cdk-erigon-rpc-001 --all > cdk-erigon-rpc-001.log kurtosis service logs cdk-v1 cdk-erigon-sequencer-001 --all > cdk-erigon-sequencer-001.log kurtosis service logs cdk-v1 zkevm-agglayer-001 --all > zkevm-agglayer-001.log kurtosis service logs cdk-v1 zkevm-prover-001 --all > zkevm-prover-001.log diff --git a/agglayer/client.go b/agglayer/client.go index 132c2716..e60c1c7c 100644 --- a/agglayer/client.go +++ b/agglayer/client.go @@ -91,7 +91,9 @@ func (c *AggLayerClient) WaitTxToBeMined(hash common.Hash, ctx context.Context) // SendCertificate sends a certificate to the AggLayer func (c *AggLayerClient) SendCertificate(certificate *SignedCertificate) (common.Hash, error) { - response, err := rpc.JSONRPCCall(c.url, "interop_sendCertificate", certificate) + certificateToSend := certificate.CopyWithDefaulting() + + response, err := rpc.JSONRPCCall(c.url, "interop_sendCertificate", certificateToSend) if err != nil { return common.Hash{}, err } diff --git a/agglayer/types.go b/agglayer/types.go index 825c9db2..9350e791 100644 --- a/agglayer/types.go +++ b/agglayer/types.go @@ -86,6 +86,30 @@ type Certificate struct { Metadata common.Hash `json:"metadata"` } +func (c *Certificate) String() string { + res := fmt.Sprintf("NetworkID: %d, Height: %d, PrevLocalExitRoot: %s, NewLocalExitRoot: %s, Metadata: %s\n", + c.NetworkID, c.Height, common.Bytes2Hex(c.PrevLocalExitRoot[:]), + common.Bytes2Hex(c.NewLocalExitRoot[:]), common.Bytes2Hex(c.Metadata[:])) + + if 
c.BridgeExits == nil { + res += " BridgeExits: nil\n" + } else { + for i, bridgeExit := range c.BridgeExits { + res += fmt.Sprintf(", BridgeExit[%d]: %s\n", i, bridgeExit.String()) + } + } + + if c.ImportedBridgeExits == nil { + res += " ImportedBridgeExits: nil\n" + } else { + for i, importedBridgeExit := range c.ImportedBridgeExits { + res += fmt.Sprintf(" ImportedBridgeExit[%d]: %s\n", i, importedBridgeExit.String()) + } + } + + return res +} + // Hash returns a hash that uniquely identifies the certificate func (c *Certificate) Hash() common.Hash { bridgeExitsHashes := make([][]byte, len(c.BridgeExits)) @@ -131,6 +155,33 @@ type SignedCertificate struct { Signature *Signature `json:"signature"` } +func (s *SignedCertificate) String() string { + return fmt.Sprintf("Certificate:%s,\nSignature: %s", s.Certificate.String(), s.Signature.String()) +} + +// CopyWithDefaulting returns a shallow copy of the signed certificate +func (s *SignedCertificate) CopyWithDefaulting() *SignedCertificate { + certificateCopy := *s.Certificate + + if certificateCopy.BridgeExits == nil { + certificateCopy.BridgeExits = make([]*BridgeExit, 0) + } + + if certificateCopy.ImportedBridgeExits == nil { + certificateCopy.ImportedBridgeExits = make([]*ImportedBridgeExit, 0) + } + + signature := s.Signature + if signature == nil { + signature = &Signature{} + } + + return &SignedCertificate{ + Certificate: &certificateCopy, + Signature: signature, + } +} + // Signature is the data structure that will hold the signature of the given certificate type Signature struct { R common.Hash `json:"r"` @@ -138,12 +189,20 @@ type Signature struct { OddParity bool `json:"odd_y_parity"` } +func (s *Signature) String() string { + return fmt.Sprintf("R: %s, S: %s, OddParity: %t", s.R.String(), s.S.String(), s.OddParity) +} + // TokenInfo encapsulates the information to uniquely identify a token on the origin network. 
type TokenInfo struct { OriginNetwork uint32 `json:"origin_network"` OriginTokenAddress common.Address `json:"origin_token_address"` } +func (t *TokenInfo) String() string { + return fmt.Sprintf("OriginNetwork: %d, OriginTokenAddress: %s", t.OriginNetwork, t.OriginTokenAddress.String()) +} + // GlobalIndex represents the global index of an imported bridge exit type GlobalIndex struct { MainnetFlag bool `json:"mainnet_flag"` @@ -159,6 +218,11 @@ func (g *GlobalIndex) Hash() common.Hash { ) } +func (g *GlobalIndex) String() string { + return fmt.Sprintf("MainnetFlag: %t, RollupIndex: %d, LeafIndex: %d", + g.MainnetFlag, g.RollupIndex, g.LeafIndex) +} + // BridgeExit represents a token bridge exit type BridgeExit struct { LeafType LeafType `json:"leaf_type"` @@ -169,6 +233,20 @@ type BridgeExit struct { Metadata []byte `json:"metadata"` } +func (b *BridgeExit) String() string { + res := fmt.Sprintf("LeafType: %s, DestinationNetwork: %d, DestinationAddress: %s, Amount: %s, Metadata: %s", + b.LeafType.String(), b.DestinationNetwork, b.DestinationAddress.String(), + b.Amount.String(), common.Bytes2Hex(b.Metadata)) + + if b.TokenInfo == nil { + res += ", TokenInfo: nil" + } else { + res += fmt.Sprintf(", TokenInfo: %s", b.TokenInfo.String()) + } + + return res +} + // Hash returns a hash that uniquely identifies the bridge exit func (b *BridgeExit) Hash() common.Hash { if b.Amount == nil { @@ -252,6 +330,10 @@ func (m *MerkleProof) Hash() common.Hash { ) } +func (m *MerkleProof) String() string { + return fmt.Sprintf("Root: %s, Proof: %v", m.Root.String(), m.Proof) +} + // L1InfoTreeLeafInner represents the inner part of the L1 info tree leaf type L1InfoTreeLeafInner struct { GlobalExitRoot common.Hash `json:"global_exit_root"` @@ -281,6 +363,11 @@ func (l *L1InfoTreeLeafInner) MarshalJSON() ([]byte, error) { }) } +func (l *L1InfoTreeLeafInner) String() string { + return fmt.Sprintf("GlobalExitRoot: %s, BlockHash: %s, Timestamp: %d", + l.GlobalExitRoot.String(), 
l.BlockHash.String(), l.Timestamp) +} + // L1InfoTreeLeaf represents the leaf of the L1 info tree type L1InfoTreeLeaf struct { L1InfoTreeIndex uint32 `json:"l1_info_tree_index"` @@ -294,11 +381,21 @@ func (l *L1InfoTreeLeaf) Hash() common.Hash { return l.Inner.Hash() } +func (l *L1InfoTreeLeaf) String() string { + return fmt.Sprintf("L1InfoTreeIndex: %d, RollupExitRoot: %s, MainnetExitRoot: %s, Inner: %s", + l.L1InfoTreeIndex, + common.Bytes2Hex(l.RollupExitRoot[:]), + common.Bytes2Hex(l.MainnetExitRoot[:]), + l.Inner.String(), + ) +} + // Claim is the interface that will be implemented by the different types of claims type Claim interface { Type() string Hash() common.Hash MarshalJSON() ([]byte, error) + String() string } // ClaimFromMainnnet represents a claim originating from the mainnet @@ -335,6 +432,11 @@ func (c *ClaimFromMainnnet) Hash() common.Hash { ) } +func (c *ClaimFromMainnnet) String() string { + return fmt.Sprintf("ProofLeafMER: %s, ProofGERToL1Root: %s, L1Leaf: %s", + c.ProofLeafMER.String(), c.ProofGERToL1Root.String(), c.L1Leaf.String()) +} + // ClaimFromRollup represents a claim originating from a rollup type ClaimFromRollup struct { ProofLeafLER *MerkleProof `json:"proof_leaf_ler"` @@ -372,6 +474,11 @@ func (c *ClaimFromRollup) Hash() common.Hash { ) } +func (c *ClaimFromRollup) String() string { + return fmt.Sprintf("ProofLeafLER: %s, ProofLERToRER: %s, ProofGERToL1Root: %s, L1Leaf: %s", + c.ProofLeafLER.String(), c.ProofLERToRER.String(), c.ProofGERToL1Root.String(), c.L1Leaf.String()) +} + // ImportedBridgeExit represents a token bridge exit originating on another network but claimed on the current network. 
type ImportedBridgeExit struct { BridgeExit *BridgeExit `json:"bridge_exit"` @@ -379,6 +486,26 @@ type ImportedBridgeExit struct { GlobalIndex *GlobalIndex `json:"global_index"` } +func (c *ImportedBridgeExit) String() string { + var res string + + if c.BridgeExit == nil { + res = "BridgeExit: nil" + } else { + res = fmt.Sprintf("BridgeExit: %s", c.BridgeExit.String()) + } + + if c.GlobalIndex == nil { + res += ", GlobalIndex: nil" + } else { + res += fmt.Sprintf(", GlobalIndex: %s", c.GlobalIndex.String()) + } + + res += fmt.Sprintf("ClaimData: %s", c.ClaimData.String()) + + return res +} + // Hash returns a hash that uniquely identifies the imported bridge exit func (c *ImportedBridgeExit) Hash() common.Hash { return crypto.Keccak256Hash( diff --git a/agglayer/types_test.go b/agglayer/types_test.go index 325c0b88..95033141 100644 --- a/agglayer/types_test.go +++ b/agglayer/types_test.go @@ -64,3 +64,91 @@ func TestMarshalJSON(t *testing.T) { log.Info(string(data)) require.Equal(t, expectedSignedCertificateyMetadataJSON, string(data)) } + +func TestSignedCertificate_Copy(t *testing.T) { + t.Parallel() + + t.Run("copy with non-nil fields", func(t *testing.T) { + t.Parallel() + + original := &SignedCertificate{ + Certificate: &Certificate{ + NetworkID: 1, + Height: 100, + PrevLocalExitRoot: [32]byte{0x01}, + NewLocalExitRoot: [32]byte{0x02}, + BridgeExits: []*BridgeExit{ + { + LeafType: LeafTypeAsset, + TokenInfo: &TokenInfo{OriginNetwork: 1, OriginTokenAddress: common.HexToAddress("0x123")}, + DestinationNetwork: 2, + DestinationAddress: common.HexToAddress("0x456"), + Amount: big.NewInt(1000), + Metadata: []byte{0x01, 0x02}, + }, + }, + ImportedBridgeExits: []*ImportedBridgeExit{ + { + BridgeExit: &BridgeExit{ + LeafType: LeafTypeMessage, + TokenInfo: &TokenInfo{OriginNetwork: 1, OriginTokenAddress: common.HexToAddress("0x789")}, + DestinationNetwork: 3, + DestinationAddress: common.HexToAddress("0xabc"), + Amount: big.NewInt(2000), + Metadata: []byte{0x03, 0x04}, 
+ }, + ClaimData: &ClaimFromMainnnet{}, + GlobalIndex: &GlobalIndex{MainnetFlag: true, RollupIndex: 1, LeafIndex: 2}, + }, + }, + Metadata: common.HexToHash("0xdef"), + }, + Signature: &Signature{ + R: common.HexToHash("0x111"), + S: common.HexToHash("0x222"), + OddParity: true, + }, + } + + certificateCopy := original.CopyWithDefaulting() + + require.NotNil(t, certificateCopy) + require.NotSame(t, original, certificateCopy) + require.NotSame(t, original.Certificate, certificateCopy.Certificate) + require.Same(t, original.Signature, certificateCopy.Signature) + require.Equal(t, original, certificateCopy) + }) + + t.Run("copy with nil BridgeExits, ImportedBridgeExits and Signature", func(t *testing.T) { + t.Parallel() + + original := &SignedCertificate{ + Certificate: &Certificate{ + NetworkID: 1, + Height: 100, + PrevLocalExitRoot: [32]byte{0x01}, + NewLocalExitRoot: [32]byte{0x02}, + BridgeExits: nil, + ImportedBridgeExits: nil, + Metadata: common.HexToHash("0xdef"), + }, + Signature: nil, + } + + certificateCopy := original.CopyWithDefaulting() + + require.NotNil(t, certificateCopy) + require.NotSame(t, original, certificateCopy) + require.NotSame(t, original.Certificate, certificateCopy.Certificate) + require.NotNil(t, certificateCopy.Signature) + require.Equal(t, original.NetworkID, certificateCopy.NetworkID) + require.Equal(t, original.Height, certificateCopy.Height) + require.Equal(t, original.PrevLocalExitRoot, certificateCopy.PrevLocalExitRoot) + require.Equal(t, original.NewLocalExitRoot, certificateCopy.NewLocalExitRoot) + require.Equal(t, original.Metadata, certificateCopy.Metadata) + require.NotNil(t, certificateCopy.BridgeExits) + require.NotNil(t, certificateCopy.ImportedBridgeExits) + require.Empty(t, certificateCopy.BridgeExits) + require.Empty(t, certificateCopy.ImportedBridgeExits) + }) +} diff --git a/aggsender/aggsender.go b/aggsender/aggsender.go index f1df20ff..73953633 100644 --- a/aggsender/aggsender.go +++ b/aggsender/aggsender.go @@ -27,7 
+27,8 @@ var ( errNoBridgesAndClaims = errors.New("no bridges and claims to build certificate") errInvalidSignatureSize = errors.New("invalid signature size") - zeroLER = common.HexToHash("0x27ae5ba08d7291c96c8cbddcc148bf48a6d68c7974b94356f53754ef6171d757") + zeroLER = common.HexToHash("0x27ae5ba08d7291c96c8cbddcc148bf48a6d68c7974b94356f53754ef6171d757") + nonSettledStatuses = []agglayer.CertificateStatus{agglayer.Pending, agglayer.Candidate, agglayer.Proven} ) // AggSender is a component that will send certificates to the aggLayer @@ -63,6 +64,8 @@ func New( return nil, err } + logger.Infof("Aggsender Config: %s.", cfg.String()) + return &AggSender{ cfg: cfg, log: logger, @@ -87,7 +90,7 @@ func (a *AggSender) sendCertificates(ctx context.Context) { for { select { case <-ticker.C: - if err := a.sendCertificate(ctx); err != nil { + if _, err := a.sendCertificate(ctx); err != nil { log.Error(err) } case <-ctx.Done(): @@ -98,27 +101,27 @@ func (a *AggSender) sendCertificates(ctx context.Context) { } // sendCertificate sends certificate for a network -func (a *AggSender) sendCertificate(ctx context.Context) error { +func (a *AggSender) sendCertificate(ctx context.Context) (*agglayer.SignedCertificate, error) { a.log.Infof("trying to send a new certificate...") - shouldSend, err := a.shouldSendCertificate(ctx) + shouldSend, err := a.shouldSendCertificate() if err != nil { - return err + return nil, err } if !shouldSend { a.log.Infof("waiting for pending certificates to be settled") - return nil + return nil, nil } lasL2BlockSynced, err := a.l2Syncer.GetLastProcessedBlock(ctx) if err != nil { - return fmt.Errorf("error getting last processed block from l2: %w", err) + return nil, fmt.Errorf("error getting last processed block from l2: %w", err) } - lastSentCertificateInfo, err := a.storage.GetLastSentCertificate(ctx) + lastSentCertificateInfo, err := a.storage.GetLastSentCertificate() if err != nil { - return err + return nil, err } previousToBlock := 
lastSentCertificateInfo.ToBlock @@ -131,7 +134,7 @@ func (a *AggSender) sendCertificate(ctx context.Context) error { if previousToBlock >= lasL2BlockSynced { a.log.Infof("no new blocks to send a certificate, last certificate block: %d, last L2 block: %d", previousToBlock, lasL2BlockSynced) - return nil + return nil, nil } fromBlock := previousToBlock + 1 @@ -139,64 +142,68 @@ func (a *AggSender) sendCertificate(ctx context.Context) error { bridges, err := a.l2Syncer.GetBridgesPublished(ctx, fromBlock, toBlock) if err != nil { - return fmt.Errorf("error getting bridges: %w", err) + return nil, fmt.Errorf("error getting bridges: %w", err) } if len(bridges) == 0 { a.log.Infof("no bridges consumed, no need to send a certificate from block: %d to block: %d", fromBlock, toBlock) - return nil + return nil, nil } claims, err := a.l2Syncer.GetClaims(ctx, fromBlock, toBlock) if err != nil { - return fmt.Errorf("error getting claims: %w", err) + return nil, fmt.Errorf("error getting claims: %w", err) } a.log.Infof("building certificate for block: %d to block: %d", fromBlock, toBlock) certificate, err := a.buildCertificate(ctx, bridges, claims, lastSentCertificateInfo, toBlock) if err != nil { - return fmt.Errorf("error building certificate: %w", err) + return nil, fmt.Errorf("error building certificate: %w", err) } signedCertificate, err := a.signCertificate(certificate) if err != nil { - return fmt.Errorf("error signing certificate: %w", err) + return nil, fmt.Errorf("error signing certificate: %w", err) } a.saveCertificateToFile(signedCertificate) + a.log.Debugf("certificate ready to be send to AggLayer: %s", signedCertificate.String()) certificateHash, err := a.aggLayerClient.SendCertificate(signedCertificate) if err != nil { - return fmt.Errorf("error sending certificate: %w", err) + return nil, fmt.Errorf("error sending certificate: %w", err) } - log.Infof("certificate send: Height: %d hash: %s", signedCertificate.Height, certificateHash.String()) - if err := 
a.storage.SaveLastSentCertificate(ctx, aggsendertypes.CertificateInfo{ + a.log.Debugf("certificate send: Height: %d hash: %s", signedCertificate.Height, certificateHash.String()) + + certInfo := aggsendertypes.CertificateInfo{ Height: certificate.Height, CertificateID: certificateHash, NewLocalExitRoot: certificate.NewLocalExitRoot, FromBlock: fromBlock, ToBlock: toBlock, - }); err != nil { - return fmt.Errorf("error saving last sent certificate in db: %w", err) } - a.log.Infof("certificate: %s sent successfully for range of l2 blocks (from block: %d, to block: %d)", - certificateHash, fromBlock, toBlock) + if err := a.storage.SaveLastSentCertificate(ctx, certInfo); err != nil { + return nil, fmt.Errorf("error saving last sent certificate %s in db: %w", certInfo.String(), err) + } + + a.log.Infof("certificate: %s sent successfully for range of l2 blocks (from block: %d, to block: %d) cert:%s", + certificateHash, fromBlock, toBlock, signedCertificate.String()) - return nil + return signedCertificate, nil } // saveCertificate saves the certificate to a tmp file func (a *AggSender) saveCertificateToFile(signedCertificate *agglayer.SignedCertificate) { - if signedCertificate == nil || !a.cfg.SaveCertificatesToFiles { + if signedCertificate == nil || a.cfg.SaveCertificatesToFilesPath == "" { return } - - fn := fmt.Sprintf("/tmp/certificate_%04d.json", signedCertificate.Height) + fn := fmt.Sprintf("%s/certificate_%04d-%07d.json", + a.cfg.SaveCertificatesToFilesPath, signedCertificate.Height, time.Now().Unix()) a.log.Infof("saving certificate to file: %s", fn) - jsonData, err := json.Marshal(signedCertificate) + jsonData, err := json.MarshalIndent(signedCertificate, "", " ") if err != nil { a.log.Errorf("error marshalling certificate: %w", err) } @@ -206,6 +213,27 @@ func (a *AggSender) saveCertificateToFile(signedCertificate *agglayer.SignedCert } } +// getNextHeightAndPreviousLER returns the height and previous LER for the new certificate +func (a *AggSender) 
getNextHeightAndPreviousLER( + lastSentCertificateInfo *aggsendertypes.CertificateInfo) (uint64, common.Hash) { + height := lastSentCertificateInfo.Height + 1 + if lastSentCertificateInfo.Status == agglayer.InError { + // previous certificate was in error, so we need to resend it + a.log.Debugf("Last certificate %s failed so reusing height %d", + lastSentCertificateInfo.CertificateID, lastSentCertificateInfo.Height) + height = lastSentCertificateInfo.Height + } + + previousLER := lastSentCertificateInfo.NewLocalExitRoot + if lastSentCertificateInfo.NewLocalExitRoot == (common.Hash{}) { + // meaning this is the first certificate + height = 0 + previousLER = zeroLER + } + + return height, previousLER +} + // buildCertificate builds a certificate from the bridge events func (a *AggSender) buildCertificate(ctx context.Context, bridges []bridgesync.Bridge, @@ -223,6 +251,7 @@ func (a *AggSender) buildCertificate(ctx context.Context, } var depositCount uint32 + if len(bridges) > 0 { depositCount = bridges[len(bridges)-1].DepositCount } @@ -232,13 +261,7 @@ func (a *AggSender) buildCertificate(ctx context.Context, return nil, fmt.Errorf("error getting exit root by index: %d. 
Error: %w", depositCount, err) } - height := lastSentCertificateInfo.Height + 1 - previousLER := lastSentCertificateInfo.NewLocalExitRoot - if lastSentCertificateInfo.NewLocalExitRoot == (common.Hash{}) { - // meaning this is the first certificate - height = 0 - previousLER = zeroLER - } + height, previousLER := a.getNextHeightAndPreviousLER(&lastSentCertificateInfo) return &agglayer.Certificate{ NetworkID: a.l2Syncer.OriginNetwork(), @@ -312,7 +335,7 @@ func (a *AggSender) getImportedBridgeExits( ) ([]*agglayer.ImportedBridgeExit, error) { if len(claims) == 0 { // no claims to convert - return nil, nil + return []*agglayer.ImportedBridgeExit{}, nil } var ( @@ -459,26 +482,31 @@ func (a *AggSender) checkIfCertificatesAreSettled(ctx context.Context) { // checkPendingCertificatesStatus checks the status of pending certificates // and updates in the storage if it changed on agglayer func (a *AggSender) checkPendingCertificatesStatus(ctx context.Context) { - pendingCertificates, err := a.storage.GetCertificatesByStatus(ctx, []agglayer.CertificateStatus{agglayer.Pending}) + pendingCertificates, err := a.storage.GetCertificatesByStatus(nonSettledStatuses) if err != nil { a.log.Errorf("error getting pending certificates: %w", err) + return } - + a.log.Debugf("checkPendingCertificatesStatus num of pendingCertificates: %d", len(pendingCertificates)) for _, certificate := range pendingCertificates { certificateHeader, err := a.aggLayerClient.GetCertificateHeader(certificate.CertificateID) if err != nil { - a.log.Errorf("error getting header of certificate %s with height: %d from agglayer: %w", - certificate.CertificateID, certificate.Height, err) + a.log.Errorf("error getting certificate header of %s from agglayer: %w", + certificate.String(), err) continue } + a.log.Debugf("aggLayerClient.GetCertificateHeader status [%s] of certificate %s ", + certificateHeader.Status, + certificateHeader.String()) - if certificateHeader.Status != agglayer.Pending { - certificate.Status = 
certificateHeader.Status + if certificateHeader.Status != certificate.Status { + a.log.Infof("certificate %s changed status from [%s] to [%s]", + certificateHeader.String(), certificate.Status, certificateHeader.Status) - a.log.Infof("certificate %s changed status to %s", certificateHeader.String(), certificate.Status) + certificate.Status = certificateHeader.Status if err := a.storage.UpdateCertificateStatus(ctx, *certificate); err != nil { - a.log.Errorf("error updating certificate status in storage: %w", err) + a.log.Errorf("error updating certificate %s status in storage: %w", certificateHeader.String(), err) continue } } @@ -487,8 +515,8 @@ func (a *AggSender) checkPendingCertificatesStatus(ctx context.Context) { // shouldSendCertificate checks if a certificate should be sent at given time // if we have pending certificates, then we wait until they are settled -func (a *AggSender) shouldSendCertificate(ctx context.Context) (bool, error) { - pendingCertificates, err := a.storage.GetCertificatesByStatus(ctx, []agglayer.CertificateStatus{agglayer.Pending}) +func (a *AggSender) shouldSendCertificate() (bool, error) { + pendingCertificates, err := a.storage.GetCertificatesByStatus(nonSettledStatuses) if err != nil { return false, fmt.Errorf("error getting pending certificates: %w", err) } diff --git a/aggsender/aggsender_test.go b/aggsender/aggsender_test.go index 71878679..e55422e0 100644 --- a/aggsender/aggsender_test.go +++ b/aggsender/aggsender_test.go @@ -34,6 +34,29 @@ func TestExploratoryGetCertificateHeader(t *testing.T) { fmt.Print(certificateHeader) } +func TestConfigString(t *testing.T) { + config := Config{ + StoragePath: "/path/to/storage", + AggLayerURL: "http://agglayer.url", + BlockGetInterval: types.Duration{Duration: 10 * time.Second}, + CheckSettledInterval: types.Duration{Duration: 20 * time.Second}, + AggsenderPrivateKey: types.KeystoreFileConfig{Path: "/path/to/key", Password: "password"}, + URLRPCL2: "http://l2.rpc.url", + 
SaveCertificatesToFilesPath: "/path/to/certificates", + } + + expected := "StoragePath: /path/to/storage\n" + + "AggLayerURL: http://agglayer.url\n" + + "BlockGetInterval: 10s\n" + + "CheckSettledInterval: 20s\n" + + "AggsenderPrivateKeyPath: /path/to/key\n" + + "AggsenderPrivateKeyPassword: password\n" + + "URLRPCL2: http://l2.rpc.url\n" + + "SaveCertificatesToFilesPath: /path/to/certificates\n" + + require.Equal(t, expected, config.String()) +} + func TestConvertClaimToImportedBridgeExit(t *testing.T) { t.Parallel() @@ -456,7 +479,7 @@ func TestGetImportedBridgeExits(t *testing.T) { name: "No claims", claims: []bridgesync.Claim{}, expectedError: false, - expectedExits: nil, + expectedExits: []*agglayer.ImportedBridgeExit{}, }, } @@ -801,9 +824,10 @@ func TestCheckIfCertificatesAreSettled(t *testing.T) { mockStorage := mocks.NewAggSenderStorageMock(t) mockAggLayerClient := agglayer.NewAgglayerClientMock(t) - mockLogger := mocks.NewLoggerMock(t) + mockLogger := log.WithFields("test", "unittest") - mockStorage.On("GetCertificatesByStatus", mock.Anything, []agglayer.CertificateStatus{agglayer.Pending}).Return(tt.pendingCertificates, tt.getFromDBError) + mockStorage.On("GetCertificatesByStatus", nonSettledStatuses).Return( + tt.pendingCertificates, tt.getFromDBError) for certID, header := range tt.certificateHeaders { mockAggLayerClient.On("GetCertificateHeader", certID).Return(header, tt.clientError) } @@ -813,20 +837,6 @@ func TestCheckIfCertificatesAreSettled(t *testing.T) { mockStorage.On("UpdateCertificateStatus", mock.Anything, mock.Anything).Return(nil) } - if tt.clientError != nil { - for _, msg := range tt.expectedErrorLogMessages { - mockLogger.On("Errorf", msg, mock.Anything, mock.Anything, mock.Anything).Return() - } - } else { - for _, msg := range tt.expectedErrorLogMessages { - mockLogger.On("Errorf", msg, mock.Anything).Return() - } - - for _, msg := range tt.expectedInfoMessages { - mockLogger.On("Infof", msg, mock.Anything, mock.Anything).Return() - 
} - } - aggSender := &AggSender{ log: mockLogger, storage: mockStorage, @@ -845,7 +855,6 @@ func TestCheckIfCertificatesAreSettled(t *testing.T) { time.Sleep(2 * time.Second) cancel() - mockLogger.AssertExpectations(t) mockAggLayerClient.AssertExpectations(t) mockStorage.AssertExpectations(t) }) @@ -893,13 +902,13 @@ func TestSendCertificate(t *testing.T) { if cfg.shouldSendCertificate != nil || cfg.getLastSentCertificate != nil || cfg.saveLastSentCertificate != nil { mockStorage = mocks.NewAggSenderStorageMock(t) - mockStorage.On("GetCertificatesByStatus", mock.Anything, []agglayer.CertificateStatus{agglayer.Pending}). + mockStorage.On("GetCertificatesByStatus", nonSettledStatuses). Return(cfg.shouldSendCertificate...).Once() aggsender.storage = mockStorage if cfg.getLastSentCertificate != nil { - mockStorage.On("GetLastSentCertificate", mock.Anything).Return(cfg.getLastSentCertificate...).Once() + mockStorage.On("GetLastSentCertificate").Return(cfg.getLastSentCertificate...).Once() } if cfg.saveLastSentCertificate != nil { @@ -1235,7 +1244,7 @@ func TestSendCertificate(t *testing.T) { aggsender, mockStorage, mockL2Syncer, mockAggLayerClient, mockL1InfoTreeSyncer := setupTest(tt) - err := aggsender.sendCertificate(context.Background()) + _, err := aggsender.sendCertificate(context.Background()) if tt.expectedError != "" { require.ErrorContains(t, err, tt.expectedError) @@ -1408,3 +1417,126 @@ func TestExploratoryGenerateCert(t *testing.T) { encoder.SetIndent("", " ") require.NoError(t, encoder.Encode(certificate)) } + +func TestGetNextHeightAndPreviousLER(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + lastSentCertificateInfo aggsendertypes.CertificateInfo + expectedHeight uint64 + expectedPreviousLER common.Hash + }{ + { + name: "Normal case", + lastSentCertificateInfo: aggsendertypes.CertificateInfo{ + Height: 10, + NewLocalExitRoot: common.HexToHash("0x123"), + Status: agglayer.Settled, + }, + expectedHeight: 11, + expectedPreviousLER: 
common.HexToHash("0x123"), + }, + { + name: "Previous certificate in error", + lastSentCertificateInfo: aggsendertypes.CertificateInfo{ + Height: 10, + NewLocalExitRoot: common.HexToHash("0x123"), + Status: agglayer.InError, + }, + expectedHeight: 10, + expectedPreviousLER: common.HexToHash("0x123"), + }, + { + name: "First certificate", + lastSentCertificateInfo: aggsendertypes.CertificateInfo{ + Height: 0, + NewLocalExitRoot: common.Hash{}, + Status: agglayer.Settled, + }, + expectedHeight: 0, + expectedPreviousLER: zeroLER, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + aggSender := &AggSender{log: log.WithFields("aggsender-test", "getNextHeightAndPreviousLER")} + height, previousLER := aggSender.getNextHeightAndPreviousLER(&tt.lastSentCertificateInfo) + + require.Equal(t, tt.expectedHeight, height) + require.Equal(t, tt.expectedPreviousLER, previousLER) + }) + } +} + +func TestSendCertificate_NoClaims(t *testing.T) { + t.Parallel() + + privateKey, err := crypto.GenerateKey() + require.NoError(t, err) + + ctx := context.Background() + mockStorage := mocks.NewAggSenderStorageMock(t) + mockL2Syncer := mocks.NewL2BridgeSyncerMock(t) + mockAggLayerClient := agglayer.NewAgglayerClientMock(t) + mockL1InfoTreeSyncer := mocks.NewL1InfoTreeSyncerMock(t) + + aggSender := &AggSender{ + log: log.WithFields("aggsender-test", "no claims test"), + storage: mockStorage, + l2Syncer: mockL2Syncer, + aggLayerClient: mockAggLayerClient, + l1infoTreeSyncer: mockL1InfoTreeSyncer, + sequencerKey: privateKey, + cfg: Config{ + BlockGetInterval: types.Duration{Duration: time.Second}, + CheckSettledInterval: types.Duration{Duration: time.Second}, + }, + } + + mockStorage.On("GetCertificatesByStatus", nonSettledStatuses).Return([]*aggsendertypes.CertificateInfo{}, nil).Once() + mockStorage.On("GetLastSentCertificate").Return(aggsendertypes.CertificateInfo{ + NewLocalExitRoot: common.HexToHash("0x123"), + Height: 1, + FromBlock: 
0, + ToBlock: 10, + }, nil).Once() + mockStorage.On("SaveLastSentCertificate", mock.Anything, mock.Anything).Return(nil).Once() + mockL2Syncer.On("GetLastProcessedBlock", mock.Anything).Return(uint64(50), nil) + mockL2Syncer.On("GetBridgesPublished", mock.Anything, uint64(11), uint64(50)).Return([]bridgesync.Bridge{ + { + BlockNum: 30, + BlockPos: 0, + LeafType: agglayer.LeafTypeAsset.Uint8(), + OriginNetwork: 1, + OriginAddress: common.HexToAddress("0x1"), + DestinationNetwork: 2, + DestinationAddress: common.HexToAddress("0x2"), + Amount: big.NewInt(100), + Metadata: []byte("metadata"), + DepositCount: 1, + }, + }, nil).Once() + mockL2Syncer.On("GetClaims", mock.Anything, uint64(11), uint64(50)).Return([]bridgesync.Claim{}, nil).Once() + mockL2Syncer.On("GetExitRootByIndex", mock.Anything, uint32(1)).Return(treeTypes.Root{}, nil).Once() + mockL2Syncer.On("OriginNetwork").Return(uint32(1), nil).Once() + mockAggLayerClient.On("SendCertificate", mock.Anything).Return(common.Hash{}, nil).Once() + + signedCertificate, err := aggSender.sendCertificate(ctx) + require.NoError(t, err) + require.NotNil(t, signedCertificate) + require.NotNil(t, signedCertificate.Signature) + require.NotNil(t, signedCertificate.Certificate) + require.NotNil(t, signedCertificate.Certificate.ImportedBridgeExits) + require.Len(t, signedCertificate.Certificate.BridgeExits, 1) + + mockStorage.AssertExpectations(t) + mockL2Syncer.AssertExpectations(t) + mockAggLayerClient.AssertExpectations(t) + mockL1InfoTreeSyncer.AssertExpectations(t) +} diff --git a/aggsender/config.go b/aggsender/config.go index 506b4e9a..4ff78f96 100644 --- a/aggsender/config.go +++ b/aggsender/config.go @@ -18,6 +18,18 @@ type Config struct { AggsenderPrivateKey types.KeystoreFileConfig `mapstructure:"AggsenderPrivateKey"` // URLRPCL2 is the URL of the L2 RPC node URLRPCL2 string `mapstructure:"URLRPCL2"` - // SaveCertificatesToFiles is a flag which tells the AggSender to save the certificates to a file - 
SaveCertificatesToFiles bool `mapstructure:"SaveCertificatesToFiles"` + // SaveCertificatesToFilesPath if != "" tells the AggSender to save the certificates to a file in this path + SaveCertificatesToFilesPath string `mapstructure:"SaveCertificatesToFilesPath"` +} + +// String returns a string representation of the Config +func (c Config) String() string { + return "StoragePath: " + c.StoragePath + "\n" + + "AggLayerURL: " + c.AggLayerURL + "\n" + + "BlockGetInterval: " + c.BlockGetInterval.String() + "\n" + + "CheckSettledInterval: " + c.CheckSettledInterval.String() + "\n" + + "AggsenderPrivateKeyPath: " + c.AggsenderPrivateKey.Path + "\n" + + "AggsenderPrivateKeyPassword: " + c.AggsenderPrivateKey.Password + "\n" + + "URLRPCL2: " + c.URLRPCL2 + "\n" + + "SaveCertificatesToFilesPath: " + c.SaveCertificatesToFilesPath + "\n" } diff --git a/aggsender/db/aggsender_db_storage.go b/aggsender/db/aggsender_db_storage.go index 25b31392..15866c29 100644 --- a/aggsender/db/aggsender_db_storage.go +++ b/aggsender/db/aggsender_db_storage.go @@ -21,15 +21,15 @@ const errWhileRollbackFormat = "error while rolling back tx: %w" // AggSenderStorage is the interface that defines the methods to interact with the storage type AggSenderStorage interface { // GetCertificateByHeight returns a certificate by its height - GetCertificateByHeight(ctx context.Context, height uint64) (types.CertificateInfo, error) + GetCertificateByHeight(height uint64) (types.CertificateInfo, error) // GetLastSentCertificate returns the last certificate sent to the aggLayer - GetLastSentCertificate(ctx context.Context) (types.CertificateInfo, error) + GetLastSentCertificate() (types.CertificateInfo, error) // SaveLastSentCertificate saves the last certificate sent to the aggLayer SaveLastSentCertificate(ctx context.Context, certificate types.CertificateInfo) error // DeleteCertificate deletes a certificate from the storage DeleteCertificate(ctx context.Context, certificateID common.Hash) error // 
GetCertificatesByStatus returns a list of certificates by their status - GetCertificatesByStatus(ctx context.Context, status []agglayer.CertificateStatus) ([]*types.CertificateInfo, error) + GetCertificatesByStatus(status []agglayer.CertificateStatus) ([]*types.CertificateInfo, error) // UpdateCertificateStatus updates the status of a certificate UpdateCertificateStatus(ctx context.Context, certificate types.CertificateInfo) error } @@ -59,7 +59,7 @@ func NewAggSenderSQLStorage(logger *log.Logger, dbPath string) (*AggSenderSQLSto }, nil } -func (a *AggSenderSQLStorage) GetCertificatesByStatus(ctx context.Context, +func (a *AggSenderSQLStorage) GetCertificatesByStatus( statuses []agglayer.CertificateStatus) ([]*types.CertificateInfo, error) { query := "SELECT * FROM certificate_info" args := make([]interface{}, len(statuses)) @@ -88,10 +88,15 @@ func (a *AggSenderSQLStorage) GetCertificatesByStatus(ctx context.Context, } // GetCertificateByHeight returns a certificate by its height -func (a *AggSenderSQLStorage) GetCertificateByHeight(ctx context.Context, +func (a *AggSenderSQLStorage) GetCertificateByHeight(height uint64) (types.CertificateInfo, error) { + return getCertificateByHeight(a.db, height) +} + +// getCertificateByHeight returns a certificate by its height using the provided db +func getCertificateByHeight(db meddler.DB, height uint64) (types.CertificateInfo, error) { var certificateInfo types.CertificateInfo - if err := meddler.QueryRow(a.db, &certificateInfo, + if err := meddler.QueryRow(db, &certificateInfo, "SELECT * FROM certificate_info WHERE height = $1;", height); err != nil { return types.CertificateInfo{}, getSelectQueryError(height, err) } @@ -100,7 +105,7 @@ func (a *AggSenderSQLStorage) GetCertificateByHeight(ctx context.Context, } // GetLastSentCertificate returns the last certificate sent to the aggLayer -func (a *AggSenderSQLStorage) GetLastSentCertificate(ctx context.Context) (types.CertificateInfo, error) { +func (a *AggSenderSQLStorage) 
GetLastSentCertificate() (types.CertificateInfo, error) { var certificateInfo types.CertificateInfo if err := meddler.QueryRow(a.db, &certificateInfo, "SELECT * FROM certificate_info ORDER BY height DESC LIMIT 1;"); err != nil { @@ -124,10 +129,24 @@ func (a *AggSenderSQLStorage) SaveLastSentCertificate(ctx context.Context, certi } }() - if err := meddler.Insert(tx, "certificate_info", &certificate); err != nil { + cert, err := getCertificateByHeight(tx, certificate.Height) + if err != nil && !errors.Is(err, db.ErrNotFound) { + return err + } + + if cert.CertificateID != (common.Hash{}) { + // we already have a certificate with this height + // we need to delete it before inserting the new one + if err = deleteCertificate(tx, cert.CertificateID); err != nil { + return err + } + } + + if err = meddler.Insert(tx, "certificate_info", &certificate); err != nil { return fmt.Errorf("error inserting certificate info: %w", err) } - if err := tx.Commit(); err != nil { + + if err = tx.Commit(); err != nil { return err } @@ -150,10 +169,11 @@ func (a *AggSenderSQLStorage) DeleteCertificate(ctx context.Context, certificate } }() - if _, err := tx.Exec(`DELETE FROM certificate_info WHERE certificate_id = $1;`, certificateID); err != nil { - return fmt.Errorf("error deleting certificate info: %w", err) + if err = deleteCertificate(a.db, certificateID); err != nil { + return err } - if err := tx.Commit(); err != nil { + + if err = tx.Commit(); err != nil { return err } @@ -162,6 +182,15 @@ func (a *AggSenderSQLStorage) DeleteCertificate(ctx context.Context, certificate return nil } +// deleteCertificate deletes a certificate from the storage using the provided db +func deleteCertificate(db meddler.DB, certificateID common.Hash) error { + if _, err := db.Exec(`DELETE FROM certificate_info WHERE certificate_id = $1;`, certificateID.String()); err != nil { + return fmt.Errorf("error deleting certificate info: %w", err) + } + + return nil +} + // UpdateCertificateStatus updates the 
status of a certificate func (a *AggSenderSQLStorage) UpdateCertificateStatus(ctx context.Context, certificate types.CertificateInfo) error { tx, err := db.NewTx(ctx, a.db) @@ -176,11 +205,11 @@ func (a *AggSenderSQLStorage) UpdateCertificateStatus(ctx context.Context, certi } }() - if _, err := tx.Exec(`UPDATE certificate_info SET status = $1 WHERE certificate_id = $2;`, - certificate.Status, certificate.CertificateID); err != nil { + if _, err = tx.Exec(`UPDATE certificate_info SET status = $1 WHERE certificate_id = $2;`, + certificate.Status, certificate.CertificateID.String()); err != nil { return fmt.Errorf("error updating certificate info: %w", err) } - if err := tx.Commit(); err != nil { + if err = tx.Commit(); err != nil { return err } diff --git a/aggsender/db/aggsender_db_storage_test.go b/aggsender/db/aggsender_db_storage_test.go index cfb7af7c..6a656a95 100644 --- a/aggsender/db/aggsender_db_storage_test.go +++ b/aggsender/db/aggsender_db_storage_test.go @@ -35,7 +35,7 @@ func Test_Storage(t *testing.T) { } require.NoError(t, storage.SaveLastSentCertificate(ctx, certificate)) - certificateFromDB, err := storage.GetCertificateByHeight(ctx, certificate.Height) + certificateFromDB, err := storage.GetCertificateByHeight(certificate.Height) require.NoError(t, err) require.Equal(t, certificate, certificateFromDB) @@ -55,7 +55,7 @@ func Test_Storage(t *testing.T) { require.NoError(t, storage.DeleteCertificate(ctx, certificate.CertificateID)) - certificateFromDB, err := storage.GetCertificateByHeight(ctx, certificate.Height) + certificateFromDB, err := storage.GetCertificateByHeight(certificate.Height) require.ErrorIs(t, err, db.ErrNotFound) require.Equal(t, types.CertificateInfo{}, certificateFromDB) require.NoError(t, storage.clean()) @@ -63,7 +63,7 @@ func Test_Storage(t *testing.T) { t.Run("GetLastSentCertificate", func(t *testing.T) { // try getting a certificate that doesn't exist - certificateFromDB, err := storage.GetLastSentCertificate(ctx) + 
certificateFromDB, err := storage.GetLastSentCertificate() require.NoError(t, err) require.Equal(t, types.CertificateInfo{}, certificateFromDB) @@ -78,7 +78,7 @@ func Test_Storage(t *testing.T) { } require.NoError(t, storage.SaveLastSentCertificate(ctx, certificate)) - certificateFromDB, err = storage.GetLastSentCertificate(ctx) + certificateFromDB, err = storage.GetLastSentCertificate() require.NoError(t, err) require.Equal(t, certificate, certificateFromDB) @@ -87,12 +87,12 @@ func Test_Storage(t *testing.T) { t.Run("GetCertificateByHeight", func(t *testing.T) { // try getting height 0 - certificateFromDB, err := storage.GetCertificateByHeight(ctx, 0) + certificateFromDB, err := storage.GetCertificateByHeight(0) require.NoError(t, err) require.Equal(t, types.CertificateInfo{}, certificateFromDB) // try getting a certificate that doesn't exist - certificateFromDB, err = storage.GetCertificateByHeight(ctx, 4) + certificateFromDB, err = storage.GetCertificateByHeight(4) require.ErrorIs(t, err, db.ErrNotFound) require.Equal(t, types.CertificateInfo{}, certificateFromDB) @@ -107,7 +107,7 @@ func Test_Storage(t *testing.T) { } require.NoError(t, storage.SaveLastSentCertificate(ctx, certificate)) - certificateFromDB, err = storage.GetCertificateByHeight(ctx, certificate.Height) + certificateFromDB, err = storage.GetCertificateByHeight(certificate.Height) require.NoError(t, err) require.Equal(t, certificate, certificateFromDB) @@ -149,28 +149,28 @@ func Test_Storage(t *testing.T) { // Test fetching certificates with status Settled statuses := []agglayer.CertificateStatus{agglayer.Settled} - certificatesFromDB, err := storage.GetCertificatesByStatus(ctx, statuses) + certificatesFromDB, err := storage.GetCertificatesByStatus(statuses) require.NoError(t, err) require.Len(t, certificatesFromDB, 1) require.ElementsMatch(t, []*types.CertificateInfo{certificates[0]}, certificatesFromDB) // Test fetching certificates with status Pending statuses = 
[]agglayer.CertificateStatus{agglayer.Pending} - certificatesFromDB, err = storage.GetCertificatesByStatus(ctx, statuses) + certificatesFromDB, err = storage.GetCertificatesByStatus(statuses) require.NoError(t, err) require.Len(t, certificatesFromDB, 1) require.ElementsMatch(t, []*types.CertificateInfo{certificates[1]}, certificatesFromDB) // Test fetching certificates with status InError statuses = []agglayer.CertificateStatus{agglayer.InError} - certificatesFromDB, err = storage.GetCertificatesByStatus(ctx, statuses) + certificatesFromDB, err = storage.GetCertificatesByStatus(statuses) require.NoError(t, err) require.Len(t, certificatesFromDB, 1) require.ElementsMatch(t, []*types.CertificateInfo{certificates[2]}, certificatesFromDB) // Test fetching certificates with status InError and Pending statuses = []agglayer.CertificateStatus{agglayer.InError, agglayer.Pending} - certificatesFromDB, err = storage.GetCertificatesByStatus(ctx, statuses) + certificatesFromDB, err = storage.GetCertificatesByStatus(statuses) require.NoError(t, err) require.Len(t, certificatesFromDB, 2) require.ElementsMatch(t, []*types.CertificateInfo{certificates[1], certificates[2]}, certificatesFromDB) @@ -195,10 +195,93 @@ func Test_Storage(t *testing.T) { require.NoError(t, storage.UpdateCertificateStatus(ctx, certificate)) // Fetch the certificate and verify the status has been updated - certificateFromDB, err := storage.GetCertificateByHeight(ctx, certificate.Height) + certificateFromDB, err := storage.GetCertificateByHeight(certificate.Height) require.NoError(t, err) require.Equal(t, certificate.Status, certificateFromDB.Status) require.NoError(t, storage.clean()) }) } + +func Test_SaveLastSentCertificate(t *testing.T) { + ctx := context.Background() + + path := path.Join(t.TempDir(), "file::memory:?cache=shared") + log.Debugf("sqlite path: %s", path) + require.NoError(t, migrations.RunMigrations(path)) + + storage, err := NewAggSenderSQLStorage(log.WithFields("aggsender-db"), path) + 
require.NoError(t, err) + + t.Run("SaveNewCertificate", func(t *testing.T) { + certificate := types.CertificateInfo{ + Height: 1, + CertificateID: common.HexToHash("0x1"), + NewLocalExitRoot: common.HexToHash("0x2"), + FromBlock: 1, + ToBlock: 2, + Status: agglayer.Settled, + } + require.NoError(t, storage.SaveLastSentCertificate(ctx, certificate)) + + certificateFromDB, err := storage.GetCertificateByHeight(certificate.Height) + require.NoError(t, err) + require.Equal(t, certificate, certificateFromDB) + require.NoError(t, storage.clean()) + }) + + t.Run("UpdateExistingCertificate", func(t *testing.T) { + certificate := types.CertificateInfo{ + Height: 2, + CertificateID: common.HexToHash("0x3"), + NewLocalExitRoot: common.HexToHash("0x4"), + FromBlock: 3, + ToBlock: 4, + Status: agglayer.InError, + } + require.NoError(t, storage.SaveLastSentCertificate(ctx, certificate)) + + // Update the certificate with the same height + updatedCertificate := types.CertificateInfo{ + Height: 2, + CertificateID: common.HexToHash("0x5"), + NewLocalExitRoot: common.HexToHash("0x6"), + FromBlock: 3, + ToBlock: 6, + Status: agglayer.Pending, + } + require.NoError(t, storage.SaveLastSentCertificate(ctx, updatedCertificate)) + + certificateFromDB, err := storage.GetCertificateByHeight(updatedCertificate.Height) + require.NoError(t, err) + require.Equal(t, updatedCertificate, certificateFromDB) + require.NoError(t, storage.clean()) + }) + + t.Run("SaveCertificateWithRollback", func(t *testing.T) { + // Simulate an error during the transaction to trigger a rollback + certificate := types.CertificateInfo{ + Height: 3, + CertificateID: common.HexToHash("0x7"), + NewLocalExitRoot: common.HexToHash("0x8"), + FromBlock: 7, + ToBlock: 8, + Status: agglayer.Settled, + } + + // Close the database to force an error + require.NoError(t, storage.db.Close()) + + err := storage.SaveLastSentCertificate(ctx, certificate) + require.Error(t, err) + + // Reopen the database and check that the certificate 
was not saved + storage.db, err = db.NewSQLiteDB(path) + require.NoError(t, err) + + certificateFromDB, err := storage.GetCertificateByHeight(certificate.Height) + require.ErrorIs(t, err, db.ErrNotFound) + require.Equal(t, types.CertificateInfo{}, certificateFromDB) + require.NoError(t, storage.clean()) + }) +} diff --git a/aggsender/mocks/mock_aggsender_storage.go b/aggsender/mocks/mock_aggsender_storage.go index a5f193fc..17f8d227 100644 --- a/aggsender/mocks/mock_aggsender_storage.go +++ b/aggsender/mocks/mock_aggsender_storage.go @@ -73,9 +73,9 @@ func (_c *AggSenderStorageMock_DeleteCertificate_Call) RunAndReturn(run func(con return _c } -// GetCertificateByHeight provides a mock function with given fields: ctx, height -func (_m *AggSenderStorageMock) GetCertificateByHeight(ctx context.Context, height uint64) (types.CertificateInfo, error) { - ret := _m.Called(ctx, height) +// GetCertificateByHeight provides a mock function with given fields: height +func (_m *AggSenderStorageMock) GetCertificateByHeight(height uint64) (types.CertificateInfo, error) { + ret := _m.Called(height) if len(ret) == 0 { panic("no return value specified for GetCertificateByHeight") @@ -83,17 +83,17 @@ func (_m *AggSenderStorageMock) GetCertificateByHeight(ctx context.Context, heig var r0 types.CertificateInfo var r1 error - if rf, ok := ret.Get(0).(func(context.Context, uint64) (types.CertificateInfo, error)); ok { - return rf(ctx, height) + if rf, ok := ret.Get(0).(func(uint64) (types.CertificateInfo, error)); ok { + return rf(height) } - if rf, ok := ret.Get(0).(func(context.Context, uint64) types.CertificateInfo); ok { - r0 = rf(ctx, height) + if rf, ok := ret.Get(0).(func(uint64) types.CertificateInfo); ok { + r0 = rf(height) } else { r0 = ret.Get(0).(types.CertificateInfo) } - if rf, ok := ret.Get(1).(func(context.Context, uint64) error); ok { - r1 = rf(ctx, height) + if rf, ok := ret.Get(1).(func(uint64) error); ok { + r1 = rf(height) } else { r1 = ret.Error(1) } @@ -107,15 
+107,14 @@ type AggSenderStorageMock_GetCertificateByHeight_Call struct { } // GetCertificateByHeight is a helper method to define mock.On call -// - ctx context.Context // - height uint64 -func (_e *AggSenderStorageMock_Expecter) GetCertificateByHeight(ctx interface{}, height interface{}) *AggSenderStorageMock_GetCertificateByHeight_Call { - return &AggSenderStorageMock_GetCertificateByHeight_Call{Call: _e.mock.On("GetCertificateByHeight", ctx, height)} +func (_e *AggSenderStorageMock_Expecter) GetCertificateByHeight(height interface{}) *AggSenderStorageMock_GetCertificateByHeight_Call { + return &AggSenderStorageMock_GetCertificateByHeight_Call{Call: _e.mock.On("GetCertificateByHeight", height)} } -func (_c *AggSenderStorageMock_GetCertificateByHeight_Call) Run(run func(ctx context.Context, height uint64)) *AggSenderStorageMock_GetCertificateByHeight_Call { +func (_c *AggSenderStorageMock_GetCertificateByHeight_Call) Run(run func(height uint64)) *AggSenderStorageMock_GetCertificateByHeight_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(uint64)) + run(args[0].(uint64)) }) return _c } @@ -125,14 +124,14 @@ func (_c *AggSenderStorageMock_GetCertificateByHeight_Call) Return(_a0 types.Cer return _c } -func (_c *AggSenderStorageMock_GetCertificateByHeight_Call) RunAndReturn(run func(context.Context, uint64) (types.CertificateInfo, error)) *AggSenderStorageMock_GetCertificateByHeight_Call { +func (_c *AggSenderStorageMock_GetCertificateByHeight_Call) RunAndReturn(run func(uint64) (types.CertificateInfo, error)) *AggSenderStorageMock_GetCertificateByHeight_Call { _c.Call.Return(run) return _c } -// GetCertificatesByStatus provides a mock function with given fields: ctx, status -func (_m *AggSenderStorageMock) GetCertificatesByStatus(ctx context.Context, status []agglayer.CertificateStatus) ([]*types.CertificateInfo, error) { - ret := _m.Called(ctx, status) +// GetCertificatesByStatus provides a mock function with given fields: 
status +func (_m *AggSenderStorageMock) GetCertificatesByStatus(status []agglayer.CertificateStatus) ([]*types.CertificateInfo, error) { + ret := _m.Called(status) if len(ret) == 0 { panic("no return value specified for GetCertificatesByStatus") @@ -140,19 +139,19 @@ func (_m *AggSenderStorageMock) GetCertificatesByStatus(ctx context.Context, sta var r0 []*types.CertificateInfo var r1 error - if rf, ok := ret.Get(0).(func(context.Context, []agglayer.CertificateStatus) ([]*types.CertificateInfo, error)); ok { - return rf(ctx, status) + if rf, ok := ret.Get(0).(func([]agglayer.CertificateStatus) ([]*types.CertificateInfo, error)); ok { + return rf(status) } - if rf, ok := ret.Get(0).(func(context.Context, []agglayer.CertificateStatus) []*types.CertificateInfo); ok { - r0 = rf(ctx, status) + if rf, ok := ret.Get(0).(func([]agglayer.CertificateStatus) []*types.CertificateInfo); ok { + r0 = rf(status) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*types.CertificateInfo) } } - if rf, ok := ret.Get(1).(func(context.Context, []agglayer.CertificateStatus) error); ok { - r1 = rf(ctx, status) + if rf, ok := ret.Get(1).(func([]agglayer.CertificateStatus) error); ok { + r1 = rf(status) } else { r1 = ret.Error(1) } @@ -166,15 +165,14 @@ type AggSenderStorageMock_GetCertificatesByStatus_Call struct { } // GetCertificatesByStatus is a helper method to define mock.On call -// - ctx context.Context // - status []agglayer.CertificateStatus -func (_e *AggSenderStorageMock_Expecter) GetCertificatesByStatus(ctx interface{}, status interface{}) *AggSenderStorageMock_GetCertificatesByStatus_Call { - return &AggSenderStorageMock_GetCertificatesByStatus_Call{Call: _e.mock.On("GetCertificatesByStatus", ctx, status)} +func (_e *AggSenderStorageMock_Expecter) GetCertificatesByStatus(status interface{}) *AggSenderStorageMock_GetCertificatesByStatus_Call { + return &AggSenderStorageMock_GetCertificatesByStatus_Call{Call: _e.mock.On("GetCertificatesByStatus", status)} } -func (_c 
*AggSenderStorageMock_GetCertificatesByStatus_Call) Run(run func(ctx context.Context, status []agglayer.CertificateStatus)) *AggSenderStorageMock_GetCertificatesByStatus_Call { +func (_c *AggSenderStorageMock_GetCertificatesByStatus_Call) Run(run func(status []agglayer.CertificateStatus)) *AggSenderStorageMock_GetCertificatesByStatus_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].([]agglayer.CertificateStatus)) + run(args[0].([]agglayer.CertificateStatus)) }) return _c } @@ -184,14 +182,14 @@ func (_c *AggSenderStorageMock_GetCertificatesByStatus_Call) Return(_a0 []*types return _c } -func (_c *AggSenderStorageMock_GetCertificatesByStatus_Call) RunAndReturn(run func(context.Context, []agglayer.CertificateStatus) ([]*types.CertificateInfo, error)) *AggSenderStorageMock_GetCertificatesByStatus_Call { +func (_c *AggSenderStorageMock_GetCertificatesByStatus_Call) RunAndReturn(run func([]agglayer.CertificateStatus) ([]*types.CertificateInfo, error)) *AggSenderStorageMock_GetCertificatesByStatus_Call { _c.Call.Return(run) return _c } -// GetLastSentCertificate provides a mock function with given fields: ctx -func (_m *AggSenderStorageMock) GetLastSentCertificate(ctx context.Context) (types.CertificateInfo, error) { - ret := _m.Called(ctx) +// GetLastSentCertificate provides a mock function with given fields: +func (_m *AggSenderStorageMock) GetLastSentCertificate() (types.CertificateInfo, error) { + ret := _m.Called() if len(ret) == 0 { panic("no return value specified for GetLastSentCertificate") @@ -199,17 +197,17 @@ func (_m *AggSenderStorageMock) GetLastSentCertificate(ctx context.Context) (typ var r0 types.CertificateInfo var r1 error - if rf, ok := ret.Get(0).(func(context.Context) (types.CertificateInfo, error)); ok { - return rf(ctx) + if rf, ok := ret.Get(0).(func() (types.CertificateInfo, error)); ok { + return rf() } - if rf, ok := ret.Get(0).(func(context.Context) types.CertificateInfo); ok { - r0 = rf(ctx) + if rf, ok 
:= ret.Get(0).(func() types.CertificateInfo); ok { + r0 = rf() } else { r0 = ret.Get(0).(types.CertificateInfo) } - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(ctx) + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() } else { r1 = ret.Error(1) } @@ -223,14 +221,13 @@ type AggSenderStorageMock_GetLastSentCertificate_Call struct { } // GetLastSentCertificate is a helper method to define mock.On call -// - ctx context.Context -func (_e *AggSenderStorageMock_Expecter) GetLastSentCertificate(ctx interface{}) *AggSenderStorageMock_GetLastSentCertificate_Call { - return &AggSenderStorageMock_GetLastSentCertificate_Call{Call: _e.mock.On("GetLastSentCertificate", ctx)} +func (_e *AggSenderStorageMock_Expecter) GetLastSentCertificate() *AggSenderStorageMock_GetLastSentCertificate_Call { + return &AggSenderStorageMock_GetLastSentCertificate_Call{Call: _e.mock.On("GetLastSentCertificate")} } -func (_c *AggSenderStorageMock_GetLastSentCertificate_Call) Run(run func(ctx context.Context)) *AggSenderStorageMock_GetLastSentCertificate_Call { +func (_c *AggSenderStorageMock_GetLastSentCertificate_Call) Run(run func()) *AggSenderStorageMock_GetLastSentCertificate_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context)) + run() }) return _c } @@ -240,7 +237,7 @@ func (_c *AggSenderStorageMock_GetLastSentCertificate_Call) Return(_a0 types.Cer return _c } -func (_c *AggSenderStorageMock_GetLastSentCertificate_Call) RunAndReturn(run func(context.Context) (types.CertificateInfo, error)) *AggSenderStorageMock_GetLastSentCertificate_Call { +func (_c *AggSenderStorageMock_GetLastSentCertificate_Call) RunAndReturn(run func() (types.CertificateInfo, error)) *AggSenderStorageMock_GetLastSentCertificate_Call { _c.Call.Return(run) return _c } diff --git a/aggsender/types/types.go b/aggsender/types/types.go index d6421132..ffdf4d24 100644 --- a/aggsender/types/types.go +++ b/aggsender/types/types.go @@ -52,8 +52,8 @@ type Logger interface { 
type CertificateInfo struct { Height uint64 `meddler:"height"` - CertificateID common.Hash `meddler:"certificate_id"` - NewLocalExitRoot common.Hash `meddler:"new_local_exit_root"` + CertificateID common.Hash `meddler:"certificate_id,hash"` + NewLocalExitRoot common.Hash `meddler:"new_local_exit_root,hash"` FromBlock uint64 `meddler:"from_block"` ToBlock uint64 `meddler:"to_block"` Status agglayer.CertificateStatus `meddler:"status"` diff --git a/scripts/local_config b/scripts/local_config index b65210ac..0a3b9473 100755 --- a/scripts/local_config +++ b/scripts/local_config @@ -29,6 +29,10 @@ function get_value_from_toml_file(){ local _KEY="$3" local _LINE local _inside_section=0 + if [ $_SECTION == "." ]; then + _SECTION="" + _inside_section=1 + fi local _return_next_line=0 local _TMP_FILE=$(mktemp) cat $_FILE > $_TMP_FILE @@ -72,29 +76,55 @@ function get_value_from_toml_file(){ } ############################################################################### function export_key_from_toml_file_or_fatal(){ + export_key_from_toml_file "$1" "$2" "$3" "$4" + if [ $? 
-ne 0 ]; then + local _EXPORTED_VAR_NAME="$1" + local _FILE="$2" + local _SECTION="$3" + local _KEY="$4" + log_fatal "$FUNCNAME: key [$_KEY] not found in section [$_SECTION] in file [$_FILE]" + fi +} + +############################################################################### +function export_key_from_toml_file(){ local _EXPORTED_VAR_NAME="$1" local _FILE="$2" local _SECTION="$3" local _KEY="$4" local _VALUE=$(get_value_from_toml_file $_FILE $_SECTION $_KEY) if [ -z "$_VALUE" ]; then - log_fatal "$FUNCNAME: key $_KEY not found in section $_SECTION in file $_FILE" + log_debug "$FUNCNAME: key [$_KEY] not found in section [$_SECTION] in file [$_FILE]" + return 1 fi export $_EXPORTED_VAR_NAME="$_VALUE" log_debug "$_EXPORTED_VAR_NAME=${!_EXPORTED_VAR_NAME} \t\t\t# file:$_FILE section:$_SECTION key:$_KEY" + return 0 } - ############################################################################### function export_obj_key_from_toml_file_or_fatal(){ + export_obj_key_from_toml_file $* + if [ $? 
-ne 0 ]; then + local _EXPORTED_VAR_NAME="$1" + local _FILE="$2" + local _SECTION="$3" + local _KEY="$4" + log_fatal "$FUNCNAME: obj_key [$_KEY] not found in section [$_SECTION] in file [$_FILE]" + fi +} + +############################################################################### +function export_obj_key_from_toml_file(){ local _EXPORTED_VAR_NAME="$1" local _FILE="$2" local _SECTION="$3" local _KEY="$4" local _OBJ_KEY="$5" - log_debug "export_obj_key_from_toml_file_or_fatal: $_EXPORTED_VAR_NAME $_FILE $_SECTION $_KEY $_OBJ_KEY" + log_debug "export_obj_key_from_toml_file: $_EXPORTED_VAR_NAME $_FILE $_SECTION $_KEY $_OBJ_KEY" local _VALUE=$(get_value_from_toml_file $_FILE $_SECTION $_KEY) if [ -z "$_VALUE" ]; then - log_fatal "export_obj_key_from_toml_file_or_fatal: obj_key $_KEY not found in section [$_SECTION]" + log_debug "export_obj_key_from_toml_file: obj_key $_KEY not found in section [$_SECTION]" + return 1 fi local _CLEAN_VALUE=$(echo $_VALUE | tr -d '{' | tr -d '}' | tr ',' '\n') while read -r _LINE; do @@ -113,7 +143,8 @@ function export_obj_key_from_toml_file_or_fatal(){ return 0 fi done <<< "$_CLEAN_VALUE" - log_fatal "export_obj_key_from_toml_file_or_fatal: obj_key $_OBJ_KEY not found in section $_SECTION/ $_KEY = $_VALUE" + log_debug "export_obj_key_from_toml_file: obj_key $_OBJ_KEY not found in section $_SECTION/ $_KEY = $_VALUE" + return 1 } ############################################################################### @@ -133,23 +164,55 @@ function export_values_of_genesis(){ ############################################################################### function export_values_of_cdk_node_config(){ local _CDK_CONFIG_FILE=$1 - export_key_from_toml_file_or_fatal zkevm_l2_sequencer_address $_CDK_CONFIG_FILE SequenceSender L2Coinbase - export_obj_key_from_toml_file_or_fatal zkevm_l2_sequencer_keystore_password $_CDK_CONFIG_FILE SequenceSender PrivateKey Password - export_key_from_toml_file_or_fatal l1_chain_id $_CDK_CONFIG_FILE 
SequenceSender.EthTxManager.Etherman L1ChainID - export_key_from_toml_file_or_fatal zkevm_is_validium $_CDK_CONFIG_FILE Common IsValidiumMode - export_key_from_toml_file_or_fatal zkevm_contract_versions $_CDK_CONFIG_FILE Common ContractVersions - export_key_from_toml_file_or_fatal l2_chain_id $_CDK_CONFIG_FILE Aggregator ChainID + export_key_from_toml_file zkevm_l2_sequencer_address $_CDK_CONFIG_FILE SequenceSender L2Coinbase + if [ $? -ne 0 ]; then + export_key_from_toml_file_or_fatal zkevm_l2_sequencer_address $_CDK_CONFIG_FILE "." L2Coinbase + fi + export_obj_key_from_toml_file zkevm_l2_sequencer_keystore_password $_CDK_CONFIG_FILE SequenceSender PrivateKey Password + if [ $? -ne 0 ]; then + export_key_from_toml_file_or_fatal zkevm_l2_sequencer_keystore_password $_CDK_CONFIG_FILE "." SequencerPrivateKeyPassword + fi + export_key_from_toml_file l1_chain_id $_CDK_CONFIG_FILE SequenceSender.EthTxManager.Etherman L1ChainID + if [ $? -ne 0 ]; then + export_key_from_toml_file_or_fatal l1_chain_id $_CDK_CONFIG_FILE L1Config chainId + fi + export_key_from_toml_file zkevm_is_validium $_CDK_CONFIG_FILE Common IsValidiumMode + if [ $? -ne 0 ]; then + export_key_from_toml_file_or_fatal zkevm_is_validium $_CDK_CONFIG_FILE "." IsValidiumMode + fi + export_key_from_toml_file zkevm_contract_versions $_CDK_CONFIG_FILE Common ContractVersions + if [ $? -ne 0 ]; then + export_key_from_toml_file_or_fatal zkevm_contract_versions $_CDK_CONFIG_FILE "." ContractVersions + fi + export_key_from_toml_file l2_chain_id $_CDK_CONFIG_FILE Aggregator ChainID + if [ $? -ne 0 ]; then + log_debug "l2_chain_id not found in Aggregator section, using 0" + export l2_chain_id="0" + fi export_key_from_toml_file_or_fatal zkevm_aggregator_port $_CDK_CONFIG_FILE Aggregator Port - export_key_from_toml_file_or_fatal zkevm_l2_agglayer_address $_CDK_CONFIG_FILE Aggregator SenderAddress + export_key_from_toml_file zkevm_l2_agglayer_address $_CDK_CONFIG_FILE Aggregator SenderAddress + if [ $? 
-ne 0 ]; then + export_key_from_toml_file zkevm_l2_agglayer_address $_CDK_CONFIG_FILE "." SenderProofToL1Addr + fi export_key_from_toml_file_or_fatal aggregator_db_name $_CDK_CONFIG_FILE Aggregator.DB Name export_key_from_toml_file_or_fatal aggregator_db_user $_CDK_CONFIG_FILE Aggregator.DB User export_key_from_toml_file_or_fatal aggregator_db_password $_CDK_CONFIG_FILE Aggregator.DB Password - export_obj_key_from_toml_file_or_fatal zkevm_l2_aggregator_keystore_password $_CDK_CONFIG_FILE Aggregator.EthTxManager PrivateKeys Password - - export_key_from_toml_file_or_fatal zkevm_rollup_fork_id $_CDK_CONFIG_FILE Aggregator ForkId - export_key_from_toml_file_or_fatal zkevm_l2_agglayer_keystore_password $_CDK_CONFIG_FILE AggSender.SequencerPrivateKey Password - export_key_from_toml_file_or_fatal zkevm_bridge_address $_CDK_CONFIG_FILE BridgeL1Sync BridgeAddr - + export_obj_key_from_toml_file zkevm_l2_aggregator_keystore_password $_CDK_CONFIG_FILE Aggregator.EthTxManager PrivateKeys Password + if [ $? -ne 0 ]; then + export_key_from_toml_file zkevm_l2_aggregator_keystore_password $_CDK_CONFIG_FILE "." AggregatorPrivateKeyPassword + fi + export_key_from_toml_file zkevm_rollup_fork_id $_CDK_CONFIG_FILE Aggregator ForkId + if [ $? -ne 0 ]; then + export_key_from_toml_file_or_fatal zkevm_rollup_fork_id $_CDK_CONFIG_FILE "." ForkId + fi + export_key_from_toml_file zkevm_l2_agglayer_keystore_password $_CDK_CONFIG_FILE AggSender.SequencerPrivateKey Password + if [ $? -ne 0 ]; then + export_key_from_toml_file_or_fatal zkevm_l2_agglayer_keystore_password $_CDK_CONFIG_FILE "." SequencerPrivateKeyPassword + fi + export_key_from_toml_file zkevm_bridge_address $_CDK_CONFIG_FILE BridgeL1Sync BridgeAddr + if [ $? -ne 0 ]; then + export_key_from_toml_file_or_fatal zkevm_bridge_address $_CDK_CONFIG_FILE "." 
polygonBridgeAddr + fi export is_cdk_validium=$zkevm_is_validium export zkevm_rollup_chain_id=$l2_chain_id @@ -281,6 +344,18 @@ function download_kurtosis_artifacts(){ } ############################################################################### +function add_translation_rules_for_validium(){ + if [ $is_cdk_validium != "true" ]; then + return + fi + log_debug " For Validium mode, we need to reach the DAC SERVER: adding translation rules" + + echo "[Aggregator.Synchronizer.Etherman.Validium.Translator]" + echo "FullMatchRules = [" + echo " {Old=\"http://zkevm-dac-001:8484\", New=\"http://127.0.0.1:${dac_port}\"}," + echo " ]" +} +############################################################################### function check_generated_config_file(){ grep "" $DEST_TEMPLATE_FILE > /dev/null if [ $? -ne 1 ]; then @@ -337,15 +412,16 @@ ok_or_fatal "Error generating template" check_generated_config_file +add_translation_rules_for_validium echo " " echo "file generated at:" $DEST/test.kurtosis.toml echo "- to restart kurtosis:" -echo " kurtosis clean --all; kurtosis run --enclave cdk-v1 --args-file params.yml --image-download always ." +echo " kurtosis clean --all; kurtosis run --enclave cdk --args-file params.yml --image-download always ." 
echo " " echo "- Stop cdk-node:" -echo " kurtosis service stop cdk-v1 cdk-node-001" +echo " kurtosis service stop cdk cdk-node-001" echo " " echo "- Add next configuration to vscode launch.json" echo " -----------------------------------------------------------" diff --git a/test/bridge-e2e.bats b/test/bridge-e2e.bats index d504c1c9..01411a11 100644 --- a/test/bridge-e2e.bats +++ b/test/bridge-e2e.bats @@ -76,6 +76,9 @@ setup() { local initial_receiver_balance=$(cast balance "$receiver" --rpc-url "$l2_rpc_url") echo "Initial receiver balance of native token on L2 $initial_receiver_balance" >&3 + local initial_mint_balance=$(cast balance "0x8943545177806ED17B9F23F0a21ee5948eCaa776" --rpc-url "$l1_rpc_url") + echo "Initial minter balance on L1 $initial_mint_balance" >&3 + # Query for initial sender balance run query_contract "$l1_rpc_url" "$gas_token_addr" "$balance_of_fn_sig" "$sender_addr" assert_success @@ -85,7 +88,7 @@ setup() { # Mint gas token on L1 local tokens_amount="0.1ether" local wei_amount=$(cast --to-unit $tokens_amount wei) - local minter_key=${MINTER_KEY:-"42b6e34dc21598a807dc19d7784c71b2a7a01f6480dc6f58258f78e539f1a1fa"} + local minter_key=${MINTER_KEY:-"bcdf20249abf0ed6d944c0288fad489e33f66b3960d9e6229c1cd214ed3bbe31"} run mint_erc20_tokens "$l1_rpc_url" "$gas_token_addr" "$minter_key" "$sender_addr" "$tokens_amount" assert_success diff --git a/test/helpers/common-setup.bash b/test/helpers/common-setup.bash index dac81beb..5f53cbf8 100644 --- a/test/helpers/common-setup.bash +++ b/test/helpers/common-setup.bash @@ -21,6 +21,6 @@ _common_setup() { readonly enclave=${KURTOSIS_ENCLAVE:-cdk} readonly contracts_container=${KURTOSIS_CONTRACTS:-contracts-001} readonly contracts_service_wrapper=${KURTOSIS_CONTRACTS_WRAPPER:-"kurtosis service exec $enclave $contracts_container"} - readonly erigon_rpc_node=${KURTOSIS_ERIGON_RPC:-cdk-erigon-node-001} + readonly erigon_rpc_node=${KURTOSIS_ERIGON_RPC:-cdk-erigon-rpc-001} readonly 
l2_rpc_url=${L2_ETH_RPC_URL:-"$(kurtosis port print $enclave $erigon_rpc_node rpc)"} } diff --git a/test/scripts/batch_verification_monitor.sh b/test/scripts/batch_verification_monitor.sh index 9c923888..a0bfaefd 100755 --- a/test/scripts/batch_verification_monitor.sh +++ b/test/scripts/batch_verification_monitor.sh @@ -17,7 +17,7 @@ timeout="$2" start_time=$(date +%s) end_time=$((start_time + timeout)) -rpc_url="$(kurtosis port print cdk cdk-erigon-node-001 rpc)" +rpc_url="$(kurtosis port print cdk cdk-erigon-rpc-001 rpc)" while true; do verified_batches="$(cast to-dec "$(cast rpc --rpc-url "$rpc_url" zkevm_verifiedBatchNumber | sed 's/"//g')")" diff --git a/test/scripts/env.sh b/test/scripts/env.sh index 063b7d61..298d4f73 100644 --- a/test/scripts/env.sh +++ b/test/scripts/env.sh @@ -3,5 +3,5 @@ KURTOSIS_ENCLAVE=cdk TMP_CDK_FOLDER=tmp/cdk DEST_KURTOSIS_PARAMS_YML=../$TMP_CDK_FOLDER/e2e-params.yml -KURTOSIS_FOLDER=../kurtosis-cdk +KURTOSIS_FOLDER=${KURTOSIS_FOLDER:=../kurtosis-cdk} USE_L1_GAS_TOKEN_CONTRACT=true From c1d0f133a061aeafc8b4ee3d28dfdcf3d9597616 Mon Sep 17 00:00:00 2001 From: Goran Rojovic <100121253+goran-ethernal@users.noreply.github.com> Date: Fri, 8 Nov 2024 15:16:43 +0100 Subject: [PATCH 22/33] feat: add timestamps to certificate (#175) * feat: created and updated timestamps * feat: save raw certificate to db * fix: raw to signed_certificate * fix: indentation --- aggsender/aggsender.go | 20 ++++-- aggsender/db/aggsender_db_storage_test.go | 83 +++++++++++++++++++++++ aggsender/db/migrations/0001.sql | 5 +- aggsender/types/types.go | 36 +++++++--- 4 files changed, 130 insertions(+), 14 deletions(-) diff --git a/aggsender/aggsender.go b/aggsender/aggsender.go index 73953633..e3242bdf 100644 --- a/aggsender/aggsender.go +++ b/aggsender/aggsender.go @@ -177,12 +177,21 @@ func (a *AggSender) sendCertificate(ctx context.Context) (*agglayer.SignedCertif a.log.Debugf("certificate send: Height: %d hash: %s", signedCertificate.Height, 
certificateHash.String()) + raw, err := json.Marshal(signedCertificate) + if err != nil { + return nil, fmt.Errorf("error marshalling signed certificate: %w", err) + } + + createdTime := time.Now().UTC().UnixMilli() certInfo := aggsendertypes.CertificateInfo{ - Height: certificate.Height, - CertificateID: certificateHash, - NewLocalExitRoot: certificate.NewLocalExitRoot, - FromBlock: fromBlock, - ToBlock: toBlock, + Height: certificate.Height, + CertificateID: certificateHash, + NewLocalExitRoot: certificate.NewLocalExitRoot, + FromBlock: fromBlock, + ToBlock: toBlock, + CreatedAt: createdTime, + UpdatedAt: createdTime, + SignedCertificate: string(raw), } if err := a.storage.SaveLastSentCertificate(ctx, certInfo); err != nil { @@ -504,6 +513,7 @@ func (a *AggSender) checkPendingCertificatesStatus(ctx context.Context) { certificateHeader.String(), certificate.Status, certificateHeader.Status) certificate.Status = certificateHeader.Status + certificate.UpdatedAt = time.Now().UTC().UnixMilli() if err := a.storage.UpdateCertificateStatus(ctx, *certificate); err != nil { a.log.Errorf("error updating certificate %s status in storage: %w", certificateHeader.String(), err) diff --git a/aggsender/db/aggsender_db_storage_test.go b/aggsender/db/aggsender_db_storage_test.go index 6a656a95..a0a20894 100644 --- a/aggsender/db/aggsender_db_storage_test.go +++ b/aggsender/db/aggsender_db_storage_test.go @@ -2,8 +2,11 @@ package db import ( "context" + "encoding/json" + "math/big" "path" "testing" + "time" "github.com/0xPolygon/cdk/agglayer" "github.com/0xPolygon/cdk/aggsender/db/migrations" @@ -24,6 +27,8 @@ func Test_Storage(t *testing.T) { storage, err := NewAggSenderSQLStorage(log.WithFields("aggsender-db"), path) require.NoError(t, err) + updateTime := time.Now().UTC().UnixMilli() + t.Run("SaveLastSentCertificate", func(t *testing.T) { certificate := types.CertificateInfo{ Height: 1, @@ -32,6 +37,8 @@ func Test_Storage(t *testing.T) { FromBlock: 1, ToBlock: 2, Status: 
agglayer.Settled, + CreatedAt: updateTime, + UpdatedAt: updateTime, } require.NoError(t, storage.SaveLastSentCertificate(ctx, certificate)) @@ -50,6 +57,8 @@ func Test_Storage(t *testing.T) { FromBlock: 3, ToBlock: 4, Status: agglayer.Settled, + CreatedAt: updateTime, + UpdatedAt: updateTime, } require.NoError(t, storage.SaveLastSentCertificate(ctx, certificate)) @@ -75,6 +84,8 @@ func Test_Storage(t *testing.T) { FromBlock: 5, ToBlock: 6, Status: agglayer.Pending, + CreatedAt: updateTime, + UpdatedAt: updateTime, } require.NoError(t, storage.SaveLastSentCertificate(ctx, certificate)) @@ -104,6 +115,8 @@ func Test_Storage(t *testing.T) { FromBlock: 17, ToBlock: 18, Status: agglayer.Pending, + CreatedAt: updateTime, + UpdatedAt: updateTime, } require.NoError(t, storage.SaveLastSentCertificate(ctx, certificate)) @@ -124,6 +137,8 @@ func Test_Storage(t *testing.T) { FromBlock: 7, ToBlock: 8, Status: agglayer.Settled, + CreatedAt: updateTime, + UpdatedAt: updateTime, }, { Height: 9, @@ -132,6 +147,8 @@ func Test_Storage(t *testing.T) { FromBlock: 9, ToBlock: 10, Status: agglayer.Pending, + CreatedAt: updateTime, + UpdatedAt: updateTime, }, { Height: 11, @@ -140,6 +157,8 @@ func Test_Storage(t *testing.T) { FromBlock: 11, ToBlock: 12, Status: agglayer.InError, + CreatedAt: updateTime, + UpdatedAt: updateTime, }, } @@ -187,6 +206,8 @@ func Test_Storage(t *testing.T) { FromBlock: 13, ToBlock: 14, Status: agglayer.Pending, + CreatedAt: updateTime, + UpdatedAt: updateTime, } require.NoError(t, storage.SaveLastSentCertificate(ctx, certificate)) @@ -213,6 +234,8 @@ func Test_SaveLastSentCertificate(t *testing.T) { storage, err := NewAggSenderSQLStorage(log.WithFields("aggsender-db"), path) require.NoError(t, err) + updateTime := time.Now().UTC().UnixMilli() + t.Run("SaveNewCertificate", func(t *testing.T) { certificate := types.CertificateInfo{ Height: 1, @@ -221,6 +244,8 @@ func Test_SaveLastSentCertificate(t *testing.T) { FromBlock: 1, ToBlock: 2, Status: agglayer.Settled, 
+ CreatedAt: updateTime, + UpdatedAt: updateTime, } require.NoError(t, storage.SaveLastSentCertificate(ctx, certificate)) @@ -238,6 +263,8 @@ func Test_SaveLastSentCertificate(t *testing.T) { FromBlock: 3, ToBlock: 4, Status: agglayer.InError, + CreatedAt: updateTime, + UpdatedAt: updateTime, } require.NoError(t, storage.SaveLastSentCertificate(ctx, certificate)) @@ -267,6 +294,8 @@ func Test_SaveLastSentCertificate(t *testing.T) { FromBlock: 7, ToBlock: 8, Status: agglayer.Settled, + CreatedAt: updateTime, + UpdatedAt: updateTime, } // Close the database to force an error @@ -284,4 +313,58 @@ func Test_SaveLastSentCertificate(t *testing.T) { require.Equal(t, types.CertificateInfo{}, certificateFromDB) require.NoError(t, storage.clean()) }) + + t.Run("SaveCertificate with raw data", func(t *testing.T) { + certfiicate := &agglayer.SignedCertificate{ + Certificate: &agglayer.Certificate{ + NetworkID: 1, + Height: 1, + PrevLocalExitRoot: common.HexToHash("0x1"), + NewLocalExitRoot: common.HexToHash("0x2"), + Metadata: common.HexToHash("0x3"), + BridgeExits: []*agglayer.BridgeExit{ + { + LeafType: agglayer.LeafTypeAsset, + TokenInfo: &agglayer.TokenInfo{ + OriginNetwork: 1, + OriginTokenAddress: common.HexToAddress("0x1"), + }, + DestinationNetwork: 2, + DestinationAddress: common.HexToAddress("0x2"), + Amount: big.NewInt(100), + Metadata: []byte("metadata"), + }, + }, + ImportedBridgeExits: []*agglayer.ImportedBridgeExit{}, + }, + Signature: &agglayer.Signature{ + R: common.HexToHash("0x4"), + S: common.HexToHash("0x5"), + OddParity: false, + }, + } + + raw, err := json.Marshal(certfiicate) + require.NoError(t, err) + + certificate := types.CertificateInfo{ + Height: 1, + CertificateID: common.HexToHash("0x9"), + NewLocalExitRoot: common.HexToHash("0x2"), + FromBlock: 1, + ToBlock: 10, + Status: agglayer.Pending, + CreatedAt: updateTime, + UpdatedAt: updateTime, + SignedCertificate: string(raw), + } + require.NoError(t, storage.SaveLastSentCertificate(ctx, 
certificate)) + + certificateFromDB, err := storage.GetCertificateByHeight(certificate.Height) + require.NoError(t, err) + require.Equal(t, certificate, certificateFromDB) + require.Equal(t, raw, []byte(certificateFromDB.SignedCertificate)) + + require.NoError(t, storage.clean()) + }) } diff --git a/aggsender/db/migrations/0001.sql b/aggsender/db/migrations/0001.sql index 3ed7f997..b2d600b8 100644 --- a/aggsender/db/migrations/0001.sql +++ b/aggsender/db/migrations/0001.sql @@ -8,5 +8,8 @@ CREATE TABLE certificate_info ( status INTEGER NOT NULL, new_local_exit_root VARCHAR NOT NULL, from_block INTEGER NOT NULL, - to_block INTEGER NOT NULL + to_block INTEGER NOT NULL, + created_at INTEGER NOT NULL, + updated_at INTEGER NOT NULL, + signed_certificate TEXT ); \ No newline at end of file diff --git a/aggsender/types/types.go b/aggsender/types/types.go index ffdf4d24..46d31176 100644 --- a/aggsender/types/types.go +++ b/aggsender/types/types.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "math/big" + "time" "github.com/0xPolygon/cdk/agglayer" "github.com/0xPolygon/cdk/bridgesync" @@ -51,15 +52,34 @@ type Logger interface { } type CertificateInfo struct { - Height uint64 `meddler:"height"` - CertificateID common.Hash `meddler:"certificate_id,hash"` - NewLocalExitRoot common.Hash `meddler:"new_local_exit_root,hash"` - FromBlock uint64 `meddler:"from_block"` - ToBlock uint64 `meddler:"to_block"` - Status agglayer.CertificateStatus `meddler:"status"` + Height uint64 `meddler:"height"` + CertificateID common.Hash `meddler:"certificate_id,hash"` + NewLocalExitRoot common.Hash `meddler:"new_local_exit_root,hash"` + FromBlock uint64 `meddler:"from_block"` + ToBlock uint64 `meddler:"to_block"` + Status agglayer.CertificateStatus `meddler:"status"` + CreatedAt int64 `meddler:"created_at"` + UpdatedAt int64 `meddler:"updated_at"` + SignedCertificate string `meddler:"signed_certificate"` } func (c CertificateInfo) String() string { - return fmt.Sprintf("Height: %d, CertificateID: %s, 
FromBlock: %d, ToBlock: %d, NewLocalExitRoot: %s", - c.Height, c.CertificateID.String(), c.FromBlock, c.ToBlock, c.NewLocalExitRoot.String()) + return fmt.Sprintf( + "Height: %d\n"+ + "CertificateID: %s\n"+ + "FromBlock: %d\n"+ + "ToBlock: %d\n"+ + "NewLocalExitRoot: %s\n"+ + "Status: %s\n"+ + "CreatedAt: %s\n"+ + "UpdatedAt: %s\n", + c.Height, + c.CertificateID.String(), + c.FromBlock, + c.ToBlock, + c.NewLocalExitRoot.String(), + c.Status.String(), + time.UnixMilli(c.CreatedAt), + time.UnixMilli(c.UpdatedAt), + ) } From 7d144a0c5ed0372bd458cbb990973de7cd041980 Mon Sep 17 00:00:00 2001 From: Victor Castell Date: Fri, 8 Nov 2024 16:22:01 +0100 Subject: [PATCH 23/33] chore: bump kustoris (#168) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore: bump kustoris * Adapt to changes in services names * fix: update minter key * Apply feedback Co-authored-by: Stefan Negovanović <93934272+Stefan-Ethernal@users.noreply.github.com> --------- Co-authored-by: Stefan Negovanović <93934272+Stefan-Ethernal@users.noreply.github.com> --- .github/workflows/test-e2e.yml | 2 +- .github/workflows/test-resequence.yml | 26 ++++++++++++-------------- crates/cdk/versions.json | 2 +- scripts/local_config | 2 +- test/bridge-e2e.bats | 4 ++-- 5 files changed, 17 insertions(+), 19 deletions(-) diff --git a/.github/workflows/test-e2e.yml b/.github/workflows/test-e2e.yml index 980ad990..9efddba0 100644 --- a/.github/workflows/test-e2e.yml +++ b/.github/workflows/test-e2e.yml @@ -39,7 +39,7 @@ jobs: run: | echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list sudo apt update - sudo apt install kurtosis-cli=1.3.0 + sudo apt install kurtosis-cli=1.4.1 kurtosis version - name: Disable kurtosis analytics diff --git a/.github/workflows/test-resequence.yml b/.github/workflows/test-resequence.yml index 23d73423..66bc437a 100644 --- a/.github/workflows/test-resequence.yml +++ 
b/.github/workflows/test-resequence.yml @@ -86,22 +86,20 @@ jobs: working-directory: ./cdk-erigon run: .github/scripts/test_resequence.sh - - name: Prepare logs - if: always() - working-directory: ./kurtosis-cdk + - name: Dump enclave logs + if: failure() + run: kurtosis dump ./dump + + - name: Generate archive name + if: failure() run: | - mkdir -p ci_logs - cd ci_logs - kurtosis service logs cdk-v1 cdk-erigon-rpc-001 --all > cdk-erigon-rpc-001.log - kurtosis service logs cdk-v1 cdk-erigon-sequencer-001 --all > cdk-erigon-sequencer-001.log - kurtosis service logs cdk-v1 zkevm-agglayer-001 --all > zkevm-agglayer-001.log - kurtosis service logs cdk-v1 zkevm-prover-001 --all > zkevm-prover-001.log - kurtosis service logs cdk-v1 cdk-node-001 --all > cdk-node-001.log - kurtosis service logs cdk-v1 zkevm-bridge-service-001 --all > zkevm-bridge-service-001.log + archive_name="dump_run_with_args_${{matrix.e2e-group}}_${{ github.run_id }}" + echo "ARCHIVE_NAME=${archive_name}" >> "$GITHUB_ENV" + echo "Generated archive name: ${archive_name}" - name: Upload logs - if: always() + if: failure() uses: actions/upload-artifact@v4 with: - name: logs_${{ github.run_id }} - path: ./kurtosis-cdk/ci_logs + name: ${{ env.ARCHIVE_NAME }} + path: ./dump diff --git a/crates/cdk/versions.json b/crates/cdk/versions.json index 36f2af1f..bafbd00b 100644 --- a/crates/cdk/versions.json +++ b/crates/cdk/versions.json @@ -1,7 +1,7 @@ { "agglayer_image": "ghcr.io/agglayer/agglayer:0.2.0-rc.5", "cdk_erigon_node_image": "hermeznetwork/cdk-erigon:v2.1.2", - "cdk_node_image": "ghcr.io/0xpolygon/cdk:0.4.0-beta4", + "cdk_node_image": "ghcr.io/0xpolygon/cdk:0.4.0-beta5", "cdk_validium_node_image": "0xpolygon/cdk-validium-node:0.7.0-cdk", "zkevm_bridge_proxy_image": "haproxy:3.0-bookworm", "zkevm_bridge_service_image": "hermeznetwork/zkevm-bridge-service:v0.6.0-RC1", diff --git a/scripts/local_config b/scripts/local_config index 0a3b9473..274ec803 100755 --- a/scripts/local_config +++ 
b/scripts/local_config @@ -447,4 +447,4 @@ EOF echo " -----------------------------------------------------------" echo " " echo " - rembember to clean previous execution data: " -echo " rm -Rf ${path_rw_data}/*" \ No newline at end of file +echo " rm -Rf ${path_rw_data}/*" diff --git a/test/bridge-e2e.bats b/test/bridge-e2e.bats index 01411a11..ed599c7d 100644 --- a/test/bridge-e2e.bats +++ b/test/bridge-e2e.bats @@ -76,8 +76,8 @@ setup() { local initial_receiver_balance=$(cast balance "$receiver" --rpc-url "$l2_rpc_url") echo "Initial receiver balance of native token on L2 $initial_receiver_balance" >&3 - local initial_mint_balance=$(cast balance "0x8943545177806ED17B9F23F0a21ee5948eCaa776" --rpc-url "$l1_rpc_url") - echo "Initial minter balance on L1 $initial_mint_balance" >&3 + local l1_minter_balance=$(cast balance "0x8943545177806ED17B9F23F0a21ee5948eCaa776" --rpc-url "$l1_rpc_url") + echo "Initial minter balance on L1 $l1_minter_balance" >&3 # Query for initial sender balance run query_contract "$l1_rpc_url" "$gas_token_addr" "$balance_of_fn_sig" "$sender_addr" From 242f2f3068dc71b29d9c102e539b8ffc7e9ce97e Mon Sep 17 00:00:00 2001 From: Joan Esteban <129153821+joanestebanr@users.noreply.github.com> Date: Fri, 8 Nov 2024 17:37:04 +0100 Subject: [PATCH 24/33] fix: local configuration bumping kurtosis to 0.2.29 (#176) * fix: var zkevm_path_rw_data is defined in kurtosis/main but not yet on 0.2.8, try to override it * fix: bump kurtosis 0.2.19 to have the new variable --- .github/workflows/test-e2e.yml | 2 +- scripts/local_config | 4 ++-- test/combinations/fork11-rollup.yml | 1 + test/combinations/fork12-cdk-validium.yml | 2 ++ .../kurtosis-cdk-node-config.toml.template | 18 +++++++++++++++--- 5 files changed, 21 insertions(+), 6 deletions(-) diff --git a/.github/workflows/test-e2e.yml b/.github/workflows/test-e2e.yml index 9efddba0..8994d8e6 100644 --- a/.github/workflows/test-e2e.yml +++ b/.github/workflows/test-e2e.yml @@ -70,7 +70,7 @@ jobs: with: 
repository: 0xPolygon/kurtosis-cdk path: "kurtosis-cdk" - ref: "v0.2.18" + ref: "v0.2.19" - name: Setup Bats and bats libs uses: bats-core/bats-action@2.0.0 diff --git a/scripts/local_config b/scripts/local_config index 274ec803..09e0167a 100755 --- a/scripts/local_config +++ b/scripts/local_config @@ -316,10 +316,10 @@ EOF ############################################################################### function create_dest_folder(){ export DEST=${TMP_CDK_FOLDER}/local_config - export path_rw_data=${TMP_CDK_FOLDER}/runtime + export zkevm_path_rw_data=${TMP_CDK_FOLDER}/runtime [ ! -d ${DEST} ] && mkdir -p ${DEST} rm $DEST/* - mkdir $path_rw_data + mkdir $zkevm_path_rw_data } ############################################################################### function download_kurtosis_artifacts(){ diff --git a/test/combinations/fork11-rollup.yml b/test/combinations/fork11-rollup.yml index fb941760..79baa92d 100644 --- a/test/combinations/fork11-rollup.yml +++ b/test/combinations/fork11-rollup.yml @@ -7,3 +7,4 @@ args: zkevm_use_gas_token_contract: true data_availability_mode: rollup sequencer_type: erigon + \ No newline at end of file diff --git a/test/combinations/fork12-cdk-validium.yml b/test/combinations/fork12-cdk-validium.yml index 9619b0f9..c17444b3 100644 --- a/test/combinations/fork12-cdk-validium.yml +++ b/test/combinations/fork12-cdk-validium.yml @@ -6,3 +6,5 @@ args: zkevm_use_gas_token_contract: true data_availability_mode: cdk-validium sequencer_type: erigon + + diff --git a/test/config/kurtosis-cdk-node-config.toml.template b/test/config/kurtosis-cdk-node-config.toml.template index 5c885d5f..508c1286 100644 --- a/test/config/kurtosis-cdk-node-config.toml.template +++ b/test/config/kurtosis-cdk-node-config.toml.template @@ -1,12 +1,17 @@ -PathRWData = "/tmp/" +PathRWData = "{{.zkevm_path_rw_data}}/" L1URL="{{.l1_rpc_url}}" L2URL="http://{{.l2_rpc_name}}{{.deployment_suffix}}:{{.zkevm_rpc_http_port}}" AggLayerURL="{{.agglayer_url}}" ForkId = 
{{.zkevm_rollup_fork_id}} IsValidiumMode = {{.is_cdk_validium}} + {{if eq .zkevm_rollup_fork_id "12"}} ContractVersions = "banana" +{{else if eq .zkevm_rollup_fork_id "13"}} +# Doesn't look like this is needed at the moment, but soon perhaps? +# ContractVersions = "durian" +ContractVersions = "banana" {{else}} ContractVersions = "elderberry" {{end}} @@ -46,7 +51,10 @@ Outputs = ["stderr"] [Aggregator] Port = "{{.zkevm_aggregator_port}}" - + RetryTime = "30s" + VerifyProofInterval = "10s" + GasOffset = 150000 + SettlementBackend = "agglayer" [Aggregator.DB] Name = "{{.aggregator_db.name}}" User = "{{.aggregator_db.user}}" @@ -57,4 +65,8 @@ Outputs = ["stderr"] MaxConns = 200 [AggSender] -SequencerPrivateKey = {Path = "{{or .zkevm_l2_agglayer_keystore_file "/pk/sequencer.keystore"}}", Password = "{{.zkevm_l2_agglayer_keystore_password}}"} +CertificateSendInterval = "1m" +CheckSettledInterval = "5s" +SaveCertificatesToFilesPath = "{{.zkevm_path_rw_data}}/" + + From 67c37e48fb34ea561551a9aa584e9d0057b2ca39 Mon Sep 17 00:00:00 2001 From: Arnau Bennassar Date: Fri, 8 Nov 2024 11:34:40 -0600 Subject: [PATCH 25/33] feat: sync UpdateL1InfoTreeV2 (#145) * feat: sync UpdateL1InfoTreeV2 * fix linter * use common hash instead of bytes 32 * imporve * imporve * imporve * cover verify trusted aggregator event * cover halted queries * rm coverage file * increase coverage * moar coverage * remove files that shouldnt be there * do not cover smart contracts (generated bindings) * feat: increase coverage (#159) * apply pr suggestions * add context done in handle newblock * add context done in handle newblock * add context done in handle newblock * add context done in handle newblock --------- Co-authored-by: Joan Esteban <129153821+joanestebanr@users.noreply.github.com> --- bridgesync/processor.go | 5 +- bridgesync/test_db_path | Bin 0 -> 4096 bytes bridgesync/test_db_path-shm | Bin 0 -> 32768 bytes bridgesync/test_db_path-wal | Bin 0 -> 78312 bytes l1infotreesync/downloader.go | 13 +- 
l1infotreesync/e2e_test.go | 9 +- l1infotreesync/l1infotreesync.go | 57 +++++ l1infotreesync/l1infotreesync_test.go | 198 ++++++++++++++++++ l1infotreesync/mocks/eth_clienter.go | 2 +- .../mock_reorgdetector.go} | 2 +- l1infotreesync/processor.go | 68 +++++- l1infotreesync/processor_test.go | 23 ++ sonar-project.properties | 2 +- sync/driver.go | 7 +- sync/evmdriver.go | 68 ++++-- sync/evmdriver_test.go | 25 ++- test/Makefile | 2 +- test/contracts/abi/verifybatchesmock.abi | 2 +- test/contracts/bin/verifybatchesmock.bin | 2 +- .../verifybatchesmock/VerifyBatchesMock.sol | 32 +++ .../verifybatchesmock/verifybatchesmock.go | 183 +++++++++++++++- 21 files changed, 650 insertions(+), 50 deletions(-) create mode 100644 bridgesync/test_db_path create mode 100644 bridgesync/test_db_path-shm create mode 100644 bridgesync/test_db_path-wal create mode 100644 l1infotreesync/l1infotreesync_test.go rename l1infotreesync/{mock_reorgdetector_test.go => mocks/mock_reorgdetector.go} (98%) diff --git a/bridgesync/processor.go b/bridgesync/processor.go index e8a79c1f..b2e0ed24 100644 --- a/bridgesync/processor.go +++ b/bridgesync/processor.go @@ -269,8 +269,9 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { if err != nil { return err } + shouldRollback := true defer func() { - if err != nil { + if shouldRollback { if errRllbck := tx.Rollback(); errRllbck != nil { log.Errorf("error while rolling back tx %v", errRllbck) } @@ -306,9 +307,9 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { if err := tx.Commit(); err != nil { return err } + shouldRollback = false p.log.Debugf("processed %d events until block %d", len(block.Events), block.Num) - return nil } diff --git a/bridgesync/test_db_path b/bridgesync/test_db_path new file mode 100644 index 0000000000000000000000000000000000000000..0de02ecf623141161c863ee065d9f7dd83cbe849 GIT binary patch literal 4096 zcmWFz^vNtqRY=P(%1ta$FlG>7U}9o$P*7lCU|@t|AVoG{WYDWBZ`; 
z8~?f5&X!CQRXUZjGK~o;geC+`lO~OUR>aWQ8;}5rA<>ucf)@yiv`G^xds(SN<$3IL z&*%D@mPtUu*GlZO{qy7J`8>a~a*yoG!E=f8k+Ex^ibQrt*z~<0UcdOAH_ca``ooWZ z@>VW)oor^}RKBvZ~lJSu#vbvdIB@bSxHmoDdotqrY)>OnYL-@9!SsztEIk%UkEP!Y3&J)CKmY;|fB*y_009U<00Izz!2guM1{Y28 zBO}p`iY+aua?>zdqNX%W$yN;A3arM;b)h&fkojV%Dv(TIZ-xwKltzX;Et!kUk~v%$ zAI}oqu!+8^s-$Hqnq;n%6?uJ>WTaM0Rpf>!*%{K1Y}r;cIeRFU;7^W3BZ}UT*Q~QD z{S+@stG3~+i-EI=`M?JDxjxmlWuNnJj~A>Re*f01F7LX;T^Ue( zW#c6TAOHafKmY;|fIts{3lqsC&-2ks$DLbQ7gb5ooLQ>twpC{n8Sdv6RAX^P)K@jq zJymM+!n9B)wb^-6d!kw;b>XN`7iwi;jyT5*(_^w^wnmN!Re|0PTQ1I(i${b}ugI2R zb(ctjMbglwYAi^q=-k`ti{|EqC+TV1r>1d+sWcT`)Mfh_!(8b^h7{Qjl^P9GwtQ7M zRjikf6*~>0r8Z>CR&?hkU(p{Q5-MrNs&02q?7a$Bv}GE`l1NQytK>Ds7U?=WtX(8M zsT*M$s=C_phjbz|NzrLb{X3o0bt3LO+O1C~q7Cb>LZA$PC#x^-2)8tHr##OjhCvb``YT1Yy@WC+&80rw7|kvx6n8dj%|2rfKbnuR1g_ zcib&8-E7V~i@yJZ=LlZ1&#h&@FP~x05k%8hBlHIo1Rwwb2tWV=5P$##AOHafKmY<; zK_DNEXJ_1z6Kh>x%;j?VLay-WSUx|NdyM3BQ{$6Ug@gMG6S)JEbkoFCE=QZ?e21X- z9Kq4w{_M;@Z#>Ac=Ln+d>k;~c2?7v+00bZa0SG_<0uX=z1Rwx`Z6UBHp5?m74QR8R zX8^o$fxjicefjST){FSw!EMpuXaocx009U<00Izz00bZa0SG`~Gl4!9;EfCXdGe<} zuZRymg6|#Nj1BuC009U<00Izz00bZa0SG_<0^33$!CK*s3w-s%$zPp&E`ApG3v7$N zMI#^p0SG_<0uX=z1Rwwb2tWV=n+YUYfHyAi%3p52e*U$GUc>zYo3UX(1Rwwb2tWV= z5P$##AOHafKww)4q*yDwae;?h-#qcH>02-1et~V#w`c?eAOHafKmY;|fB*y_009U< zU^4-Z25{cEz(*f{@$GMXW&Ca2FR&RK_Co*y5P$##AOHafKmY;|fB*!xg+P?G!W$Qu zK6T;CAAa=SvAL5~#g@sEVQP|14n%fGVzJ2MgwW6!{dI09pi4Hf?w6@YyZfB*y_009X6 z=LI(6JCpq0z0nQLmKIdmT(;@YK)Egy=LIrfEL8KFVX>wdRJ?aFg@{;WjsLjrk+7s2PyVq2jw5ZPE-roM~LjwtZa&I)E=nZ+zI;+w@ zSwv~oHk@^lRzS?t+2F=OF3IoP7d?N{t*l|#CjIU2t}fjYsIR|DchtFMScT4mZoI^TI=BAMiQ zK6>f6)5JwpQZ#3l>T0C3iMEpS*NwHnn}RJNb>XN`7iwi;jyT5*)AnS^Y>gZdssion za&fL)JR*#GMbg%IWwn>VB57z-H5MdQbh^nG&CLr>`i(e)cD>nwn(u=jZ#ZZxwDJCD$UMTg<_3H_}(*Q_;$vPx``yxOv(0Vv#A7si0_r* zPQHqHXTd#h^(FbCq3Fe;ljH6`R&(cxt}J)A^-A+#OFvC#x z^-2)8tHr##OjhCvb``YT1Yy@-C+&80rw7~KzSMVJoqg4ziMiu$iRor@-dXf*aM2_` zG7{aWIOlB|W=qtRrs-T_tiWolt1g+q-V7PeD2)txS~3@xC3CniKAt5yy_V>!s!Cd> zqDkgDS&`RANk(e5R7GxxlAR&+eJb0ECT9=D68y=LUR;5*iTS{WIQ>h)d47N{+2{P* 
z;{t=GD?B=u&yVFEBl+Ca_~caK;QqoyZhUflEH^Pl1ETTlj5~5-t?P@q-iI8Rq@}P! zXrtH|!R>K@zb-9XpSgL{V9yc6($^yC>vV<*0uX=z1Rwwb2tWV=5P$##AOL|aDzG#D zK(sxs5Ze>aa^2$wv{BAi33$&D{Ntki#&cizdI{ef_eS_};<$Lqs$LAOHafKmY;|fB*y_009U4@R(Wl@1-OdkizrYslO;j5K5P$##AOHafKmY;|fB*y_aDM`92w>0~7nu7+^Or~d j*#83V7q~w}L_+`q5P$##AOHafKmY;|fB*!xqQJiZ(_eq$ literal 0 HcmV?d00001 diff --git a/l1infotreesync/downloader.go b/l1infotreesync/downloader.go index ed3c7efb..9521c726 100644 --- a/l1infotreesync/downloader.go +++ b/l1infotreesync/downloader.go @@ -130,17 +130,20 @@ func buildAppender(client EthClienter, globalExitRoot, return nil } - // TODO: integrate this event to perform sanity checks - appender[updateL1InfoTreeSignatureV2] = func(b *sync.EVMBlock, l types.Log) error { //nolint:unparam - l1InfoTreeUpdate, err := ger.ParseUpdateL1InfoTreeV2(l) + appender[updateL1InfoTreeSignatureV2] = func(b *sync.EVMBlock, l types.Log) error { + l1InfoTreeUpdateV2, err := ger.ParseUpdateL1InfoTreeV2(l) if err != nil { return fmt.Errorf( "error parsing log %+v using ger.ParseUpdateL1InfoTreeV2: %w", l, err, ) } - log.Infof("updateL1InfoTreeSignatureV2: expected root: %s", - common.BytesToHash(l1InfoTreeUpdate.CurrentL1InfoRoot[:])) + b.Events = append(b.Events, Event{UpdateL1InfoTreeV2: &UpdateL1InfoTreeV2{ + CurrentL1InfoRoot: l1InfoTreeUpdateV2.CurrentL1InfoRoot, + LeafCount: l1InfoTreeUpdateV2.LeafCount, + Blockhash: common.BytesToHash(l1InfoTreeUpdateV2.Blockhash.Bytes()), + MinTimestamp: l1InfoTreeUpdateV2.MinTimestamp, + }}) return nil } diff --git a/l1infotreesync/e2e_test.go b/l1infotreesync/e2e_test.go index 94ec008c..132f563f 100644 --- a/l1infotreesync/e2e_test.go +++ b/l1infotreesync/e2e_test.go @@ -13,6 +13,8 @@ import ( cdktypes "github.com/0xPolygon/cdk/config/types" "github.com/0xPolygon/cdk/etherman" "github.com/0xPolygon/cdk/l1infotreesync" + mocks_l1infotreesync "github.com/0xPolygon/cdk/l1infotreesync/mocks" + "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/reorgdetector" 
"github.com/0xPolygon/cdk/test/contracts/verifybatchesmock" "github.com/0xPolygon/cdk/test/helpers" @@ -59,7 +61,7 @@ func TestE2E(t *testing.T) { ctx, cancelCtx := context.WithCancel(context.Background()) dbPath := path.Join(t.TempDir(), "file::memory:?cache=shared") - rdm := l1infotreesync.NewReorgDetectorMock(t) + rdm := mocks_l1infotreesync.NewReorgDetectorMock(t) rdm.On("Subscribe", mock.Anything).Return(&reorgdetector.Subscription{}, nil) rdm.On("AddBlockToTrack", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) @@ -180,12 +182,12 @@ func TestWithReorgs(t *testing.T) { // Update L1 Info Tree + Rollup Exit Tree newLocalExitRoot := common.HexToHash(strconv.Itoa(i) + "ffff" + strconv.Itoa(1)) - _, err = verifySC.VerifyBatches(auth, rollupID, 0, newLocalExitRoot, common.Hash{}, true) + _, err = verifySC.VerifyBatchesTrustedAggregator(auth, rollupID, 0, newLocalExitRoot, common.Hash{}, true) require.NoError(t, err) // Update Rollup Exit Tree newLocalExitRoot = common.HexToHash(strconv.Itoa(i) + "ffff" + strconv.Itoa(2)) - _, err = verifySC.VerifyBatches(auth, rollupID, 0, newLocalExitRoot, common.Hash{}, false) + _, err = verifySC.VerifyBatchesTrustedAggregator(auth, rollupID, 0, newLocalExitRoot, common.Hash{}, false) require.NoError(t, err) } @@ -310,6 +312,7 @@ func TestStressAndReorgs(t *testing.T) { require.NoError(t, err) block, err := client.Client().BlockByNumber(ctx, big.NewInt(int64(currentBlockNum-reorgSizeInBlocks))) + log.Debugf("reorging until block %d. 
Current block %d (before reorg)", block.NumberU64(), currentBlockNum) require.NoError(t, err) reorgFrom := block.Hash() err = client.Fork(reorgFrom) diff --git a/l1infotreesync/l1infotreesync.go b/l1infotreesync/l1infotreesync.go index a7e50128..9719fcd7 100644 --- a/l1infotreesync/l1infotreesync.go +++ b/l1infotreesync/l1infotreesync.go @@ -106,6 +106,9 @@ func (s *L1InfoTreeSync) Start(ctx context.Context) { // GetL1InfoTreeMerkleProof creates a merkle proof for the L1 Info tree func (s *L1InfoTreeSync) GetL1InfoTreeMerkleProof(ctx context.Context, index uint32) (types.Proof, types.Root, error) { + if s.processor.halted { + return types.Proof{}, types.Root{}, sync.ErrInconsistentState + } return s.processor.GetL1InfoTreeMerkleProof(ctx, index) } @@ -115,6 +118,9 @@ func (s *L1InfoTreeSync) GetRollupExitTreeMerkleProof( networkID uint32, root common.Hash, ) (types.Proof, error) { + if s.processor.halted { + return types.Proof{}, sync.ErrInconsistentState + } if networkID == 0 { return tree.EmptyProof, nil } @@ -135,38 +141,59 @@ func translateError(err error) error { // - ErrBlockNotProcessed, // - ErrNotFound func (s *L1InfoTreeSync) GetLatestInfoUntilBlock(ctx context.Context, blockNum uint64) (*L1InfoTreeLeaf, error) { + if s.processor.halted { + return nil, sync.ErrInconsistentState + } leaf, err := s.processor.GetLatestInfoUntilBlock(ctx, blockNum) return leaf, translateError(err) } // GetInfoByIndex returns the value of a leaf (not the hash) of the L1 info tree func (s *L1InfoTreeSync) GetInfoByIndex(ctx context.Context, index uint32) (*L1InfoTreeLeaf, error) { + if s.processor.halted { + return nil, sync.ErrInconsistentState + } return s.processor.GetInfoByIndex(ctx, index) } // GetL1InfoTreeRootByIndex returns the root of the L1 info tree at the moment the leaf with the given index was added func (s *L1InfoTreeSync) GetL1InfoTreeRootByIndex(ctx context.Context, index uint32) (types.Root, error) { + if s.processor.halted { + return types.Root{}, 
sync.ErrInconsistentState + } return s.processor.l1InfoTree.GetRootByIndex(ctx, index) } // GetLastRollupExitRoot return the last rollup exit root processed func (s *L1InfoTreeSync) GetLastRollupExitRoot(ctx context.Context) (types.Root, error) { + if s.processor.halted { + return types.Root{}, sync.ErrInconsistentState + } return s.processor.rollupExitTree.GetLastRoot(nil) } // GetLastL1InfoTreeRoot return the last root and index processed from the L1 Info tree func (s *L1InfoTreeSync) GetLastL1InfoTreeRoot(ctx context.Context) (types.Root, error) { + if s.processor.halted { + return types.Root{}, sync.ErrInconsistentState + } return s.processor.l1InfoTree.GetLastRoot(nil) } // GetLastProcessedBlock return the last processed block func (s *L1InfoTreeSync) GetLastProcessedBlock(ctx context.Context) (uint64, error) { + if s.processor.halted { + return 0, sync.ErrInconsistentState + } return s.processor.GetLastProcessedBlock(ctx) } func (s *L1InfoTreeSync) GetLocalExitRoot( ctx context.Context, networkID uint32, rollupExitRoot common.Hash, ) (common.Hash, error) { + if s.processor.halted { + return common.Hash{}, sync.ErrInconsistentState + } if networkID == 0 { return common.Hash{}, errors.New("network 0 is not a rollup, and it's not part of the rollup exit tree") } @@ -175,34 +202,58 @@ func (s *L1InfoTreeSync) GetLocalExitRoot( } func (s *L1InfoTreeSync) GetLastVerifiedBatches(rollupID uint32) (*VerifyBatches, error) { + if s.processor.halted { + return nil, sync.ErrInconsistentState + } return s.processor.GetLastVerifiedBatches(rollupID) } func (s *L1InfoTreeSync) GetFirstVerifiedBatches(rollupID uint32) (*VerifyBatches, error) { + if s.processor.halted { + return nil, sync.ErrInconsistentState + } return s.processor.GetFirstVerifiedBatches(rollupID) } func (s *L1InfoTreeSync) GetFirstVerifiedBatchesAfterBlock(rollupID uint32, blockNum uint64) (*VerifyBatches, error) { + if s.processor.halted { + return nil, sync.ErrInconsistentState + } return 
s.processor.GetFirstVerifiedBatchesAfterBlock(rollupID, blockNum) } func (s *L1InfoTreeSync) GetFirstL1InfoWithRollupExitRoot(rollupExitRoot common.Hash) (*L1InfoTreeLeaf, error) { + if s.processor.halted { + return nil, sync.ErrInconsistentState + } return s.processor.GetFirstL1InfoWithRollupExitRoot(rollupExitRoot) } func (s *L1InfoTreeSync) GetLastInfo() (*L1InfoTreeLeaf, error) { + if s.processor.halted { + return nil, sync.ErrInconsistentState + } return s.processor.GetLastInfo() } func (s *L1InfoTreeSync) GetFirstInfo() (*L1InfoTreeLeaf, error) { + if s.processor.halted { + return nil, sync.ErrInconsistentState + } return s.processor.GetFirstInfo() } func (s *L1InfoTreeSync) GetFirstInfoAfterBlock(blockNum uint64) (*L1InfoTreeLeaf, error) { + if s.processor.halted { + return nil, sync.ErrInconsistentState + } return s.processor.GetFirstInfoAfterBlock(blockNum) } func (s *L1InfoTreeSync) GetInfoByGlobalExitRoot(ger common.Hash) (*L1InfoTreeLeaf, error) { + if s.processor.halted { + return nil, sync.ErrInconsistentState + } return s.processor.GetInfoByGlobalExitRoot(ger) } @@ -210,10 +261,16 @@ func (s *L1InfoTreeSync) GetInfoByGlobalExitRoot(ger common.Hash) (*L1InfoTreeLe func (s *L1InfoTreeSync) GetL1InfoTreeMerkleProofFromIndexToRoot( ctx context.Context, index uint32, root common.Hash, ) (types.Proof, error) { + if s.processor.halted { + return types.Proof{}, sync.ErrInconsistentState + } return s.processor.l1InfoTree.GetProof(ctx, index, root) } // GetInitL1InfoRootMap returns the initial L1 info root map, nil if no root map has been set func (s *L1InfoTreeSync) GetInitL1InfoRootMap(ctx context.Context) (*L1InfoTreeInitial, error) { + if s.processor.halted { + return nil, sync.ErrInconsistentState + } return s.processor.GetInitL1InfoRootMap(nil) } diff --git a/l1infotreesync/l1infotreesync_test.go b/l1infotreesync/l1infotreesync_test.go new file mode 100644 index 00000000..a6c5ef03 --- /dev/null +++ b/l1infotreesync/l1infotreesync_test.go @@ -0,0 +1,198 
@@ +package l1infotreesync + +import ( + "context" + "errors" + "testing" + + "github.com/0xPolygon/cdk/sync" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func TestGetL1InfoTreeMerkleProof(t *testing.T) { + s := L1InfoTreeSync{ + processor: &processor{ + halted: true, + }, + } + _, _, err := s.GetL1InfoTreeMerkleProof(context.Background(), 0) + require.Error(t, err) + require.True(t, errors.Is(err, sync.ErrInconsistentState)) +} + +func TestGetRollupExitTreeMerkleProof(t *testing.T) { + s := L1InfoTreeSync{ + processor: &processor{ + halted: true, + }, + } + _, err := s.GetRollupExitTreeMerkleProof(context.Background(), 0, common.Hash{}) + require.Error(t, err) + require.True(t, errors.Is(err, sync.ErrInconsistentState)) +} + +func TestGetLatestInfoUntilBlock(t *testing.T) { + s := L1InfoTreeSync{ + processor: &processor{ + halted: true, + }, + } + _, err := s.GetLatestInfoUntilBlock(context.Background(), 0) + require.Error(t, err) + require.True(t, errors.Is(err, sync.ErrInconsistentState)) +} + +func TestGetInfoByIndex(t *testing.T) { + s := L1InfoTreeSync{ + processor: &processor{ + halted: true, + }, + } + _, err := s.GetInfoByIndex(context.Background(), 0) + require.Error(t, err) + require.True(t, errors.Is(err, sync.ErrInconsistentState)) +} + +func TestGetL1InfoTreeRootByIndex(t *testing.T) { + s := L1InfoTreeSync{ + processor: &processor{ + halted: true, + }, + } + _, err := s.GetL1InfoTreeRootByIndex(context.Background(), 0) + require.Error(t, err) + require.True(t, errors.Is(err, sync.ErrInconsistentState)) +} + +func TestGetLastRollupExitRoot(t *testing.T) { + s := L1InfoTreeSync{ + processor: &processor{ + halted: true, + }, + } + _, err := s.GetLastRollupExitRoot(context.Background()) + require.Error(t, err) + require.True(t, errors.Is(err, sync.ErrInconsistentState)) +} + +func TestGetLastL1InfoTreeRoot(t *testing.T) { + s := L1InfoTreeSync{ + processor: &processor{ + halted: true, + }, + } + _, err := 
s.GetLastL1InfoTreeRoot(context.Background()) + require.Error(t, err) + require.True(t, errors.Is(err, sync.ErrInconsistentState)) +} + +func TestGetLastProcessedBlock(t *testing.T) { + s := L1InfoTreeSync{ + processor: &processor{ + halted: true, + }, + } + _, err := s.GetLastProcessedBlock(context.Background()) + require.Error(t, err) + require.True(t, errors.Is(err, sync.ErrInconsistentState)) +} + +func TestGetLocalExitRoot(t *testing.T) { + s := L1InfoTreeSync{ + processor: &processor{ + halted: true, + }, + } + _, err := s.GetLocalExitRoot(context.Background(), 0, common.Hash{}) + require.Error(t, err) + require.True(t, errors.Is(err, sync.ErrInconsistentState)) +} + +func TestGetLastVerifiedBatches(t *testing.T) { + s := L1InfoTreeSync{ + processor: &processor{ + halted: true, + }, + } + _, err := s.GetLastVerifiedBatches(0) + require.Error(t, err) + require.True(t, errors.Is(err, sync.ErrInconsistentState)) +} + +func TestGetFirstVerifiedBatches(t *testing.T) { + s := L1InfoTreeSync{ + processor: &processor{ + halted: true, + }, + } + _, err := s.GetFirstVerifiedBatches(0) + require.Error(t, err) + require.True(t, errors.Is(err, sync.ErrInconsistentState)) +} + +func TestGetFirstVerifiedBatchesAfterBlock(t *testing.T) { + s := L1InfoTreeSync{ + processor: &processor{ + halted: true, + }, + } + _, err := s.GetFirstVerifiedBatchesAfterBlock(0, 0) + require.Error(t, err) + require.True(t, errors.Is(err, sync.ErrInconsistentState)) +} + +func TestGetFirstL1InfoWithRollupExitRoot(t *testing.T) { + s := L1InfoTreeSync{ + processor: &processor{ + halted: true, + }, + } + _, err := s.GetFirstL1InfoWithRollupExitRoot(common.Hash{}) + require.Error(t, err) + require.True(t, errors.Is(err, sync.ErrInconsistentState)) +} + +func TestGetLastInfo(t *testing.T) { + s := L1InfoTreeSync{ + processor: &processor{ + halted: true, + }, + } + _, err := s.GetLastInfo() + require.Error(t, err) + require.True(t, errors.Is(err, sync.ErrInconsistentState)) +} + +func 
TestGetFirstInfo(t *testing.T) { + s := L1InfoTreeSync{ + processor: &processor{ + halted: true, + }, + } + _, err := s.GetFirstInfo() + require.Error(t, err) + require.True(t, errors.Is(err, sync.ErrInconsistentState)) +} + +func TestGetFirstInfoAfterBlock(t *testing.T) { + s := L1InfoTreeSync{ + processor: &processor{ + halted: true, + }, + } + _, err := s.GetFirstInfoAfterBlock(0) + require.Error(t, err) + require.True(t, errors.Is(err, sync.ErrInconsistentState)) +} + +func TestGetL1InfoTreeMerkleProofFromIndexToRoot(t *testing.T) { + s := L1InfoTreeSync{ + processor: &processor{ + halted: true, + }, + } + _, err := s.GetL1InfoTreeMerkleProofFromIndexToRoot(context.Background(), 0, common.Hash{}) + require.Error(t, err) + require.True(t, errors.Is(err, sync.ErrInconsistentState)) +} diff --git a/l1infotreesync/mocks/eth_clienter.go b/l1infotreesync/mocks/eth_clienter.go index 270c40d9..3e5897f9 100644 --- a/l1infotreesync/mocks/eth_clienter.go +++ b/l1infotreesync/mocks/eth_clienter.go @@ -1083,4 +1083,4 @@ func NewEthClienter(t interface { t.Cleanup(func() { mock.AssertExpectations(t) }) return mock -} +} \ No newline at end of file diff --git a/l1infotreesync/mock_reorgdetector_test.go b/l1infotreesync/mocks/mock_reorgdetector.go similarity index 98% rename from l1infotreesync/mock_reorgdetector_test.go rename to l1infotreesync/mocks/mock_reorgdetector.go index 18ac7bc8..79c6232e 100644 --- a/l1infotreesync/mock_reorgdetector_test.go +++ b/l1infotreesync/mocks/mock_reorgdetector.go @@ -1,6 +1,6 @@ // Code generated by mockery v2.39.0. DO NOT EDIT. 
-package l1infotreesync +package mocks_l1infotreesync import ( context "context" diff --git a/l1infotreesync/processor.go b/l1infotreesync/processor.go index 2cd6190c..ee94e829 100644 --- a/l1infotreesync/processor.go +++ b/l1infotreesync/processor.go @@ -28,6 +28,8 @@ type processor struct { db *sql.DB l1InfoTree *tree.AppendOnlyTree rollupExitTree *tree.UpdatableTree + halted bool + haltedReason string } // UpdateL1InfoTree representation of the UpdateL1InfoTree event @@ -39,6 +41,13 @@ type UpdateL1InfoTree struct { Timestamp uint64 } +type UpdateL1InfoTreeV2 struct { + CurrentL1InfoRoot common.Hash + LeafCount uint32 + Blockhash common.Hash + MinTimestamp uint64 +} + // VerifyBatches representation of the VerifyBatches and VerifyBatchesTrustedAggregator events type VerifyBatches struct { BlockNumber uint64 `meddler:"block_num"` @@ -70,9 +79,10 @@ func (i *InitL1InfoRootMap) String() string { } type Event struct { - UpdateL1InfoTree *UpdateL1InfoTree - VerifyBatches *VerifyBatches - InitL1InfoRootMap *InitL1InfoRootMap + UpdateL1InfoTree *UpdateL1InfoTree + UpdateL1InfoTreeV2 *UpdateL1InfoTreeV2 + VerifyBatches *VerifyBatches + InitL1InfoRootMap *InitL1InfoRootMap } // L1InfoTreeLeaf representation of a leaf of the L1 Info tree @@ -227,15 +237,16 @@ func (p *processor) Reorg(ctx context.Context, firstReorgedBlock uint64) error { if err != nil { return err } + shouldRollback := true defer func() { - if err != nil { + if shouldRollback { if errRllbck := tx.Rollback(); errRllbck != nil { log.Errorf("error while rolling back tx %v", errRllbck) } } }() - _, err = tx.Exec(`DELETE FROM block WHERE num >= $1;`, firstReorgedBlock) + res, err := tx.Exec(`DELETE FROM block WHERE num >= $1;`, firstReorgedBlock) if err != nil { return err } @@ -247,19 +258,36 @@ func (p *processor) Reorg(ctx context.Context, firstReorgedBlock uint64) error { if err = p.rollupExitTree.Reorg(tx, firstReorgedBlock); err != nil { return err } + rowsAffected, err := res.RowsAffected() + if err != 
nil { + return err + } - return tx.Commit() + if err := tx.Commit(); err != nil { + return err + } + if rowsAffected > 0 { + p.halted = false + p.haltedReason = "" + } + shouldRollback = false + return nil } // ProcessBlock process the events of the block to build the rollup exit tree and the l1 info tree // and updates the last processed block (can be called without events for that purpose) func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { + if p.halted { + log.Errorf("processor is halted due to: %s", p.haltedReason) + return sync.ErrInconsistentState + } tx, err := db.NewTx(ctx, p.db) if err != nil { return err } + shouldRollback := true defer func() { - if err != nil { + if shouldRollback { if errRllbck := tx.Rollback(); errRllbck != nil { log.Errorf("error while rolling back tx %v", errRllbck) } @@ -277,7 +305,6 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { switch { case errors.Is(err, db.ErrNotFound): initialL1InfoIndex = 0 - err = nil case err != nil: return fmt.Errorf("getLastIndex err: %w", err) default: @@ -316,6 +343,29 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { log.Infof("inserted L1InfoTreeLeaf %s", info.String()) l1InfoLeavesAdded++ } + if event.UpdateL1InfoTreeV2 != nil { + root, err := p.l1InfoTree.GetLastRoot(tx) + if err != nil { + return fmt.Errorf("GetLastRoot(). err: %w", err) + } + // If the sanity check fails, halt the syncer and rollback. The sanity check could have + // failed due to a reorg. Hopefully, this is the case, eventually the reorg will get detected, + // and the syncer will get unhalted. Otherwise, this means that the syncer has an inconsistent state + // compared to the contracts, and this will need manual intervention. + if root.Hash != event.UpdateL1InfoTreeV2.CurrentL1InfoRoot || root.Index+1 != event.UpdateL1InfoTreeV2.LeafCount { + errStr := fmt.Sprintf( + "failed to check UpdateL1InfoTreeV2. Root: %s vs event:%s. 
"+ + "Index: : %d vs event.LeafCount:%d. Happened on block %d", + root.Hash, common.Bytes2Hex(event.UpdateL1InfoTreeV2.CurrentL1InfoRoot[:]), + root.Index, event.UpdateL1InfoTreeV2.LeafCount, + block.Num, + ) + log.Error(errStr) + p.haltedReason = errStr + p.halted = true + return sync.ErrInconsistentState + } + } if event.VerifyBatches != nil { log.Debugf("handle VerifyBatches event %s", event.VerifyBatches.String()) err = p.processVerifyBatches(tx, block.Num, event.VerifyBatches) @@ -340,6 +390,8 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { if err := tx.Commit(); err != nil { return fmt.Errorf("err: %w", err) } + shouldRollback = false + log.Infof("block %d processed with %d events", block.Num, len(block.Events)) return nil } diff --git a/l1infotreesync/processor_test.go b/l1infotreesync/processor_test.go index 34c5daef..e76ebaa5 100644 --- a/l1infotreesync/processor_test.go +++ b/l1infotreesync/processor_test.go @@ -358,3 +358,26 @@ func createTestLeaves(t *testing.T, numOfLeaves int) []*L1InfoTreeLeaf { return leaves } + +func TestProcessBlockUpdateL1InfoTreeV2DontMatchTree(t *testing.T) { + sut, err := newProcessor("file:Test_processor_BlockUpdateL1InfoTreeV2?mode=memory&cache=shared") + require.NoError(t, err) + block := sync.Block{ + Num: 10, + Events: []interface{}{ + Event{UpdateL1InfoTree: &UpdateL1InfoTree{ + MainnetExitRoot: common.HexToHash("beef"), + RollupExitRoot: common.HexToHash("5ca1e"), + ParentHash: common.HexToHash("1010101"), + Timestamp: 420, + }}, + Event{UpdateL1InfoTreeV2: &UpdateL1InfoTreeV2{ + CurrentL1InfoRoot: common.HexToHash("beef"), + LeafCount: 1, + }}, + }, + } + err = sut.ProcessBlock(context.Background(), block) + require.ErrorIs(t, err, sync.ErrInconsistentState) + require.True(t, sut.halted) +} diff --git a/sonar-project.properties b/sonar-project.properties index f46e9863..a6245819 100644 --- a/sonar-project.properties +++ b/sonar-project.properties @@ -11,7 +11,7 @@ 
sonar.exclusions=**/test/**,**/vendor/**,**/mocks/**,**/build/**,**/target/**,** sonar.tests=. sonar.test.inclusions=**/*_test.go -sonar.test.exclusions=**/vendor/**,**/docs/**,**/mocks/**,**/*.pb.go,**/*.yml,**/*.yaml,**/*.json,**/*.xml,**/*.toml,**/mocks_*/*,**/mock_*.go,**/agglayer/**,**/cmd/** +sonar.test.exclusions=test/contracts/**,**/vendor/**,**/docs/**,**/mocks/**,**/*.pb.go,**/*.yml,**/*.yaml,**/*.json,**/*.xml,**/*.toml,**/mocks_*/*,**/mock_*.go,**/agglayer/**,**/cmd/** sonar.issue.enforceSemantic=true # ===================================================== diff --git a/sync/driver.go b/sync/driver.go index bd066ba1..f85c04fb 100644 --- a/sync/driver.go +++ b/sync/driver.go @@ -1,6 +1,11 @@ package sync -import "context" +import ( + "context" + "errors" +) + +var ErrInconsistentState = errors.New("state is inconsistent, try again later once the state is consolidated") type Block struct { Num uint64 diff --git a/sync/evmdriver.go b/sync/evmdriver.go index 4e195af2..3412cd13 100644 --- a/sync/evmdriver.go +++ b/sync/evmdriver.go @@ -2,6 +2,7 @@ package sync import ( "context" + "errors" "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/reorgdetector" @@ -97,8 +98,8 @@ reset: cancel() return case b := <-downloadCh: - d.log.Debug("handleNewBlock", " blockNum: ", b.Num, " blockHash: ", b.Hash) - d.handleNewBlock(ctx, b) + d.log.Debugf("handleNewBlock, blockNum: %d, blockHash: %s", b.Num, b.Hash) + d.handleNewBlock(ctx, cancel, b) case firstReorgedBlock := <-d.reorgSub.ReorgedBlock: d.log.Debug("handleReorg from block: ", firstReorgedBlock) d.handleReorg(ctx, cancel, firstReorgedBlock) @@ -107,32 +108,59 @@ reset: } } -func (d *EVMDriver) handleNewBlock(ctx context.Context, b EVMBlock) { +func (d *EVMDriver) handleNewBlock(ctx context.Context, cancel context.CancelFunc, b EVMBlock) { attempts := 0 + succeed := false for { - err := d.reorgDetector.AddBlockToTrack(ctx, d.reorgDetectorID, b.Num, b.Hash) - if err != nil { - attempts++ - d.log.Errorf("error 
adding block %d to tracker: %v", b.Num, err) - d.rh.Handle("handleNewBlock", attempts) - continue + select { + case <-ctx.Done(): + // If the context is canceled, exit the function + d.log.Warnf("context canceled while adding block %d to tracker", b.Num) + return + default: + err := d.reorgDetector.AddBlockToTrack(ctx, d.reorgDetectorID, b.Num, b.Hash) + if err != nil { + attempts++ + d.log.Errorf("error adding block %d to tracker: %v", b.Num, err) + d.rh.Handle("handleNewBlock", attempts) + } else { + succeed = true + } + } + if succeed { + break } - break } attempts = 0 + succeed = false for { - blockToProcess := Block{ - Num: b.Num, - Events: b.Events, + select { + case <-ctx.Done(): + // If the context is canceled, exit the function + d.log.Warnf("context canceled while processing block %d", b.Num) + return + default: + blockToProcess := Block{ + Num: b.Num, + Events: b.Events, + } + err := d.processor.ProcessBlock(ctx, blockToProcess) + if err != nil { + if errors.Is(err, ErrInconsistentState) { + d.log.Warn("state got inconsistent after processing this block. Stopping downloader until there is a reorg") + cancel() + return + } + attempts++ + d.log.Errorf("error processing events for block %d, err: ", b.Num, err) + d.rh.Handle("handleNewBlock", attempts) + } else { + succeed = true + } } - err := d.processor.ProcessBlock(ctx, blockToProcess) - if err != nil { - attempts++ - d.log.Errorf("error processing events for block %d, err: ", b.Num, err) - d.rh.Handle("handleNewBlock", attempts) - continue + if succeed { + break } - break } } diff --git a/sync/evmdriver_test.go b/sync/evmdriver_test.go index c17370e1..ef551d0f 100644 --- a/sync/evmdriver_test.go +++ b/sync/evmdriver_test.go @@ -144,7 +144,7 @@ func TestHandleNewBlock(t *testing.T) { Return(nil) pm.On("ProcessBlock", ctx, Block{Num: b1.Num, Events: b1.Events}). 
Return(nil) - driver.handleNewBlock(ctx, b1) + driver.handleNewBlock(ctx, nil, b1) // reorg deteector fails once b2 := EVMBlock{ @@ -161,7 +161,7 @@ func TestHandleNewBlock(t *testing.T) { Return(nil).Once() pm.On("ProcessBlock", ctx, Block{Num: b2.Num, Events: b2.Events}). Return(nil) - driver.handleNewBlock(ctx, b2) + driver.handleNewBlock(ctx, nil, b2) // processor fails once b3 := EVMBlock{ @@ -177,7 +177,26 @@ func TestHandleNewBlock(t *testing.T) { Return(errors.New("foo")).Once() pm.On("ProcessBlock", ctx, Block{Num: b3.Num, Events: b3.Events}). Return(nil).Once() - driver.handleNewBlock(ctx, b3) + driver.handleNewBlock(ctx, nil, b3) + + // inconsistent state error + b4 := EVMBlock{ + EVMBlockHeader: EVMBlockHeader{ + Num: 4, + Hash: common.HexToHash("f00"), + }, + } + rdm. + On("AddBlockToTrack", ctx, reorgDetectorID, b4.Num, b4.Hash). + Return(nil) + pm.On("ProcessBlock", ctx, Block{Num: b4.Num, Events: b4.Events}). + Return(ErrInconsistentState) + cancelIsCalled := false + cancel := func() { + cancelIsCalled = true + } + driver.handleNewBlock(ctx, cancel, b4) + require.True(t, cancelIsCalled) } func TestHandleReorg(t *testing.T) { diff --git a/test/Makefile b/test/Makefile index a864cf82..51a475ed 100644 --- a/test/Makefile +++ b/test/Makefile @@ -35,7 +35,7 @@ generate-mocks-rpc: ## Generates mocks for rpc, using mockery tool generate-mocks-l1infotreesync: ## Generates mocks for l1infotreesync, using mockery tool rm -Rf ../l1infotreesync/mocks export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --all --case snake --dir ../l1infotreesync --output ../l1infotreesync/mocks --outpkg mocks_l1infotreesync ${COMMON_MOCKERY_PARAMS} - export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=ReorgDetector --dir=../sync --output=../l1infotreesync --outpkg=l1infotreesync --structname=ReorgDetectorMock --filename=mock_reorgdetector_test.go + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=ReorgDetector 
--dir=../sync --output=../l1infotreesync/mocks --outpkg=mocks_l1infotreesync --structname=ReorgDetectorMock --filename=mock_reorgdetector.go .PHONY: generate-mocks-aggoracle generate-mocks-helpers: ## Generates mocks for helpers , using mockery tool diff --git a/test/contracts/abi/verifybatchesmock.abi b/test/contracts/abi/verifybatchesmock.abi index 2b314a92..176fb78b 100644 --- a/test/contracts/abi/verifybatchesmock.abi +++ b/test/contracts/abi/verifybatchesmock.abi @@ -1 +1 @@ -[{"inputs":[{"internalType":"contract IPolygonZkEVMGlobalExitRootV2","name":"_globalExitRootManager","type":"address"}],"stateMutability":"nonpayable","type":"constructor"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint32","name":"rollupID","type":"uint32"},{"indexed":false,"internalType":"uint64","name":"numBatch","type":"uint64"},{"indexed":false,"internalType":"bytes32","name":"stateRoot","type":"bytes32"},{"indexed":false,"internalType":"bytes32","name":"exitRoot","type":"bytes32"},{"indexed":true,"internalType":"address","name":"aggregator","type":"address"}],"name":"VerifyBatches","type":"event"},{"inputs":[],"name":"getRollupExitRoot","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"globalExitRootManager","outputs":[{"internalType":"contract 
IPolygonZkEVMGlobalExitRootV2","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"rollupCount","outputs":[{"internalType":"uint32","name":"","type":"uint32"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint32","name":"rollupID","type":"uint32"}],"name":"rollupIDToLastExitRoot","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint32","name":"rollupID","type":"uint32"},{"internalType":"uint64","name":"finalNewBatch","type":"uint64"},{"internalType":"bytes32","name":"newLocalExitRoot","type":"bytes32"},{"internalType":"bytes32","name":"newStateRoot","type":"bytes32"},{"internalType":"bool","name":"updateGER","type":"bool"}],"name":"verifyBatches","outputs":[],"stateMutability":"nonpayable","type":"function"}] \ No newline at end of file +[{"inputs":[{"internalType":"contract IPolygonZkEVMGlobalExitRootV2","name":"_globalExitRootManager","type":"address"}],"stateMutability":"nonpayable","type":"constructor"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint32","name":"rollupID","type":"uint32"},{"indexed":false,"internalType":"uint64","name":"numBatch","type":"uint64"},{"indexed":false,"internalType":"bytes32","name":"stateRoot","type":"bytes32"},{"indexed":false,"internalType":"bytes32","name":"exitRoot","type":"bytes32"},{"indexed":true,"internalType":"address","name":"aggregator","type":"address"}],"name":"VerifyBatches","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint32","name":"rollupID","type":"uint32"},{"indexed":false,"internalType":"uint64","name":"numBatch","type":"uint64"},{"indexed":false,"internalType":"bytes32","name":"stateRoot","type":"bytes32"},{"indexed":false,"internalType":"bytes32","name":"exitRoot","type":"bytes32"},{"indexed":true,"internalType":"address","name":"aggregator","type":"address"}],"name":"VerifyBatchesTrustedAggregator","type":"
event"},{"inputs":[],"name":"getRollupExitRoot","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"globalExitRootManager","outputs":[{"internalType":"contract IPolygonZkEVMGlobalExitRootV2","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"rollupCount","outputs":[{"internalType":"uint32","name":"","type":"uint32"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint32","name":"rollupID","type":"uint32"}],"name":"rollupIDToLastExitRoot","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint32","name":"rollupID","type":"uint32"},{"internalType":"uint64","name":"finalNewBatch","type":"uint64"},{"internalType":"bytes32","name":"newLocalExitRoot","type":"bytes32"},{"internalType":"bytes32","name":"newStateRoot","type":"bytes32"},{"internalType":"bool","name":"updateGER","type":"bool"}],"name":"verifyBatches","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint32","name":"rollupID","type":"uint32"},{"internalType":"uint64","name":"finalNewBatch","type":"uint64"},{"internalType":"bytes32","name":"newLocalExitRoot","type":"bytes32"},{"internalType":"bytes32","name":"newStateRoot","type":"bytes32"},{"internalType":"bool","name":"updateGER","type":"bool"}],"name":"verifyBatchesTrustedAggregator","outputs":[],"stateMutability":"nonpayable","type":"function"}] \ No newline at end of file diff --git a/test/contracts/bin/verifybatchesmock.bin b/test/contracts/bin/verifybatchesmock.bin index 17badba8..fd4e6d15 100644 --- a/test/contracts/bin/verifybatchesmock.bin +++ b/test/contracts/bin/verifybatchesmock.bin @@ -1 +1 @@ 
-60a060405234801561001057600080fd5b5060405161082938038061082983398101604081905261002f91610040565b6001600160a01b0316608052610070565b60006020828403121561005257600080fd5b81516001600160a01b038116811461006957600080fd5b9392505050565b60805161079861009160003960008181609c01526104e301526107986000f3fe608060405234801561001057600080fd5b50600436106100575760003560e01c80630680cf5c1461005c578063a2967d991461008f578063d02103ca14610097578063db3abdb9146100d6578063f4e92675146100eb575b600080fd5b61007c61006a3660046105de565b60016020526000908152604090205481565b6040519081526020015b60405180910390f35b61007c610110565b6100be7f000000000000000000000000000000000000000000000000000000000000000081565b6040516001600160a01b039091168152602001610086565b6100e96100e4366004610600565b610499565b005b6000546100fb9063ffffffff1681565b60405163ffffffff9091168152602001610086565b6000805463ffffffff1680820361012957506000919050565b60008167ffffffffffffffff8111156101445761014461066f565b60405190808252806020026020018201604052801561016d578160200160208202803683370190505b50905060005b828110156101d35760016000610189838361069b565b63ffffffff1663ffffffff168152602001908152602001600020548282815181106101b6576101b66106b4565b6020908102919091010152806101cb816106ca565b915050610173565b50600060205b836001146103fd5760006101ee6002866106f9565b6101f960028761070d565b610203919061069b565b905060008167ffffffffffffffff8111156102205761022061066f565b604051908082528060200260200182016040528015610249578160200160208202803683370190505b50905060005b828110156103ad57610262600184610721565b8114801561027a57506102766002886106f9565b6001145b156102f7578561028b826002610734565b8151811061029b5761029b6106b4565b6020026020010151856040516020016102be929190918252602082015260400190565b604051602081830303815290604052805190602001208282815181106102e6576102e66106b4565b60200260200101818152505061039b565b85610303826002610734565b81518110610313576103136106b4565b6020026020010151868260026103299190610734565b61033490600161069b565b81518110610344576103446106b4565b60200260200101516040516020016103669
29190918252602082015260400190565b6040516020818303038152906040528051906020012082828151811061038e5761038e6106b4565b6020026020010181815250505b806103a5816106ca565b91505061024f565b5080945081955083846040516020016103d0929190918252602082015260400190565b60405160208183030381529060405280519060200120935082806103f39061074b565b93505050506101d9565b600083600081518110610412576104126106b4565b6020026020010151905060005b8281101561048f57604080516020810184905290810185905260600160408051601f19818403018152828252805160209182012090830187905290820186905292506060016040516020818303038152906040528051906020012093508080610487906106ca565b91505061041f565b5095945050505050565b60005463ffffffff90811690861611156104c3576000805463ffffffff191663ffffffff87161790555b63ffffffff851660009081526001602052604090208390558015610569577f00000000000000000000000000000000000000000000000000000000000000006001600160a01b03166333d6247d610518610110565b6040518263ffffffff1660e01b815260040161053691815260200190565b600060405180830381600087803b15801561055057600080fd5b505af1158015610564573d6000803e3d6000fd5b505050505b6040805167ffffffffffffffff8616815260208101849052908101849052339063ffffffff8716907faac1e7a157b259544ebacd6e8a82ae5d6c8f174e12aa48696277bcc9a661f0b49060600160405180910390a35050505050565b803563ffffffff811681146105d957600080fd5b919050565b6000602082840312156105f057600080fd5b6105f9826105c5565b9392505050565b600080600080600060a0868803121561061857600080fd5b610621866105c5565b9450602086013567ffffffffffffffff8116811461063e57600080fd5b935060408601359250606086013591506080860135801515811461066157600080fd5b809150509295509295909350565b634e487b7160e01b600052604160045260246000fd5b634e487b7160e01b600052601160045260246000fd5b808201808211156106ae576106ae610685565b92915050565b634e487b7160e01b600052603260045260246000fd5b6000600182016106dc576106dc610685565b5060010190565b634e487b7160e01b600052601260045260246000fd5b600082610708576107086106e3565b500690565b60008261071c5761071c6106e3565b500490565b818103818111156106ae576106ae610685565b8082028115828204841
4176106ae576106ae610685565b60008161075a5761075a610685565b50600019019056fea26469706673582212205adc139a1c2a423d3d8d0db882b69ac1b5cdcb3419bc6315ca33eeac9aa68a7464736f6c63430008120033 \ No newline at end of file +60a060405234801561001057600080fd5b5060405161097138038061097183398101604081905261002f91610040565b6001600160a01b0316608052610070565b60006020828403121561005257600080fd5b81516001600160a01b038116811461006957600080fd5b9392505050565b6080516108d96100986000396000818160bc01528181610178015261062e01526108d96000f3fe608060405234801561001057600080fd5b50600436106100625760003560e01c80630680cf5c1461006757806343955dd31461009a578063a2967d99146100af578063d02103ca146100b7578063db3abdb9146100f6578063f4e9267514610109575b600080fd5b61008761007536600461071f565b60016020526000908152604090205481565b6040519081526020015b60405180910390f35b6100ad6100a8366004610741565b61012e565b005b61008761025b565b6100de7f000000000000000000000000000000000000000000000000000000000000000081565b6040516001600160a01b039091168152602001610091565b6100ad610104366004610741565b6105e4565b6000546101199063ffffffff1681565b60405163ffffffff9091168152602001610091565b60005463ffffffff9081169086161115610158576000805463ffffffff191663ffffffff87161790555b63ffffffff8516600090815260016020526040902083905580156101fe577f00000000000000000000000000000000000000000000000000000000000000006001600160a01b03166333d6247d6101ad61025b565b6040518263ffffffff1660e01b81526004016101cb91815260200190565b600060405180830381600087803b1580156101e557600080fd5b505af11580156101f9573d6000803e3d6000fd5b505050505b6040805167ffffffffffffffff8616815260208101849052908101849052339063ffffffff8716907fd1ec3a1216f08b6eff72e169ceb548b782db18a6614852618d86bb19f3f9b0d3906060015b60405180910390a35050505050565b6000805463ffffffff1680820361027457506000919050565b60008167ffffffffffffffff81111561028f5761028f6107b0565b6040519080825280602002602001820160405280156102b8578160200160208202803683370190505b50905060005b8281101561031e57600160006102d483836107dc565b63ffffffff1663ffffffff168152602001908
15260200160002054828281518110610301576103016107f5565b6020908102919091010152806103168161080b565b9150506102be565b50600060205b8360011461054857600061033960028661083a565b61034460028761084e565b61034e91906107dc565b905060008167ffffffffffffffff81111561036b5761036b6107b0565b604051908082528060200260200182016040528015610394578160200160208202803683370190505b50905060005b828110156104f8576103ad600184610862565b811480156103c557506103c160028861083a565b6001145b1561044257856103d6826002610875565b815181106103e6576103e66107f5565b602002602001015185604051602001610409929190918252602082015260400190565b60405160208183030381529060405280519060200120828281518110610431576104316107f5565b6020026020010181815250506104e6565b8561044e826002610875565b8151811061045e5761045e6107f5565b6020026020010151868260026104749190610875565b61047f9060016107dc565b8151811061048f5761048f6107f5565b60200260200101516040516020016104b1929190918252602082015260400190565b604051602081830303815290604052805190602001208282815181106104d9576104d96107f5565b6020026020010181815250505b806104f08161080b565b91505061039a565b50809450819550838460405160200161051b929190918252602082015260400190565b604051602081830303815290604052805190602001209350828061053e9061088c565b9350505050610324565b60008360008151811061055d5761055d6107f5565b6020026020010151905060005b828110156105da57604080516020810184905290810185905260600160408051601f198184030181528282528051602091820120908301879052908201869052925060600160405160208183030381529060405280519060200120935080806105d29061080b565b91505061056a565b5095945050505050565b60005463ffffffff908116908616111561060e576000805463ffffffff191663ffffffff87161790555b63ffffffff8516600090815260016020526040902083905580156106b4577f00000000000000000000000000000000000000000000000000000000000000006001600160a01b03166333d6247d61066361025b565b6040518263ffffffff1660e01b815260040161068191815260200190565b600060405180830381600087803b15801561069b57600080fd5b505af11580156106af573d6000803e3d6000fd5b505050505b6040805167ffffffffffffffff861681526020810184905290810
1849052339063ffffffff8716907faac1e7a157b259544ebacd6e8a82ae5d6c8f174e12aa48696277bcc9a661f0b49060600161024c565b803563ffffffff8116811461071a57600080fd5b919050565b60006020828403121561073157600080fd5b61073a82610706565b9392505050565b600080600080600060a0868803121561075957600080fd5b61076286610706565b9450602086013567ffffffffffffffff8116811461077f57600080fd5b93506040860135925060608601359150608086013580151581146107a257600080fd5b809150509295509295909350565b634e487b7160e01b600052604160045260246000fd5b634e487b7160e01b600052601160045260246000fd5b808201808211156107ef576107ef6107c6565b92915050565b634e487b7160e01b600052603260045260246000fd5b60006001820161081d5761081d6107c6565b5060010190565b634e487b7160e01b600052601260045260246000fd5b60008261084957610849610824565b500690565b60008261085d5761085d610824565b500490565b818103818111156107ef576107ef6107c6565b80820281158282048414176107ef576107ef6107c6565b60008161089b5761089b6107c6565b50600019019056fea26469706673582212204b504ae2d3686f35f611e3ef5bc38d1f2d64ce4fea28c7a2a657dbe4ba6178ce64736f6c63430008120033 \ No newline at end of file diff --git a/test/contracts/verifybatchesmock/VerifyBatchesMock.sol b/test/contracts/verifybatchesmock/VerifyBatchesMock.sol index 6a65a548..34db310a 100644 --- a/test/contracts/verifybatchesmock/VerifyBatchesMock.sol +++ b/test/contracts/verifybatchesmock/VerifyBatchesMock.sol @@ -35,6 +35,14 @@ contract VerifyBatchesMock { address indexed aggregator ); + event VerifyBatchesTrustedAggregator( + uint32 indexed rollupID, + uint64 numBatch, + bytes32 stateRoot, + bytes32 exitRoot, + address indexed aggregator + ); + constructor( IPolygonZkEVMGlobalExitRootV2 _globalExitRootManager ) { @@ -65,6 +73,30 @@ contract VerifyBatchesMock { ); } + function verifyBatchesTrustedAggregator( + uint32 rollupID, + uint64 finalNewBatch, + bytes32 newLocalExitRoot, + bytes32 newStateRoot, + bool updateGER + ) external { + if (rollupID > rollupCount) { + rollupCount = rollupID; + } + rollupIDToLastExitRoot[rollupID] = 
newLocalExitRoot; + if (updateGER) { + globalExitRootManager.updateExitRoot(getRollupExitRoot()); + } + + emit VerifyBatchesTrustedAggregator( + rollupID, + finalNewBatch, + newStateRoot, + newLocalExitRoot, + msg.sender + ); + } + function getRollupExitRoot() public view returns (bytes32) { uint256 currentNodes = rollupCount; diff --git a/test/contracts/verifybatchesmock/verifybatchesmock.go b/test/contracts/verifybatchesmock/verifybatchesmock.go index 10fc3b8d..58b67630 100644 --- a/test/contracts/verifybatchesmock/verifybatchesmock.go +++ b/test/contracts/verifybatchesmock/verifybatchesmock.go @@ -31,8 +31,8 @@ var ( // VerifybatchesmockMetaData contains all meta data concerning the Verifybatchesmock contract. var VerifybatchesmockMetaData = &bind.MetaData{ - ABI: "[{\"inputs\":[{\"internalType\":\"contractIPolygonZkEVMGlobalExitRootV2\",\"name\":\"_globalExitRootManager\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint32\",\"name\":\"rollupID\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"numBatch\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"stateRoot\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"exitRoot\",\"type\":\"bytes32\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"aggregator\",\"type\":\"address\"}],\"name\":\"VerifyBatches\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"getRollupExitRoot\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"globalExitRootManager\",\"outputs\":[{\"internalType\":\"contractIPolygonZkEVMGlobalExitRootV2\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"rollupCount\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"\",\"t
ype\":\"uint32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"rollupID\",\"type\":\"uint32\"}],\"name\":\"rollupIDToLastExitRoot\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"rollupID\",\"type\":\"uint32\"},{\"internalType\":\"uint64\",\"name\":\"finalNewBatch\",\"type\":\"uint64\"},{\"internalType\":\"bytes32\",\"name\":\"newLocalExitRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"newStateRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bool\",\"name\":\"updateGER\",\"type\":\"bool\"}],\"name\":\"verifyBatches\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", - Bin: "0x60a060405234801561001057600080fd5b5060405161082938038061082983398101604081905261002f91610040565b6001600160a01b0316608052610070565b60006020828403121561005257600080fd5b81516001600160a01b038116811461006957600080fd5b9392505050565b60805161079861009160003960008181609c01526104e301526107986000f3fe608060405234801561001057600080fd5b50600436106100575760003560e01c80630680cf5c1461005c578063a2967d991461008f578063d02103ca14610097578063db3abdb9146100d6578063f4e92675146100eb575b600080fd5b61007c61006a3660046105de565b60016020526000908152604090205481565b6040519081526020015b60405180910390f35b61007c610110565b6100be7f000000000000000000000000000000000000000000000000000000000000000081565b6040516001600160a01b039091168152602001610086565b6100e96100e4366004610600565b610499565b005b6000546100fb9063ffffffff1681565b60405163ffffffff9091168152602001610086565b6000805463ffffffff1680820361012957506000919050565b60008167ffffffffffffffff8111156101445761014461066f565b60405190808252806020026020018201604052801561016d578160200160208202803683370190505b50905060005b828110156101d35760016000610189838361069b565b63ffffffff1663ffffffff168152602001908152602001600020548282815181106101b6576101b66106b4565b60
20908102919091010152806101cb816106ca565b915050610173565b50600060205b836001146103fd5760006101ee6002866106f9565b6101f960028761070d565b610203919061069b565b905060008167ffffffffffffffff8111156102205761022061066f565b604051908082528060200260200182016040528015610249578160200160208202803683370190505b50905060005b828110156103ad57610262600184610721565b8114801561027a57506102766002886106f9565b6001145b156102f7578561028b826002610734565b8151811061029b5761029b6106b4565b6020026020010151856040516020016102be929190918252602082015260400190565b604051602081830303815290604052805190602001208282815181106102e6576102e66106b4565b60200260200101818152505061039b565b85610303826002610734565b81518110610313576103136106b4565b6020026020010151868260026103299190610734565b61033490600161069b565b81518110610344576103446106b4565b6020026020010151604051602001610366929190918252602082015260400190565b6040516020818303038152906040528051906020012082828151811061038e5761038e6106b4565b6020026020010181815250505b806103a5816106ca565b91505061024f565b5080945081955083846040516020016103d0929190918252602082015260400190565b60405160208183030381529060405280519060200120935082806103f39061074b565b93505050506101d9565b600083600081518110610412576104126106b4565b6020026020010151905060005b8281101561048f57604080516020810184905290810185905260600160408051601f19818403018152828252805160209182012090830187905290820186905292506060016040516020818303038152906040528051906020012093508080610487906106ca565b91505061041f565b5095945050505050565b60005463ffffffff90811690861611156104c3576000805463ffffffff191663ffffffff87161790555b63ffffffff851660009081526001602052604090208390558015610569577f00000000000000000000000000000000000000000000000000000000000000006001600160a01b03166333d6247d610518610110565b6040518263ffffffff1660e01b815260040161053691815260200190565b600060405180830381600087803b15801561055057600080fd5b505af1158015610564573d6000803e3d6000fd5b505050505b6040805167ffffffffffffffff8616815260208101849052908101849052339063ffffffff8716907faac1e7a157b259544ebacd6e8a
82ae5d6c8f174e12aa48696277bcc9a661f0b49060600160405180910390a35050505050565b803563ffffffff811681146105d957600080fd5b919050565b6000602082840312156105f057600080fd5b6105f9826105c5565b9392505050565b600080600080600060a0868803121561061857600080fd5b610621866105c5565b9450602086013567ffffffffffffffff8116811461063e57600080fd5b935060408601359250606086013591506080860135801515811461066157600080fd5b809150509295509295909350565b634e487b7160e01b600052604160045260246000fd5b634e487b7160e01b600052601160045260246000fd5b808201808211156106ae576106ae610685565b92915050565b634e487b7160e01b600052603260045260246000fd5b6000600182016106dc576106dc610685565b5060010190565b634e487b7160e01b600052601260045260246000fd5b600082610708576107086106e3565b500690565b60008261071c5761071c6106e3565b500490565b818103818111156106ae576106ae610685565b80820281158282048414176106ae576106ae610685565b60008161075a5761075a610685565b50600019019056fea26469706673582212205adc139a1c2a423d3d8d0db882b69ac1b5cdcb3419bc6315ca33eeac9aa68a7464736f6c63430008120033", + ABI: 
"[{\"inputs\":[{\"internalType\":\"contractIPolygonZkEVMGlobalExitRootV2\",\"name\":\"_globalExitRootManager\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint32\",\"name\":\"rollupID\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"numBatch\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"stateRoot\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"exitRoot\",\"type\":\"bytes32\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"aggregator\",\"type\":\"address\"}],\"name\":\"VerifyBatches\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint32\",\"name\":\"rollupID\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"numBatch\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"stateRoot\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"exitRoot\",\"type\":\"bytes32\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"aggregator\",\"type\":\"address\"}],\"name\":\"VerifyBatchesTrustedAggregator\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"getRollupExitRoot\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"globalExitRootManager\",\"outputs\":[{\"internalType\":\"contractIPolygonZkEVMGlobalExitRootV2\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"rollupCount\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"\",\"type\":\"uint32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"rollupID\",\"type\":\"uint32\"}],\"name\":\"rollupIDToLastExitRoot\",\"outputs\":[{\"internalType\"
:\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"rollupID\",\"type\":\"uint32\"},{\"internalType\":\"uint64\",\"name\":\"finalNewBatch\",\"type\":\"uint64\"},{\"internalType\":\"bytes32\",\"name\":\"newLocalExitRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"newStateRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bool\",\"name\":\"updateGER\",\"type\":\"bool\"}],\"name\":\"verifyBatches\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"rollupID\",\"type\":\"uint32\"},{\"internalType\":\"uint64\",\"name\":\"finalNewBatch\",\"type\":\"uint64\"},{\"internalType\":\"bytes32\",\"name\":\"newLocalExitRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"newStateRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bool\",\"name\":\"updateGER\",\"type\":\"bool\"}],\"name\":\"verifyBatchesTrustedAggregator\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: 
"0x60a060405234801561001057600080fd5b5060405161097138038061097183398101604081905261002f91610040565b6001600160a01b0316608052610070565b60006020828403121561005257600080fd5b81516001600160a01b038116811461006957600080fd5b9392505050565b6080516108d96100986000396000818160bc01528181610178015261062e01526108d96000f3fe608060405234801561001057600080fd5b50600436106100625760003560e01c80630680cf5c1461006757806343955dd31461009a578063a2967d99146100af578063d02103ca146100b7578063db3abdb9146100f6578063f4e9267514610109575b600080fd5b61008761007536600461071f565b60016020526000908152604090205481565b6040519081526020015b60405180910390f35b6100ad6100a8366004610741565b61012e565b005b61008761025b565b6100de7f000000000000000000000000000000000000000000000000000000000000000081565b6040516001600160a01b039091168152602001610091565b6100ad610104366004610741565b6105e4565b6000546101199063ffffffff1681565b60405163ffffffff9091168152602001610091565b60005463ffffffff9081169086161115610158576000805463ffffffff191663ffffffff87161790555b63ffffffff8516600090815260016020526040902083905580156101fe577f00000000000000000000000000000000000000000000000000000000000000006001600160a01b03166333d6247d6101ad61025b565b6040518263ffffffff1660e01b81526004016101cb91815260200190565b600060405180830381600087803b1580156101e557600080fd5b505af11580156101f9573d6000803e3d6000fd5b505050505b6040805167ffffffffffffffff8616815260208101849052908101849052339063ffffffff8716907fd1ec3a1216f08b6eff72e169ceb548b782db18a6614852618d86bb19f3f9b0d3906060015b60405180910390a35050505050565b6000805463ffffffff1680820361027457506000919050565b60008167ffffffffffffffff81111561028f5761028f6107b0565b6040519080825280602002602001820160405280156102b8578160200160208202803683370190505b50905060005b8281101561031e57600160006102d483836107dc565b63ffffffff1663ffffffff16815260200190815260200160002054828281518110610301576103016107f5565b6020908102919091010152806103168161080b565b9150506102be565b50600060205b8360011461054857600061033960028661083a565b61034460028761084e565b61034e91906107dc565
b905060008167ffffffffffffffff81111561036b5761036b6107b0565b604051908082528060200260200182016040528015610394578160200160208202803683370190505b50905060005b828110156104f8576103ad600184610862565b811480156103c557506103c160028861083a565b6001145b1561044257856103d6826002610875565b815181106103e6576103e66107f5565b602002602001015185604051602001610409929190918252602082015260400190565b60405160208183030381529060405280519060200120828281518110610431576104316107f5565b6020026020010181815250506104e6565b8561044e826002610875565b8151811061045e5761045e6107f5565b6020026020010151868260026104749190610875565b61047f9060016107dc565b8151811061048f5761048f6107f5565b60200260200101516040516020016104b1929190918252602082015260400190565b604051602081830303815290604052805190602001208282815181106104d9576104d96107f5565b6020026020010181815250505b806104f08161080b565b91505061039a565b50809450819550838460405160200161051b929190918252602082015260400190565b604051602081830303815290604052805190602001209350828061053e9061088c565b9350505050610324565b60008360008151811061055d5761055d6107f5565b6020026020010151905060005b828110156105da57604080516020810184905290810185905260600160408051601f198184030181528282528051602091820120908301879052908201869052925060600160405160208183030381529060405280519060200120935080806105d29061080b565b91505061056a565b5095945050505050565b60005463ffffffff908116908616111561060e576000805463ffffffff191663ffffffff87161790555b63ffffffff8516600090815260016020526040902083905580156106b4577f00000000000000000000000000000000000000000000000000000000000000006001600160a01b03166333d6247d61066361025b565b6040518263ffffffff1660e01b815260040161068191815260200190565b600060405180830381600087803b15801561069b57600080fd5b505af11580156106af573d6000803e3d6000fd5b505050505b6040805167ffffffffffffffff8616815260208101849052908101849052339063ffffffff8716907faac1e7a157b259544ebacd6e8a82ae5d6c8f174e12aa48696277bcc9a661f0b49060600161024c565b803563ffffffff8116811461071a57600080fd5b919050565b60006020828403121561073157600080fd5b61073a826
10706565b9392505050565b600080600080600060a0868803121561075957600080fd5b61076286610706565b9450602086013567ffffffffffffffff8116811461077f57600080fd5b93506040860135925060608601359150608086013580151581146107a257600080fd5b809150509295509295909350565b634e487b7160e01b600052604160045260246000fd5b634e487b7160e01b600052601160045260246000fd5b808201808211156107ef576107ef6107c6565b92915050565b634e487b7160e01b600052603260045260246000fd5b60006001820161081d5761081d6107c6565b5060010190565b634e487b7160e01b600052601260045260246000fd5b60008261084957610849610824565b500690565b60008261085d5761085d610824565b500490565b818103818111156107ef576107ef6107c6565b80820281158282048414176107ef576107ef6107c6565b60008161089b5761089b6107c6565b50600019019056fea26469706673582212204b504ae2d3686f35f611e3ef5bc38d1f2d64ce4fea28c7a2a657dbe4ba6178ce64736f6c63430008120033", } // VerifybatchesmockABI is the input ABI used to generate the binding from. @@ -347,6 +347,27 @@ func (_Verifybatchesmock *VerifybatchesmockTransactorSession) VerifyBatches(roll return _Verifybatchesmock.Contract.VerifyBatches(&_Verifybatchesmock.TransactOpts, rollupID, finalNewBatch, newLocalExitRoot, newStateRoot, updateGER) } +// VerifyBatchesTrustedAggregator is a paid mutator transaction binding the contract method 0x43955dd3. +// +// Solidity: function verifyBatchesTrustedAggregator(uint32 rollupID, uint64 finalNewBatch, bytes32 newLocalExitRoot, bytes32 newStateRoot, bool updateGER) returns() +func (_Verifybatchesmock *VerifybatchesmockTransactor) VerifyBatchesTrustedAggregator(opts *bind.TransactOpts, rollupID uint32, finalNewBatch uint64, newLocalExitRoot [32]byte, newStateRoot [32]byte, updateGER bool) (*types.Transaction, error) { + return _Verifybatchesmock.contract.Transact(opts, "verifyBatchesTrustedAggregator", rollupID, finalNewBatch, newLocalExitRoot, newStateRoot, updateGER) +} + +// VerifyBatchesTrustedAggregator is a paid mutator transaction binding the contract method 0x43955dd3. 
+// +// Solidity: function verifyBatchesTrustedAggregator(uint32 rollupID, uint64 finalNewBatch, bytes32 newLocalExitRoot, bytes32 newStateRoot, bool updateGER) returns() +func (_Verifybatchesmock *VerifybatchesmockSession) VerifyBatchesTrustedAggregator(rollupID uint32, finalNewBatch uint64, newLocalExitRoot [32]byte, newStateRoot [32]byte, updateGER bool) (*types.Transaction, error) { + return _Verifybatchesmock.Contract.VerifyBatchesTrustedAggregator(&_Verifybatchesmock.TransactOpts, rollupID, finalNewBatch, newLocalExitRoot, newStateRoot, updateGER) +} + +// VerifyBatchesTrustedAggregator is a paid mutator transaction binding the contract method 0x43955dd3. +// +// Solidity: function verifyBatchesTrustedAggregator(uint32 rollupID, uint64 finalNewBatch, bytes32 newLocalExitRoot, bytes32 newStateRoot, bool updateGER) returns() +func (_Verifybatchesmock *VerifybatchesmockTransactorSession) VerifyBatchesTrustedAggregator(rollupID uint32, finalNewBatch uint64, newLocalExitRoot [32]byte, newStateRoot [32]byte, updateGER bool) (*types.Transaction, error) { + return _Verifybatchesmock.Contract.VerifyBatchesTrustedAggregator(&_Verifybatchesmock.TransactOpts, rollupID, finalNewBatch, newLocalExitRoot, newStateRoot, updateGER) +} + // VerifybatchesmockVerifyBatchesIterator is returned from FilterVerifyBatches and is used to iterate over the raw logs and unpacked data for VerifyBatches events raised by the Verifybatchesmock contract. type VerifybatchesmockVerifyBatchesIterator struct { Event *VerifybatchesmockVerifyBatches // Event containing the contract specifics and raw log @@ -504,3 +525,161 @@ func (_Verifybatchesmock *VerifybatchesmockFilterer) ParseVerifyBatches(log type event.Raw = log return event, nil } + +// VerifybatchesmockVerifyBatchesTrustedAggregatorIterator is returned from FilterVerifyBatchesTrustedAggregator and is used to iterate over the raw logs and unpacked data for VerifyBatchesTrustedAggregator events raised by the Verifybatchesmock contract. 
+type VerifybatchesmockVerifyBatchesTrustedAggregatorIterator struct { + Event *VerifybatchesmockVerifyBatchesTrustedAggregator // Event containing the contract specifics and raw log + + contract *bind.BoundContract // Generic contract to use for unpacking event data + event string // Event name to use for unpacking event data + + logs chan types.Log // Log channel receiving the found contract events + sub ethereum.Subscription // Subscription for errors, completion and termination + done bool // Whether the subscription completed delivering logs + fail error // Occurred error to stop iteration +} + +// Next advances the iterator to the subsequent event, returning whether there +// are any more events found. In case of a retrieval or parsing error, false is +// returned and Error() can be queried for the exact failure. +func (it *VerifybatchesmockVerifyBatchesTrustedAggregatorIterator) Next() bool { + // If the iterator failed, stop iterating + if it.fail != nil { + return false + } + // If the iterator completed, deliver directly whatever's available + if it.done { + select { + case log := <-it.logs: + it.Event = new(VerifybatchesmockVerifyBatchesTrustedAggregator) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + // Iterator still in progress, wait for either a data or an error event + select { + case log := <-it.logs: + it.Event = new(VerifybatchesmockVerifyBatchesTrustedAggregator) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +// Error returns any retrieval or parsing error occurred during filtering. 
+func (it *VerifybatchesmockVerifyBatchesTrustedAggregatorIterator) Error() error { + return it.fail +} + +// Close terminates the iteration process, releasing any pending underlying +// resources. +func (it *VerifybatchesmockVerifyBatchesTrustedAggregatorIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +// VerifybatchesmockVerifyBatchesTrustedAggregator represents a VerifyBatchesTrustedAggregator event raised by the Verifybatchesmock contract. +type VerifybatchesmockVerifyBatchesTrustedAggregator struct { + RollupID uint32 + NumBatch uint64 + StateRoot [32]byte + ExitRoot [32]byte + Aggregator common.Address + Raw types.Log // Blockchain specific contextual infos +} + +// FilterVerifyBatchesTrustedAggregator is a free log retrieval operation binding the contract event 0xd1ec3a1216f08b6eff72e169ceb548b782db18a6614852618d86bb19f3f9b0d3. +// +// Solidity: event VerifyBatchesTrustedAggregator(uint32 indexed rollupID, uint64 numBatch, bytes32 stateRoot, bytes32 exitRoot, address indexed aggregator) +func (_Verifybatchesmock *VerifybatchesmockFilterer) FilterVerifyBatchesTrustedAggregator(opts *bind.FilterOpts, rollupID []uint32, aggregator []common.Address) (*VerifybatchesmockVerifyBatchesTrustedAggregatorIterator, error) { + + var rollupIDRule []interface{} + for _, rollupIDItem := range rollupID { + rollupIDRule = append(rollupIDRule, rollupIDItem) + } + + var aggregatorRule []interface{} + for _, aggregatorItem := range aggregator { + aggregatorRule = append(aggregatorRule, aggregatorItem) + } + + logs, sub, err := _Verifybatchesmock.contract.FilterLogs(opts, "VerifyBatchesTrustedAggregator", rollupIDRule, aggregatorRule) + if err != nil { + return nil, err + } + return &VerifybatchesmockVerifyBatchesTrustedAggregatorIterator{contract: _Verifybatchesmock.contract, event: "VerifyBatchesTrustedAggregator", logs: logs, sub: sub}, nil +} + +// WatchVerifyBatchesTrustedAggregator is a free log subscription operation binding the contract event 
0xd1ec3a1216f08b6eff72e169ceb548b782db18a6614852618d86bb19f3f9b0d3. +// +// Solidity: event VerifyBatchesTrustedAggregator(uint32 indexed rollupID, uint64 numBatch, bytes32 stateRoot, bytes32 exitRoot, address indexed aggregator) +func (_Verifybatchesmock *VerifybatchesmockFilterer) WatchVerifyBatchesTrustedAggregator(opts *bind.WatchOpts, sink chan<- *VerifybatchesmockVerifyBatchesTrustedAggregator, rollupID []uint32, aggregator []common.Address) (event.Subscription, error) { + + var rollupIDRule []interface{} + for _, rollupIDItem := range rollupID { + rollupIDRule = append(rollupIDRule, rollupIDItem) + } + + var aggregatorRule []interface{} + for _, aggregatorItem := range aggregator { + aggregatorRule = append(aggregatorRule, aggregatorItem) + } + + logs, sub, err := _Verifybatchesmock.contract.WatchLogs(opts, "VerifyBatchesTrustedAggregator", rollupIDRule, aggregatorRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + // New log arrived, parse the event and forward to the user + event := new(VerifybatchesmockVerifyBatchesTrustedAggregator) + if err := _Verifybatchesmock.contract.UnpackLog(event, "VerifyBatchesTrustedAggregator", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +// ParseVerifyBatchesTrustedAggregator is a log parse operation binding the contract event 0xd1ec3a1216f08b6eff72e169ceb548b782db18a6614852618d86bb19f3f9b0d3. 
+// +// Solidity: event VerifyBatchesTrustedAggregator(uint32 indexed rollupID, uint64 numBatch, bytes32 stateRoot, bytes32 exitRoot, address indexed aggregator) +func (_Verifybatchesmock *VerifybatchesmockFilterer) ParseVerifyBatchesTrustedAggregator(log types.Log) (*VerifybatchesmockVerifyBatchesTrustedAggregator, error) { + event := new(VerifybatchesmockVerifyBatchesTrustedAggregator) + if err := _Verifybatchesmock.contract.UnpackLog(event, "VerifyBatchesTrustedAggregator", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} From b480dd4f07c606ee6e9dfc7a4fd69a43c97656b1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toni=20Ram=C3=ADrez?= <58293609+ToniRamirezM@users.noreply.github.com> Date: Tue, 12 Nov 2024 11:35:23 +0100 Subject: [PATCH 26/33] feat: remove sanity check (#178) --- sequencesender/txbuilder/banana_base.go | 20 +++-------- sequencesender/txbuilder/banana_base_test.go | 37 -------------------- 2 files changed, 4 insertions(+), 53 deletions(-) diff --git a/sequencesender/txbuilder/banana_base.go b/sequencesender/txbuilder/banana_base.go index 2868bb4b..ee21228d 100644 --- a/sequencesender/txbuilder/banana_base.go +++ b/sequencesender/txbuilder/banana_base.go @@ -149,21 +149,25 @@ func (t *TxBuilderBananaBase) NewSequence( counterL1InfoRoot, err := t.GetCounterL1InfoRoot(ctx, greatestL1Index) if err != nil { + log.Errorf("error getting CounterL1InfoRoot: %s", err) return nil, err } sequence.CounterL1InfoRoot = counterL1InfoRoot l1InfoRoot, err := t.getL1InfoRoot(sequence.CounterL1InfoRoot) if err != nil { + log.Errorf("error getting L1InfoRootMap: %s", err) return nil, err } err = t.CheckL1InfoTreeLeafCounterVsInitL1InfoMap(ctx, sequence.CounterL1InfoRoot) if err != nil { + log.Errorf("error checking L1InfoTreeLeafCounterVsInitL1InfoMap: %s", err) return nil, err } sequence.L1InfoRoot = l1InfoRoot accInputHash, err := t.rollupContract.LastAccInputHash(&bind.CallOpts{Pending: false}) if err != nil { + log.Errorf("error getting 
LastAccInputHash: %s", err) return nil, err } @@ -187,26 +191,10 @@ func (t *TxBuilderBananaBase) NewSequence( sequence.OldAccInputHash = oldAccInputHash sequence.AccInputHash = accInputHash - - err = SequenceSanityCheck(sequence) - if err != nil { - return nil, fmt.Errorf("sequenceSanityCheck fails. Err: %w", err) - } res := NewBananaSequence(*sequence) return res, nil } -func SequenceSanityCheck(seq *etherman.SequenceBanana) error { - maxL1InfoIndex, err := calculateMaxL1InfoTreeIndexInsideSequence(seq) - if err != nil { - return err - } - if seq.CounterL1InfoRoot < maxL1InfoIndex+1 { - return fmt.Errorf("wrong CounterL1InfoRoot(%d): BatchL2Data (max=%d) ", seq.CounterL1InfoRoot, maxL1InfoIndex) - } - return nil -} - func (t *TxBuilderBananaBase) getL1InfoRoot(counterL1InfoRoot uint32) (common.Hash, error) { return t.globalExitRootContract.L1InfoRootMap(&bind.CallOpts{Pending: false}, counterL1InfoRoot) } diff --git a/sequencesender/txbuilder/banana_base_test.go b/sequencesender/txbuilder/banana_base_test.go index 44d7a7b1..e5911500 100644 --- a/sequencesender/txbuilder/banana_base_test.go +++ b/sequencesender/txbuilder/banana_base_test.go @@ -6,13 +6,11 @@ import ( "math/big" "testing" - "github.com/0xPolygon/cdk/etherman" "github.com/0xPolygon/cdk/l1infotreesync" "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/sequencesender/seqsendertypes" "github.com/0xPolygon/cdk/sequencesender/txbuilder" "github.com/0xPolygon/cdk/sequencesender/txbuilder/mocks_txbuilder" - "github.com/0xPolygon/cdk/state" "github.com/0xPolygon/cdk/state/datastream" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" @@ -92,41 +90,6 @@ func TestBananaBaseNewSequenceBatch(t *testing.T) { // TODO: check that the seq have the right values } -func TestBananaSanityCheck(t *testing.T) { - batch := state.BatchRawV2{ - Blocks: []state.L2BlockRaw{ - { - BlockNumber: 1, - ChangeL2BlockHeader: state.ChangeL2BlockHeader{ - DeltaTimestamp: 1, - 
IndexL1InfoTree: 1, - }, - }, - }, - } - data, err := state.EncodeBatchV2(&batch) - require.NoError(t, err) - require.NotNil(t, data) - seq := etherman.SequenceBanana{ - CounterL1InfoRoot: 2, - Batches: []etherman.Batch{ - { - L2Data: data, - }, - }, - } - err = txbuilder.SequenceSanityCheck(&seq) - require.NoError(t, err, "inside batchl2data max is 1 and counter is 2 (2>=1+1)") - seq.CounterL1InfoRoot = 1 - err = txbuilder.SequenceSanityCheck(&seq) - require.Error(t, err, "inside batchl2data max is 1 and counter is 1. The batchl2data is not included in counter") -} - -func TestBananaSanityCheckNilSeq(t *testing.T) { - err := txbuilder.SequenceSanityCheck(nil) - require.Error(t, err, "nil sequence") -} - func TestBananaEmptyL1InfoTree(t *testing.T) { testData := newBananaBaseTestData(t) From 0ed309e8050f6ef141b199acd984c55ac313a843 Mon Sep 17 00:00:00 2001 From: Joan Esteban <129153821+joanestebanr@users.noreply.github.com> Date: Tue, 12 Nov 2024 15:47:12 +0100 Subject: [PATCH 27/33] feat: include aggsender to release 0.4.0 (#181) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat: unpack and log agglayer errors (#158) * feat: unpack and log agglayer errors * feat: agglayer error unpacking * fix: lint and UT * feat: epoch notifier (#144) - Send certificates after a percentage of epoch - Require epoch configuration to AggLayer - Change config of `aggsender` adding: `BlockFinality` and `EpochNotificationPercentage` * refact: GetSequence method (#169) * feat: remove sanity check (#178) (#179) --------- Co-authored-by: Goran Rojovic <100121253+goran-ethernal@users.noreply.github.com> Co-authored-by: Rachit Sonthalia <54906134+rachit77@users.noreply.github.com> Co-authored-by: Toni Ramírez <58293609+ToniRamirezM@users.noreply.github.com> --- agglayer/client.go | 30 +- agglayer/client_test.go | 76 ++ agglayer/errors_test.go | 270 +++++++ agglayer/mock_agglayer_client.go | 32 +- agglayer/proof_generation_error.go | 657 
++++++++++++++++++ agglayer/proof_verification_error.go | 164 +++++ .../errors_with_declared_computed_data.json | 45 ++ .../errors_with_token_info.json | 29 + .../errors_without_inner_data.json | 38 + .../invalid_imported_bridge_exit_errors.json | 48 ++ .../invalid_signer_error.json | 21 + .../random_unmarshal_errors.json | 12 + .../errors_with_inner_data.json | 22 + .../errors_without_inner_data.json | 6 + .../errors_with_declared_computed_data.json | 6 + .../errors_with_token_info.json | 26 + .../errors_without_inner_data.json | 6 + ...ullifier_path_generation_failed_error.json | 20 + agglayer/type_conversion_error.go | 255 +++++++ agglayer/types.go | 127 +++- agglayer/types_test.go | 99 +++ aggsender/aggsender.go | 79 ++- aggsender/aggsender_test.go | 84 ++- aggsender/block_notifier_polling.go | 219 ++++++ aggsender/block_notifier_polling_test.go | 211 ++++++ aggsender/config.go | 11 + aggsender/epoch_notifier_per_block.go | 204 ++++++ aggsender/epoch_notifier_per_block_test.go | 219 ++++++ aggsender/generic_subscriber_impl.go | 33 + aggsender/mocks/agg_sender_storage.go | 351 ++++++++++ aggsender/mocks/block_notifier.go | 128 ++++ aggsender/mocks/epoch_notifier.go | 163 +++++ .../{mock_eth_client.go => eth_client.go} | 50 +- aggsender/mocks/generic_subscriber.go | 113 +++ aggsender/mocks/l1_info_tree_syncer.go | 217 ++++++ aggsender/mocks/l2_bridge_syncer.go | 423 +++++++++++ aggsender/mocks/logger.go | 376 ++++++++++ aggsender/mocks/mock_aggsender_storage.go | 351 ---------- aggsender/mocks/mock_l1infotree_syncer.go | 217 ------ aggsender/mocks/mock_l2bridge_syncer.go | 423 ----------- aggsender/mocks/mock_logger.go | 290 -------- aggsender/types/block_notifier.go | 15 + aggsender/types/epoch_notifier.go | 25 + aggsender/types/generic_subscriber.go | 6 + aggsender/types/types.go | 2 + cmd/run.go | 29 +- config/default.go | 5 +- .../datacommittee/datacommittee.go | 33 +- go.mod | 4 +- go.sum | 7 + scripts/local_config | 2 +- sonar-project.properties | 4 +- 
test/Makefile | 9 +- test/bridge-e2e.bats | 22 +- .../kurtosis-cdk-node-config.toml.template | 2 - test/helpers/lxly-bridge-test.bash | 1 + 56 files changed, 4884 insertions(+), 1433 deletions(-) create mode 100644 agglayer/client_test.go create mode 100644 agglayer/errors_test.go create mode 100644 agglayer/proof_generation_error.go create mode 100644 agglayer/proof_verification_error.go create mode 100644 agglayer/testdata/proof_generation_errors/errors_with_declared_computed_data.json create mode 100644 agglayer/testdata/proof_generation_errors/errors_with_token_info.json create mode 100644 agglayer/testdata/proof_generation_errors/errors_without_inner_data.json create mode 100644 agglayer/testdata/proof_generation_errors/invalid_imported_bridge_exit_errors.json create mode 100644 agglayer/testdata/proof_generation_errors/invalid_signer_error.json create mode 100644 agglayer/testdata/proof_generation_errors/random_unmarshal_errors.json create mode 100644 agglayer/testdata/proof_verification_errors/errors_with_inner_data.json create mode 100644 agglayer/testdata/proof_verification_errors/errors_without_inner_data.json create mode 100644 agglayer/testdata/type_conversion_errors/errors_with_declared_computed_data.json create mode 100644 agglayer/testdata/type_conversion_errors/errors_with_token_info.json create mode 100644 agglayer/testdata/type_conversion_errors/errors_without_inner_data.json create mode 100644 agglayer/testdata/type_conversion_errors/nullifier_path_generation_failed_error.json create mode 100644 agglayer/type_conversion_error.go create mode 100644 aggsender/block_notifier_polling.go create mode 100644 aggsender/block_notifier_polling_test.go create mode 100644 aggsender/epoch_notifier_per_block.go create mode 100644 aggsender/epoch_notifier_per_block_test.go create mode 100644 aggsender/generic_subscriber_impl.go create mode 100644 aggsender/mocks/agg_sender_storage.go create mode 100644 aggsender/mocks/block_notifier.go create mode 100644 
aggsender/mocks/epoch_notifier.go rename aggsender/mocks/{mock_eth_client.go => eth_client.go} (50%) create mode 100644 aggsender/mocks/generic_subscriber.go create mode 100644 aggsender/mocks/l1_info_tree_syncer.go create mode 100644 aggsender/mocks/l2_bridge_syncer.go create mode 100644 aggsender/mocks/logger.go delete mode 100644 aggsender/mocks/mock_aggsender_storage.go delete mode 100644 aggsender/mocks/mock_l1infotree_syncer.go delete mode 100644 aggsender/mocks/mock_l2bridge_syncer.go delete mode 100644 aggsender/mocks/mock_logger.go create mode 100644 aggsender/types/block_notifier.go create mode 100644 aggsender/types/epoch_notifier.go create mode 100644 aggsender/types/generic_subscriber.go diff --git a/agglayer/client.go b/agglayer/client.go index e60c1c7c..8396fc9e 100644 --- a/agglayer/client.go +++ b/agglayer/client.go @@ -15,7 +15,14 @@ import ( const errCodeAgglayerRateLimitExceeded int = -10007 -var ErrAgglayerRateLimitExceeded = fmt.Errorf("agglayer rate limit exceeded") +var ( + ErrAgglayerRateLimitExceeded = fmt.Errorf("agglayer rate limit exceeded") + jSONRPCCall = rpc.JSONRPCCall +) + +type AggLayerClientGetEpochConfiguration interface { + GetEpochConfiguration() (*ClockConfiguration, error) +} // AgglayerClientInterface is the interface that defines the methods that the AggLayerClient will implement type AgglayerClientInterface interface { @@ -23,6 +30,7 @@ type AgglayerClientInterface interface { WaitTxToBeMined(hash common.Hash, ctx context.Context) error SendCertificate(certificate *SignedCertificate) (common.Hash, error) GetCertificateHeader(certificateHash common.Hash) (*CertificateHeader, error) + AggLayerClientGetEpochConfiguration } // AggLayerClient is the client that will be used to interact with the AggLayer @@ -130,3 +138,23 @@ func (c *AggLayerClient) GetCertificateHeader(certificateHash common.Hash) (*Cer return result, nil } + +// GetEpochConfiguration returns the clock configuration of AggLayer +func (c *AggLayerClient) 
GetEpochConfiguration() (*ClockConfiguration, error) { + response, err := jSONRPCCall(c.url, "interop_getEpochConfiguration") + if err != nil { + return nil, err + } + + if response.Error != nil { + return nil, fmt.Errorf("GetEpochConfiguration code=%d msg=%s", response.Error.Code, response.Error.Message) + } + + var result *ClockConfiguration + err = json.Unmarshal(response.Result, &result) + if err != nil { + return nil, err + } + + return result, nil +} diff --git a/agglayer/client_test.go b/agglayer/client_test.go new file mode 100644 index 00000000..82baea85 --- /dev/null +++ b/agglayer/client_test.go @@ -0,0 +1,76 @@ +package agglayer + +import ( + "fmt" + "testing" + + "github.com/0xPolygon/cdk-rpc/rpc" + "github.com/stretchr/testify/require" +) + +const ( + testURL = "http://localhost:8080" +) + +func TestExploratoryClient(t *testing.T) { + t.Skip("This test is for exploratory purposes only") + sut := NewAggLayerClient("http://127.0.0.1:32853") + config, err := sut.GetEpochConfiguration() + require.NoError(t, err) + require.NotNil(t, config) + fmt.Printf("Config: %s", config.String()) +} + +func TestGetEpochConfigurationResponseWithError(t *testing.T) { + sut := NewAggLayerClient(testURL) + response := rpc.Response{ + Error: &rpc.ErrorObject{}, + } + jSONRPCCall = func(url, method string, params ...interface{}) (rpc.Response, error) { + return response, nil + } + clockConfig, err := sut.GetEpochConfiguration() + require.Nil(t, clockConfig) + require.Error(t, err) +} + +func TestGetEpochConfigurationResponseBadJson(t *testing.T) { + sut := NewAggLayerClient(testURL) + response := rpc.Response{ + Result: []byte(`{`), + } + jSONRPCCall = func(url, method string, params ...interface{}) (rpc.Response, error) { + return response, nil + } + clockConfig, err := sut.GetEpochConfiguration() + require.Nil(t, clockConfig) + require.Error(t, err) +} + +func TestGetEpochConfigurationErrorResponse(t *testing.T) { + sut := NewAggLayerClient(testURL) + + jSONRPCCall = 
func(url, method string, params ...interface{}) (rpc.Response, error) { + return rpc.Response{}, fmt.Errorf("unittest error") + } + clockConfig, err := sut.GetEpochConfiguration() + require.Nil(t, clockConfig) + require.Error(t, err) +} + +func TestGetEpochConfigurationOkResponse(t *testing.T) { + sut := NewAggLayerClient(testURL) + response := rpc.Response{ + Result: []byte(`{"epoch_duration": 1, "genesis_block": 1}`), + } + jSONRPCCall = func(url, method string, params ...interface{}) (rpc.Response, error) { + return response, nil + } + clockConfig, err := sut.GetEpochConfiguration() + require.NotNil(t, clockConfig) + require.NoError(t, err) + require.Equal(t, ClockConfiguration{ + EpochDuration: 1, + GenesisBlock: 1, + }, *clockConfig) +} diff --git a/agglayer/errors_test.go b/agglayer/errors_test.go new file mode 100644 index 00000000..3ca7b7ed --- /dev/null +++ b/agglayer/errors_test.go @@ -0,0 +1,270 @@ +package agglayer + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestErrorVectors(t *testing.T) { + t.Parallel() + + type testCase struct { + TestName string `json:"test_name"` + ExpectedError string `json:"expected_error"` + CertificateHeaderJSON string `json:"certificate_header"` + } + + files, err := filepath.Glob("testdata/*/*.json") + require.NoError(t, err) + + for _, file := range files { + file := file + + t.Run(file, func(t *testing.T) { + t.Parallel() + + data, err := os.ReadFile(file) + require.NoError(t, err) + + var testCases []*testCase + + require.NoError(t, json.Unmarshal(data, &testCases)) + + for _, tc := range testCases { + certificateHeader := &CertificateHeader{} + err = json.Unmarshal([]byte(tc.CertificateHeaderJSON), certificateHeader) + + if tc.ExpectedError == "" { + require.NoError(t, err, "Test: %s not expected any unmarshal error, but got: %v", tc.TestName, err) + require.NotNil(t, certificateHeader.Error, "Test: %s unpacked error is nil", tc.TestName) 
+ fmt.Println(certificateHeader.Error.String()) + } else { + require.ErrorContains(t, err, tc.ExpectedError, "Test: %s expected error: %s. Got: %v", tc.TestName, tc.ExpectedError, err) + } + } + }) + } +} + +func TestConvertMapValue_String(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + data map[string]interface{} + key string + want string + errString string + }{ + { + name: "Key exists and type matches", + data: map[string]interface{}{ + "key1": "value1", + }, + key: "key1", + want: "value1", + }, + { + name: "Key exists but type does not match", + data: map[string]interface{}{ + "key1": 1, + }, + key: "key1", + want: "", + errString: "is not of type", + }, + { + name: "Key does not exist", + data: map[string]interface{}{ + "key1": "value1", + }, + key: "key2", + want: "", + errString: "key key2 not found in map", + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + got, err := convertMapValue[string](tt.data, tt.key) + if tt.errString != "" { + require.ErrorContains(t, err, tt.errString) + } else { + require.Equal(t, tt.want, got) + } + }) + } +} + +//nolint:dupl +func TestConvertMapValue_Uint32(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + data map[string]interface{} + key string + want uint32 + errString string + }{ + { + name: "Key exists and type matches", + data: map[string]interface{}{ + "key1": uint32(123), + }, + key: "key1", + want: uint32(123), + }, + { + name: "Key exists but type does not match", + data: map[string]interface{}{ + "key1": "value1", + }, + key: "key1", + want: 0, + errString: "is not of type", + }, + { + name: "Key does not exist", + data: map[string]interface{}{ + "key1": uint32(123), + }, + key: "key2", + want: 0, + errString: "key key2 not found in map", + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + got, err := convertMapValue[uint32](tt.data, tt.key) + if 
tt.errString != "" { + require.ErrorContains(t, err, tt.errString) + } else { + require.Equal(t, tt.want, got) + } + }) + } +} + +//nolint:dupl +func TestConvertMapValue_Uint64(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + data map[string]interface{} + key string + want uint64 + errString string + }{ + { + name: "Key exists and type matches", + data: map[string]interface{}{ + "key1": uint64(3411), + }, + key: "key1", + want: uint64(3411), + }, + { + name: "Key exists but type does not match", + data: map[string]interface{}{ + "key1": "not a number", + }, + key: "key1", + want: 0, + errString: "is not of type", + }, + { + name: "Key does not exist", + data: map[string]interface{}{ + "key1": uint64(123555), + }, + key: "key22", + want: 0, + errString: "key key22 not found in map", + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + got, err := convertMapValue[uint64](tt.data, tt.key) + if tt.errString != "" { + require.ErrorContains(t, err, tt.errString) + } else { + require.Equal(t, tt.want, got) + } + }) + } +} + +func TestConvertMapValue_Bool(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + data map[string]interface{} + key string + want bool + errString string + }{ + { + name: "Key exists and type matches", + data: map[string]interface{}{ + "key1": true, + }, + key: "key1", + want: true, + }, + { + name: "Key exists but type does not match", + data: map[string]interface{}{ + "key1": "value1", + }, + key: "key1", + want: false, + errString: "is not of type", + }, + { + name: "Key does not exist", + data: map[string]interface{}{ + "key1": true, + }, + key: "key2", + want: false, + errString: "key key2 not found in map", + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + got, err := convertMapValue[bool](tt.data, tt.key) + if tt.errString != "" { + require.ErrorContains(t, err, tt.errString) + } else { + 
require.Equal(t, tt.want, got) + } + }) + } +} diff --git a/agglayer/mock_agglayer_client.go b/agglayer/mock_agglayer_client.go index 43100a2e..1b756713 100644 --- a/agglayer/mock_agglayer_client.go +++ b/agglayer/mock_agglayer_client.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.45.0. DO NOT EDIT. +// Code generated by mockery v2.39.0. DO NOT EDIT. package agglayer @@ -45,6 +45,36 @@ func (_m *AgglayerClientMock) GetCertificateHeader(certificateHash common.Hash) return r0, r1 } +// GetEpochConfiguration provides a mock function with given fields: +func (_m *AgglayerClientMock) GetEpochConfiguration() (*ClockConfiguration, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetEpochConfiguration") + } + + var r0 *ClockConfiguration + var r1 error + if rf, ok := ret.Get(0).(func() (*ClockConfiguration, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() *ClockConfiguration); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*ClockConfiguration) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // SendCertificate provides a mock function with given fields: certificate func (_m *AgglayerClientMock) SendCertificate(certificate *SignedCertificate) (common.Hash, error) { ret := _m.Called(certificate) diff --git a/agglayer/proof_generation_error.go b/agglayer/proof_generation_error.go new file mode 100644 index 00000000..fa7012f7 --- /dev/null +++ b/agglayer/proof_generation_error.go @@ -0,0 +1,657 @@ +package agglayer + +import ( + "errors" + "fmt" + "reflect" + + "github.com/ethereum/go-ethereum/common" +) + +var errNotMap = errors.New("inner error is not a map") + +const ( + InvalidSignerErrorType = "InvalidSigner" + InvalidPreviousLERErrorType = "InvalidPreviousLocalExitRoot" + InvalidPreviousBalanceRootErrorType = "InvalidPreviousBalanceRoot" + InvalidPreviousNullifierRootErrorType = 
"InvalidPreviousNullifierRoot" + InvalidNewLocalExitRootErrorType = "InvalidNewLocalExitRoot" + InvalidNewBalanceRootErrorType = "InvalidNewBalanceRoot" + InvalidNewNullifierRootErrorType = "InvalidNewNullifierRoot" + InvalidImportedExitsRootErrorType = "InvalidImportedExitsRoot" + MismatchImportedExitsRootErrorType = "MismatchImportedExitsRoot" + InvalidNullifierPathErrorType = "InvalidNullifierPath" + InvalidBalancePathErrorType = "InvalidBalancePath" + BalanceOverflowInBridgeExitErrorType = "BalanceOverflowInBridgeExit" + BalanceUnderflowInBridgeExitErrorType = "BalanceUnderflowInBridgeExit" + CannotExitToSameNetworkErrorType = "CannotExitToSameNetwork" + InvalidMessageOriginNetworkErrorType = "InvalidMessageOriginNetwork" + InvalidL1TokenInfoErrorType = "InvalidL1TokenInfo" + MissingTokenBalanceProofErrorType = "MissingTokenBalanceProof" + DuplicateTokenBalanceProofErrorType = "DuplicateTokenBalanceProof" + InvalidSignatureErrorType = "InvalidSignature" + InvalidImportedBridgeExitErrorType = "InvalidImportedBridgeExit" + UnknownErrorType = "UnknownError" +) + +type PPError interface { + String() string +} + +// ProofGenerationError is a struct that represents an error that occurs when generating a proof. +type ProofGenerationError struct { + GenerationType string + InnerErrors []PPError +} + +// String is the implementation of the Error interface +func (p *ProofGenerationError) String() string { + return fmt.Sprintf("Proof generation error: %s. %s", p.GenerationType, p.InnerErrors) +} + +// Unmarshal unmarshals the data from a map into a ProofGenerationError struct. 
+func (p *ProofGenerationError) Unmarshal(data interface{}) error { + dataMap, ok := data.(map[string]interface{}) + if !ok { + return errNotMap + } + + generationType, err := convertMapValue[string](dataMap, "generation_type") + if err != nil { + return err + } + + p.GenerationType = generationType + + getPPErrFn := func(key string, value interface{}) (PPError, error) { + switch key { + case InvalidSignerErrorType: + invalidSigner := &InvalidSignerError{} + if err := invalidSigner.UnmarshalFromMap(value); err != nil { + return nil, err + } + return invalidSigner, nil + case InvalidPreviousLERErrorType: + invalidPreviousLER := NewInvalidPreviousLocalExitRoot() + if err := invalidPreviousLER.UnmarshalFromMap(value); err != nil { + return nil, err + } + p.InnerErrors = append(p.InnerErrors, invalidPreviousLER) + case InvalidPreviousBalanceRootErrorType: + invalidPreviousBalanceRoot := NewInvalidPreviousBalanceRoot() + if err := invalidPreviousBalanceRoot.UnmarshalFromMap(value); err != nil { + return nil, err + } + p.InnerErrors = append(p.InnerErrors, invalidPreviousBalanceRoot) + case InvalidPreviousNullifierRootErrorType: + invalidPreviousNullifierRoot := NewInvalidPreviousNullifierRoot() + if err := invalidPreviousNullifierRoot.UnmarshalFromMap(value); err != nil { + return nil, err + } + p.InnerErrors = append(p.InnerErrors, invalidPreviousNullifierRoot) + case InvalidNewLocalExitRootErrorType: + invalidNewLocalExitRoot := NewInvalidNewLocalExitRoot() + if err := invalidNewLocalExitRoot.UnmarshalFromMap(value); err != nil { + return nil, err + } + p.InnerErrors = append(p.InnerErrors, invalidNewLocalExitRoot) + case InvalidNewBalanceRootErrorType: + invalidNewBalanceRoot := NewInvalidNewBalanceRoot() + if err := invalidNewBalanceRoot.UnmarshalFromMap(value); err != nil { + return nil, err + } + p.InnerErrors = append(p.InnerErrors, invalidNewBalanceRoot) + case InvalidNewNullifierRootErrorType: + invalidNewNullifierRoot := NewInvalidNewNullifierRoot() + if err 
:= invalidNewNullifierRoot.UnmarshalFromMap(value); err != nil { + return nil, err + } + p.InnerErrors = append(p.InnerErrors, invalidNewNullifierRoot) + case InvalidImportedExitsRootErrorType: + invalidImportedExitsRoot := NewInvalidImportedExitsRoot() + if err := invalidImportedExitsRoot.UnmarshalFromMap(value); err != nil { + return nil, err + } + p.InnerErrors = append(p.InnerErrors, invalidImportedExitsRoot) + case MismatchImportedExitsRootErrorType: + p.InnerErrors = append(p.InnerErrors, &MismatchImportedExitsRoot{}) + case InvalidNullifierPathErrorType: + p.InnerErrors = append(p.InnerErrors, &InvalidNullifierPath{}) + case InvalidBalancePathErrorType: + p.InnerErrors = append(p.InnerErrors, &InvalidBalancePath{}) + case BalanceOverflowInBridgeExitErrorType: + p.InnerErrors = append(p.InnerErrors, &BalanceOverflowInBridgeExit{}) + case BalanceUnderflowInBridgeExitErrorType: + p.InnerErrors = append(p.InnerErrors, &BalanceUnderflowInBridgeExit{}) + case CannotExitToSameNetworkErrorType: + p.InnerErrors = append(p.InnerErrors, &CannotExitToSameNetwork{}) + case InvalidMessageOriginNetworkErrorType: + p.InnerErrors = append(p.InnerErrors, &InvalidMessageOriginNetwork{}) + case InvalidL1TokenInfoErrorType: + invalidL1TokenInfo := NewInvalidL1TokenInfo() + if err := invalidL1TokenInfo.UnmarshalFromMap(value); err != nil { + return nil, err + } + p.InnerErrors = append(p.InnerErrors, invalidL1TokenInfo) + case MissingTokenBalanceProofErrorType: + missingTokenBalanceProof := NewMissingTokenBalanceProof() + if err := missingTokenBalanceProof.UnmarshalFromMap(value); err != nil { + return nil, err + } + p.InnerErrors = append(p.InnerErrors, missingTokenBalanceProof) + case DuplicateTokenBalanceProofErrorType: + duplicateTokenBalanceProof := NewDuplicateTokenBalanceProof() + if err := duplicateTokenBalanceProof.UnmarshalFromMap(value); err != nil { + return nil, err + } + p.InnerErrors = append(p.InnerErrors, duplicateTokenBalanceProof) + case 
InvalidSignatureErrorType: + p.InnerErrors = append(p.InnerErrors, &InvalidSignature{}) + case InvalidImportedBridgeExitErrorType: + invalidImportedBridgeExit := &InvalidImportedBridgeExit{} + if err := invalidImportedBridgeExit.UnmarshalFromMap(value); err != nil { + return nil, err + } + p.InnerErrors = append(p.InnerErrors, invalidImportedBridgeExit) + case UnknownErrorType: + p.InnerErrors = append(p.InnerErrors, &UnknownError{}) + default: + return nil, fmt.Errorf("unknown proof generation error type: %s", key) + } + + return nil, nil + } + + errorSourceMap, err := convertMapValue[map[string]interface{}](dataMap, "source") + if err != nil { + // it can be a single error + errSourceString, err := convertMapValue[string](dataMap, "source") + if err != nil { + return err + } + + ppErr, err := getPPErrFn(errSourceString, nil) + if err != nil { + return err + } + + if ppErr != nil { + p.InnerErrors = append(p.InnerErrors, ppErr) + } + + return nil + } + + // there will always be only one key in the source map + for key, value := range errorSourceMap { + ppErr, err := getPPErrFn(key, value) + if err != nil { + return err + } + + if ppErr != nil { + p.InnerErrors = append(p.InnerErrors, ppErr) + } + } + + return nil +} + +// InvalidSignerError is a struct that represents an error that occurs when +// the signer of the certificate is invalid, or the hash that was signed was not valid. +type InvalidSignerError struct { + Declared common.Address `json:"declared"` + Recovered common.Address `json:"recovered"` +} + +// String is the implementation of the Error interface +func (e *InvalidSignerError) String() string { + return fmt.Sprintf("%s. 
Declared: %s, Computed: %s", + InvalidSignerErrorType, e.Declared.String(), e.Recovered.String()) +} + +func (e *InvalidSignerError) UnmarshalFromMap(data interface{}) error { + dataMap, ok := data.(map[string]interface{}) + if !ok { + return errNotMap + } + + declared, err := convertMapValue[string](dataMap, "declared") + if err != nil { + return err + } + + recovered, err := convertMapValue[string](dataMap, "recovered") + if err != nil { + return err + } + + e.Declared = common.HexToAddress(declared) + e.Recovered = common.HexToAddress(recovered) + + return nil +} + +// DeclaredComputedError is a base struct for errors that have both declared and computed values. +type DeclaredComputedError struct { + Declared common.Hash `json:"declared"` + Computed common.Hash `json:"computed"` + ErrType string +} + +// String is the implementation of the Error interface +func (e *DeclaredComputedError) String() string { + return fmt.Sprintf("%s. Declared: %s, Computed: %s", + e.ErrType, e.Declared.String(), e.Computed.String()) +} + +// UnmarshalFromMap is the implementation of the Error interface +func (e *DeclaredComputedError) UnmarshalFromMap(data interface{}) error { + dataMap, ok := data.(map[string]interface{}) + if !ok { + return errNotMap + } + + declared, err := convertMapValue[string](dataMap, "declared") + if err != nil { + return err + } + + computed, err := convertMapValue[string](dataMap, "computed") + if err != nil { + return err + } + + e.Declared = common.HexToHash(declared) + e.Computed = common.HexToHash(computed) + + return nil +} + +// InvalidPreviousLocalExitRoot is a struct that represents an error that occurs when +// the previous local exit root is invalid. 
+type InvalidPreviousLocalExitRoot struct { + *DeclaredComputedError +} + +func NewInvalidPreviousLocalExitRoot() *InvalidPreviousLocalExitRoot { + return &InvalidPreviousLocalExitRoot{ + DeclaredComputedError: &DeclaredComputedError{ErrType: InvalidPreviousLERErrorType}, + } +} + +// InvalidPreviousBalanceRoot is a struct that represents an error that occurs when +// the previous balance root is invalid. +type InvalidPreviousBalanceRoot struct { + *DeclaredComputedError +} + +func NewInvalidPreviousBalanceRoot() *InvalidPreviousBalanceRoot { + return &InvalidPreviousBalanceRoot{ + DeclaredComputedError: &DeclaredComputedError{ErrType: InvalidPreviousBalanceRootErrorType}, + } +} + +// InvalidPreviousNullifierRoot is a struct that represents an error that occurs when +// the previous nullifier root is invalid. +type InvalidPreviousNullifierRoot struct { + *DeclaredComputedError +} + +func NewInvalidPreviousNullifierRoot() *InvalidPreviousNullifierRoot { + return &InvalidPreviousNullifierRoot{ + DeclaredComputedError: &DeclaredComputedError{ErrType: InvalidPreviousNullifierRootErrorType}, + } +} + +// InvalidNewLocalExitRoot is a struct that represents an error that occurs when +// the new local exit root is invalid. +type InvalidNewLocalExitRoot struct { + *DeclaredComputedError +} + +func NewInvalidNewLocalExitRoot() *InvalidNewLocalExitRoot { + return &InvalidNewLocalExitRoot{ + DeclaredComputedError: &DeclaredComputedError{ErrType: InvalidNewLocalExitRootErrorType}, + } +} + +// InvalidNewBalanceRoot is a struct that represents an error that occurs when +// the new balance root is invalid. +type InvalidNewBalanceRoot struct { + *DeclaredComputedError +} + +func NewInvalidNewBalanceRoot() *InvalidNewBalanceRoot { + return &InvalidNewBalanceRoot{ + DeclaredComputedError: &DeclaredComputedError{ErrType: InvalidNewBalanceRootErrorType}, + } +} + +// InvalidNewNullifierRoot is a struct that represents an error that occurs when +// the new nullifier root is invalid. 
+type InvalidNewNullifierRoot struct { + *DeclaredComputedError +} + +func NewInvalidNewNullifierRoot() *InvalidNewNullifierRoot { + return &InvalidNewNullifierRoot{ + DeclaredComputedError: &DeclaredComputedError{ErrType: InvalidNewNullifierRootErrorType}, + } +} + +// InvalidImportedExitsRoot is a struct that represents an error that occurs when +// the imported exits root is invalid. +type InvalidImportedExitsRoot struct { + *DeclaredComputedError +} + +func NewInvalidImportedExitsRoot() *InvalidImportedExitsRoot { + return &InvalidImportedExitsRoot{ + DeclaredComputedError: &DeclaredComputedError{ErrType: InvalidImportedExitsRootErrorType}, + } +} + +// MismatchImportedExitsRoot is a struct that represents an error that occurs when +// the commitment to the list of imported bridge exits but the list of imported bridge exits is empty. +type MismatchImportedExitsRoot struct{} + +// String is the implementation of the Error interface +func (e *MismatchImportedExitsRoot) String() string { + return fmt.Sprintf(`%s: The commitment to the list of imported bridge exits + should be Some if and only if this list is non-empty, should be None otherwise.`, + MismatchImportedExitsRootErrorType) +} + +// InvalidNullifierPath is a struct that represents an error that occurs when +// the provided nullifier path is invalid. +type InvalidNullifierPath struct{} + +// String is the implementation of the Error interface +func (e *InvalidNullifierPath) String() string { + return fmt.Sprintf("%s: The provided nullifier path is invalid", InvalidNullifierPathErrorType) +} + +// InvalidBalancePath is a struct that represents an error that occurs when +// the provided balance path is invalid. 
+type InvalidBalancePath struct{} + +// String is the implementation of the Error interface +func (e *InvalidBalancePath) String() string { + return fmt.Sprintf("%s: The provided balance path is invalid", InvalidBalancePathErrorType) +} + +// BalanceOverflowInBridgeExit is a struct that represents an error that occurs when +// bridge exit led to balance overflow. +type BalanceOverflowInBridgeExit struct{} + +// String is the implementation of the Error interface +func (e *BalanceOverflowInBridgeExit) String() string { + return fmt.Sprintf("%s: The imported bridge exit led to balance overflow.", BalanceOverflowInBridgeExitErrorType) +} + +// BalanceUnderflowInBridgeExit is a struct that represents an error that occurs when +// bridge exit led to balance underflow. +type BalanceUnderflowInBridgeExit struct{} + +// String is the implementation of the Error interface +func (e *BalanceUnderflowInBridgeExit) String() string { + return fmt.Sprintf("%s: The imported bridge exit led to balance underflow.", BalanceUnderflowInBridgeExitErrorType) +} + +// CannotExitToSameNetwork is a struct that represents an error that occurs when +// the user tries to exit to the same network. +type CannotExitToSameNetwork struct{} + +// String is the implementation of the Error interface +func (e *CannotExitToSameNetwork) String() string { + return fmt.Sprintf("%s: The provided bridge exit goes to the sender’s own network which is not permitted.", + CannotExitToSameNetworkErrorType) +} + +// InvalidMessageOriginNetwork is a struct that represents an error that occurs when +// the origin network of the message is invalid. +type InvalidMessageOriginNetwork struct{} + +// String is the implementation of the Error interface +func (e *InvalidMessageOriginNetwork) String() string { + return fmt.Sprintf("%s: The origin network of the message is invalid.", InvalidMessageOriginNetworkErrorType) +} + +// TokenInfoError is a struct inherited by other errors that have a TokenInfo field. 
+type TokenInfoError struct { + TokenInfo *TokenInfo `json:"token_info"` + isNested bool +} + +func (e *TokenInfoError) UnmarshalFromMap(data interface{}) error { + dataMap, ok := data.(map[string]interface{}) + if !ok { + return errNotMap + } + + var ( + err error + tokenInfoMap map[string]interface{} + ) + + if e.isNested { + tokenInfoMap, err = convertMapValue[map[string]interface{}](dataMap, "TokenInfo") + if err != nil { + return err + } + } else { + tokenInfoMap = dataMap + } + + originNetwork, err := convertMapValue[uint32](tokenInfoMap, "origin_network") + if err != nil { + return err + } + + originAddress, err := convertMapValue[string](tokenInfoMap, "origin_token_address") + if err != nil { + return err + } + + e.TokenInfo = &TokenInfo{ + OriginNetwork: originNetwork, + OriginTokenAddress: common.HexToAddress(originAddress), + } + + return nil +} + +// InvalidL1TokenInfo is a struct that represents an error that occurs when +// the L1 token info is invalid. +type InvalidL1TokenInfo struct { + *TokenInfoError +} + +// NewInvalidL1TokenInfo returns a new instance of InvalidL1TokenInfo. +func NewInvalidL1TokenInfo() *InvalidL1TokenInfo { + return &InvalidL1TokenInfo{ + TokenInfoError: &TokenInfoError{isNested: true}, + } +} + +// String is the implementation of the Error interface +func (e *InvalidL1TokenInfo) String() string { + return fmt.Sprintf("%s: The L1 token info is invalid. %s", + InvalidL1TokenInfoErrorType, e.TokenInfo.String()) +} + +// MissingTokenBalanceProof is a struct that represents an error that occurs when +// the token balance proof is missing. +type MissingTokenBalanceProof struct { + *TokenInfoError +} + +// NewMissingTokenBalanceProof returns a new instance of MissingTokenBalanceProof. 
+func NewMissingTokenBalanceProof() *MissingTokenBalanceProof { + return &MissingTokenBalanceProof{ + TokenInfoError: &TokenInfoError{isNested: true}, + } +} + +// String is the implementation of the Error interface +func (e *MissingTokenBalanceProof) String() string { + return fmt.Sprintf("%s: The provided token is missing a balance proof. %s", + MissingTokenBalanceProofErrorType, e.TokenInfo.String()) +} + +// DuplicateTokenBalanceProof is a struct that represents an error that occurs when +// the token balance proof is duplicated. +type DuplicateTokenBalanceProof struct { + *TokenInfoError +} + +// NewDuplicateTokenBalanceProof returns a new instance of DuplicateTokenBalanceProof. +func NewDuplicateTokenBalanceProof() *DuplicateTokenBalanceProof { + return &DuplicateTokenBalanceProof{ + TokenInfoError: &TokenInfoError{isNested: true}, + } +} + +// String is the implementation of the Error interface +func (e *DuplicateTokenBalanceProof) String() string { + return fmt.Sprintf("%s: The provided token comes with multiple balance proofs. %s", + DuplicateTokenBalanceProofErrorType, e.TokenInfo.String()) +} + +// InvalidSignature is a struct that represents an error that occurs when +// the signature is invalid. +type InvalidSignature struct{} + +// String is the implementation of the Error interface +func (e *InvalidSignature) String() string { + return fmt.Sprintf("%s: The provided signature is invalid.", InvalidSignatureErrorType) +} + +// UnknownError is a struct that represents an error that occurs when +// an unknown error is encountered. +type UnknownError struct{} + +// String is the implementation of the Error interface +func (e *UnknownError) String() string { + return fmt.Sprintf("%s: An unknown error occurred.", UnknownErrorType) +} + +// InvalidImportedBridgeExit is a struct that represents an error that occurs when +// the imported bridge exit is invalid. 
+type InvalidImportedBridgeExit struct { + GlobalIndex *GlobalIndex `json:"global_index"` + ErrorType string `json:"error_type"` +} + +// String is the implementation of the Error interface +func (e *InvalidImportedBridgeExit) String() string { + var errorDescription string + switch e.ErrorType { + case "MismatchGlobalIndexInclusionProof": + errorDescription = "The global index and the inclusion proof do not both correspond " + + "to the same network type: mainnet or rollup." + case "MismatchL1Root": + errorDescription = "The provided L1 info root does not match the one provided in the inclusion proof." + case "MismatchMER": + errorDescription = "The provided MER does not match the one provided in the inclusion proof." + case "MismatchRER": + errorDescription = "The provided RER does not match the one provided in the inclusion proof." + case "InvalidMerklePathLeafToLER": + errorDescription = "The inclusion proof from the leaf to the LER is invalid." + case "InvalidMerklePathLERToRER": + errorDescription = "The inclusion proof from the LER to the RER is invalid." + case "InvalidMerklePathGERToL1Root": + errorDescription = "The inclusion proof from the GER to the L1 info Root is invalid." + case "InvalidExitNetwork": + errorDescription = "The provided imported bridge exit does not target the right destination network." + default: + errorDescription = "An unknown error occurred." + } + + return fmt.Sprintf("%s: Global index: %s. Error type: %s. 
%s", + InvalidImportedBridgeExitErrorType, e.GlobalIndex.String(), e.ErrorType, errorDescription) +} + +func (e *InvalidImportedBridgeExit) UnmarshalFromMap(data interface{}) error { + dataMap, ok := data.(map[string]interface{}) + if !ok { + return errNotMap + } + + sourceErr, err := convertMapValue[string](dataMap, "source") + if err != nil { + return err + } + + e.ErrorType = sourceErr + + globalIndexMap, err := convertMapValue[map[string]interface{}](dataMap, "global_index") + if err != nil { + return err + } + + e.GlobalIndex = &GlobalIndex{} + return e.GlobalIndex.UnmarshalFromMap(globalIndexMap) +} + +// convertMapValue converts the value of a key in a map to a target type. +func convertMapValue[T any](data map[string]interface{}, key string) (T, error) { + value, ok := data[key] + if !ok { + var zero T + return zero, fmt.Errorf("key %s not found in map", key) + } + + // Try a direct type assertion + if convertedValue, ok := value.(T); ok { + return convertedValue, nil + } + + // If direct assertion fails, handle numeric type conversions + var target T + targetType := reflect.TypeOf(target) + + // Check if value is a float64 (default JSON number type) and target is a numeric type + if floatValue, ok := value.(float64); ok && targetType.Kind() >= reflect.Int && targetType.Kind() <= reflect.Uint64 { + convertedValue, err := convertNumeric(floatValue, targetType) + if err != nil { + return target, fmt.Errorf("conversion error for key %s: %w", key, err) + } + return convertedValue.(T), nil //nolint:forcetypeassert + } + + return target, fmt.Errorf("value of key %s is not of type %T", key, target) +} + +// convertNumeric converts a float64 to the specified numeric type. 
+func convertNumeric(value float64, targetType reflect.Type) (interface{}, error) { + switch targetType.Kind() { + case reflect.Int: + return int(value), nil + case reflect.Int8: + return int8(value), nil + case reflect.Int16: + return int16(value), nil + case reflect.Int32: + return int32(value), nil + case reflect.Int64: + return int64(value), nil + case reflect.Uint: + return uint(value), nil + case reflect.Uint8: + return uint8(value), nil + case reflect.Uint16: + return uint16(value), nil + case reflect.Uint32: + return uint32(value), nil + case reflect.Uint64: + return uint64(value), nil + case reflect.Float32: + return float32(value), nil + case reflect.Float64: + return value, nil + default: + return nil, errors.New("unsupported target type") + } +} diff --git a/agglayer/proof_verification_error.go b/agglayer/proof_verification_error.go new file mode 100644 index 00000000..dd5c5f74 --- /dev/null +++ b/agglayer/proof_verification_error.go @@ -0,0 +1,164 @@ +package agglayer + +import "fmt" + +const ( + VersionMismatchErrorType = "VersionMismatch" + CoreErrorType = "Core" + RecursionErrorType = "Recursion" + PlankErrorType = "Plank" + Groth16ErrorType = "Groth16" + InvalidPublicValuesErrorType = "InvalidPublicValues" +) + +// ProofVerificationError is an error that is returned when verifying a proof +type ProofVerificationError struct { + InnerErrors []PPError +} + +// String is the implementation of the Error interface +func (p *ProofVerificationError) String() string { + return fmt.Sprintf("Proof verification error: %v", p.InnerErrors) +} + +// Unmarshal unmarshals the data from a map into a ProofVerificationError struct. 
+func (p *ProofVerificationError) Unmarshal(data interface{}) error { + getPPErrFn := func(key string, value interface{}) (PPError, error) { + switch key { + case VersionMismatchErrorType: + versionMismatch := &VersionMismatch{} + if err := versionMismatch.Unmarshal(value); err != nil { + return nil, err + } + return versionMismatch, nil + case CoreErrorType: + core := &Core{} + if err := core.Unmarshal(value); err != nil { + return nil, err + } + return core, nil + case RecursionErrorType: + recursion := &Recursion{} + if err := recursion.Unmarshal(value); err != nil { + return nil, err + } + return recursion, nil + case PlankErrorType: + plank := &Plank{} + if err := plank.Unmarshal(value); err != nil { + return nil, err + } + return plank, nil + case Groth16ErrorType: + groth16 := &Groth16{} + if err := groth16.Unmarshal(value); err != nil { + return nil, err + } + return groth16, nil + case InvalidPublicValuesErrorType: + return &InvalidPublicValues{}, nil + default: + return nil, fmt.Errorf("unknown proof verification error type: %v", key) + } + } + + getAndAddInnerErrorFn := func(key string, value interface{}) error { + ppErr, err := getPPErrFn(key, value) + if err != nil { + return err + } + + if ppErr != nil { + p.InnerErrors = append(p.InnerErrors, ppErr) + } + + return nil + } + + dataMap, ok := data.(map[string]interface{}) + if !ok { + // it can be a single error + return getAndAddInnerErrorFn(data.(string), nil) //nolint:forcetypeassert + } + + for key, value := range dataMap { + if err := getAndAddInnerErrorFn(key, value); err != nil { + return err + } + } + + return nil +} + +// StringError is an error that is inherited by other errors that expect a string +// field in the data. +type StringError string + +// Unmarshal unmarshals the data from an interface{} into a StringError. 
+func (e *StringError) Unmarshal(data interface{}) error { + str, ok := data.(string) + if !ok { + return fmt.Errorf("expected string for StringError, got %T", data) + } + *e = StringError(str) + return nil +} + +// VersionMismatch is an error that is returned when the version of the proof is +// different from the version of the core. +type VersionMismatch struct { + StringError +} + +// String is the implementation of the Error interface +func (e *VersionMismatch) String() string { + return fmt.Sprintf("%s: %s", VersionMismatchErrorType, e.StringError) +} + +// Core is an error that is returned when the core machine verification fails. +type Core struct { + StringError +} + +// String is the implementation of the Error interface +func (e *Core) String() string { + return fmt.Sprintf("%s: Core machine verification error: %s", CoreErrorType, e.StringError) +} + +// Recursion is an error that is returned when the recursion verification fails. +type Recursion struct { + StringError +} + +// String is the implementation of the Error interface +func (e *Recursion) String() string { + return fmt.Sprintf("%s: Recursion verification error: %s", RecursionErrorType, e.StringError) +} + +// Plank is an error that is returned when the plank verification fails. +type Plank struct { + StringError +} + +// String is the implementation of the Error interface +func (e *Plank) String() string { + return fmt.Sprintf("%s: Plank verification error: %s", PlankErrorType, e.StringError) +} + +// Groth16 is an error that is returned when the Groth16 verification fails. +type Groth16 struct { + StringError +} + +// String is the implementation of the Error interface +func (e *Groth16) String() string { + return fmt.Sprintf("%s: Groth16 verification error: %s", Groth16ErrorType, e.StringError) +} + +// InvalidPublicValues is an error that is returned when the public values are invalid. 
+type InvalidPublicValues struct{} + +// String is the implementation of the Error interface +func (e *InvalidPublicValues) String() string { + return fmt.Sprintf("%s: Invalid public values", InvalidPublicValuesErrorType) +} diff --git a/agglayer/testdata/proof_generation_errors/errors_with_declared_computed_data.json b/agglayer/testdata/proof_generation_errors/errors_with_declared_computed_data.json new file mode 100644 index 00000000..4b1b4029 --- /dev/null +++ b/agglayer/testdata/proof_generation_errors/errors_with_declared_computed_data.json @@ -0,0 +1,45 @@ +[ + { + "test_name": "InvalidImportedExitsRoot", + "certificate_header": "{\"network_id\":14,\"height\":1,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"ProofGenerationError\":{\"generation_type\":\"Native\",\"source\":{\"InvalidImportedExitsRoot\":{\"declared\":\"0x1116837a43bdc3dd9f114558daf4b26ed4eeeeec\",\"computed\":\"0x20222bfbb55589f7fd0bec3666e3de469111ce3c\"}}}}}}}" + }, + { + "test_name": "InvalidNewBalanceRoot", + "certificate_header": "{\"network_id\":11,\"height\":31,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"ProofGenerationError\":{\"generation_type\":\"Native\",\"source\":{\"InvalidNewBalanceRoot\":{\"declared\":\"0x5b06837a43bdc3dd9f114558daf4b26ed4eeeeec\",\"computed\":\"0x20e92bfbb55589f7fd0bec3666e3de469111ce3c\"}}}}}}}" + }, + { + "test_name": "InvalidNewLocalExitRoot", + 
+        "certificate_header": "{\"network_id\":3,\"height\":22,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"ProofGenerationError\":{\"generation_type\":\"Native\",\"source\":{\"InvalidNewLocalExitRoot\":{\"declared\":\"0x5b06837a43bdc3dd9f114558daf4b26ed49831ec\",\"computed\":\"0x20e92bfbb55589f7fd0bec3666e3de469525ce3c\"}}}}}}}"
+    },
+    {
+        "test_name": "InvalidNewNullifierRoot",
+        "certificate_header": "{\"network_id\":2,\"height\":12,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"ProofGenerationError\":{\"generation_type\":\"Native\",\"source\":{\"InvalidNewNullifierRoot\":{\"declared\":\"0x5b06837a43bdc3dd9f114558daf4b26ed4ccceec\",\"computed\":\"0x20e92bfbb55589f7fd0bec3666e3de222111ce3c\"}}}}}}}"
+    },
+    {
+        "test_name": "InvalidPreviousBalanceRoot",
+        "certificate_header": "{\"network_id\":2,\"height\":11,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"ProofGenerationError\":{\"generation_type\":\"Native\",\"source\":{\"InvalidPreviousBalanceRoot\":{\"declared\":\"0x5b06837a43bdc3dd9f114558daf4b26ed49842ec\",\"computed\":\"0x20e92bfbb55589f7fd0bec3666e3de469526de3c\"}}}}}}}"
+    },
+    {
+        "test_name": "InvalidPreviousLocalExitRoot",
+        "certificate_header": "{\"network_id\":1,\"height\":0,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"ProofGenerationError\":{\"generation_type\":\"Native\",\"source\":{\"InvalidPreviousLocalExitRoot\":{\"declared\":\"0x5b06837a43bdc3dd9f114558daf4b26ed49842ed\",\"computed\":\"0x20e92bfbb55589f7fd0bec3666e3de469526de3e\"}}}}}}}"
+    },
+    {
+        "test_name": "InvalidPreviousNullifierRoot",
+        "certificate_header": "{\"network_id\":21,\"height\":111,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"ProofGenerationError\":{\"generation_type\":\"Native\",\"source\":{\"InvalidPreviousNullifierRoot\":{\"declared\":\"0x5b06837a43bdc3dd9f114558daf4b26ed49842ee\",\"computed\":\"0x20e92bfbb55589f7fd0bec3666e3de469526de3e\"}}}}}}}"
+    },
+    {
+        "test_name": "InvalidPreviousNullifierRoot_missing_declared",
+        "expected_error": "key declared not found in map",
+        "certificate_header": "{\"network_id\":21,\"height\":111,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"ProofGenerationError\":{\"generation_type\":\"Native\",\"source\":{\"InvalidPreviousNullifierRoot\":{\"computed\":\"0x20e92bfbb55589f7fd0bec3666e3de469526de3e\"}}}}}}}"
+    },
+    {
+        "test_name": "InvalidPreviousNullifierRoot_missing_computed",
+        "expected_error": "key computed not found in map",
+        "certificate_header": "{\"network_id\":21,\"height\":111,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"ProofGenerationError\":{\"generation_type\":\"Native\",\"source\":{\"InvalidPreviousNullifierRoot\":{\"declared\":\"0x5b06837a43bdc3dd9f114558daf4b26ed49842ee\"}}}}}}}"
+    },
+    {
+        "test_name": "InvalidPreviousNullifierRoot_missing_inner_error",
+        "expected_error": "not a map",
+        "certificate_header": "{\"network_id\":21,\"height\":111,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"ProofGenerationError\":{\"generation_type\":\"Native\",\"source\":\"InvalidPreviousNullifierRoot\"}}}}}"
+    }
+]
\ No newline at end of file
diff --git a/agglayer/testdata/proof_generation_errors/errors_with_token_info.json b/agglayer/testdata/proof_generation_errors/errors_with_token_info.json
new file mode 100644
index 00000000..6884676a
--- /dev/null
+++ b/agglayer/testdata/proof_generation_errors/errors_with_token_info.json
@@ -0,0 +1,29 @@
+[
+    {
+        "test_name": "InvalidL1TokenInfo",
+        "certificate_header":"{\"network_id\":1,\"height\":0,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"ProofGenerationError\":{\"generation_type\":\"Native\",\"source\":{\"InvalidL1TokenInfo\":{\"TokenInfo\":{\"origin_network\":1,\"origin_token_address\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa2\"}}}}}}}}"
+    },
+    {
+        "test_name": "MissingTokenBalanceProof",
+        "certificate_header": "{\"network_id\":2111,\"height\":1,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"ProofGenerationError\":{\"generation_type\":\"Native\",\"source\":{\"MissingTokenBalanceProof\":{\"TokenInfo\":{\"origin_network\":2111,\"origin_token_address\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa2\"}}}}}}}}"
+    },
+    {
+        "test_name": "DuplicateTokenBalanceProof",
+        "certificate_header": "{\"network_id\":100000000,\"height\":18446744073709551615,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"ProofGenerationError\":{\"generation_type\":\"Native\",\"source\":{\"DuplicateTokenBalanceProof\":{\"TokenInfo\":{\"origin_network\":10000000000,\"origin_token_address\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa2\"}}}}}}}}"
+    },
+    {
+        "test_name": "DuplicateTokenBalanceProof_missing_token_info",
+        "expected_error": "key TokenInfo not found in map",
+        "certificate_header": "{\"network_id\":100000000,\"height\":18446744073709551615,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"ProofGenerationError\":{\"generation_type\":\"Native\",\"source\":{\"DuplicateTokenBalanceProof\":{}}}}}}}"
+    },
+    {
+        "test_name": "DuplicateTokenBalanceProof_missing_origin_network",
+        "expected_error": "key origin_network not found in map",
+        "certificate_header": "{\"network_id\":100000000,\"height\":18446744073709551615,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"ProofGenerationError\":{\"generation_type\":\"Native\",\"source\":{\"DuplicateTokenBalanceProof\":{\"TokenInfo\":{\"origin_token_address\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa2\"}}}}}}}}"
+    },
+    {
+        "test_name": "DuplicateTokenBalanceProof_missing_origin_token_address",
+        "expected_error": "key origin_token_address not found in map",
+        "certificate_header": "{\"network_id\":100000000,\"height\":18446744073709551615,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"ProofGenerationError\":{\"generation_type\":\"Native\",\"source\":{\"DuplicateTokenBalanceProof\":{\"TokenInfo\":{\"origin_network\":10000000000}}}}}}}}"
+    }
+]
\ No newline at end of file
diff --git a/agglayer/testdata/proof_generation_errors/errors_without_inner_data.json b/agglayer/testdata/proof_generation_errors/errors_without_inner_data.json
new file mode 100644
index 00000000..87946f16
--- /dev/null
+++ b/agglayer/testdata/proof_generation_errors/errors_without_inner_data.json
@@ -0,0 +1,38 @@
+[
+    {
+        "test_name": "MismatchImportedExitsRoot",
+        "certificate_header": "{\"network_id\":14,\"height\":1,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"ProofGenerationError\":{\"generation_type\":\"Native\",\"source\":\"MismatchImportedExitsRoot\"}}}}}"
+    },
+    {
+        "test_name": "InvalidNullifierPath",
+        "certificate_header": "{\"network_id\":15,\"height\":2,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"ProofGenerationError\":{\"generation_type\":\"Native\",\"source\":\"InvalidNullifierPath\"}}}}}"
+    },
+    {
+        "test_name": "InvalidBalancePath",
+        "certificate_header": "{\"network_id\":16,\"height\":3,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"ProofGenerationError\":{\"generation_type\":\"Native\",\"source\":\"InvalidBalancePath\"}}}}}"
+    },
+    {
+        "test_name": "BalanceOverflowInBridgeExit",
+        "certificate_header": "{\"network_id\":17,\"height\":4,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"ProofGenerationError\":{\"generation_type\":\"Native\",\"source\":\"BalanceOverflowInBridgeExit\"}}}}}"
+    },
+    {
+        "test_name": "BalanceUnderflowInBridgeExit",
+        "certificate_header": "{\"network_id\":18,\"height\":5,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"ProofGenerationError\":{\"generation_type\":\"Native\",\"source\":\"BalanceUnderflowInBridgeExit\"}}}}}"
+    },
+    {
+        "test_name": "CannotExitToSameNetwork",
+        "certificate_header": "{\"network_id\":19,\"height\":6,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"ProofGenerationError\":{\"generation_type\":\"Native\",\"source\":\"CannotExitToSameNetwork\"}}}}}"
+    },
+    {
+        "test_name": "InvalidMessageOriginNetwork",
+        "certificate_header": "{\"network_id\":20,\"height\":7,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"ProofGenerationError\":{\"generation_type\":\"Native\",\"source\":\"InvalidMessageOriginNetwork\"}}}}}"
+    },
+    {
+        "test_name": "UnknownError",
+        "certificate_header": "{\"network_id\":21,\"height\":8,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"ProofGenerationError\":{\"generation_type\":\"Native\",\"source\":\"UnknownError\"}}}}}"
+    },
+    {
+        "test_name": "InvalidSignature",
+        "certificate_header": "{\"network_id\":22,\"height\":9,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"ProofGenerationError\":{\"generation_type\":\"Native\",\"source\":\"InvalidSignature\"}}}}}"
+    }
+]
\ No newline at end of file
diff --git a/agglayer/testdata/proof_generation_errors/invalid_imported_bridge_exit_errors.json b/agglayer/testdata/proof_generation_errors/invalid_imported_bridge_exit_errors.json
new file mode 100644
index 00000000..dc6b8cad
--- /dev/null
+++ b/agglayer/testdata/proof_generation_errors/invalid_imported_bridge_exit_errors.json
@@ -0,0 +1,48 @@
+[
+    {
+        "test_name": "MismatchGlobalIndexInclusionProof",
+        "certificate_header": "{\"network_id\":1,\"height\":0,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"ProofGenerationError\":{\"generation_type\":\"Native\",\"source\":{\"InvalidImportedBridgeExit\":{\"source\":\"MismatchGlobalIndexInclusionProof\",\"global_index\":{\"mainnet_flag\":true,\"rollup_index\":0,\"leaf_index\":1}}}}}}}}"
+    },
+    {
+        "test_name": "MismatchL1Root",
+        "certificate_header": "{\"network_id\":1,\"height\":1,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"ProofGenerationError\":{\"generation_type\":\"Native\",\"source\":{\"InvalidImportedBridgeExit\":{\"source\":\"MismatchL1Root\",\"global_index\":{\"mainnet_flag\":true,\"rollup_index\":0,\"leaf_index\":2}}}}}}}}"
+    },
+    {
+        "test_name": "MismatchMER",
+        "certificate_header": "{\"network_id\":1,\"height\":2,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"ProofGenerationError\":{\"generation_type\":\"Native\",\"source\":{\"InvalidImportedBridgeExit\":{\"source\":\"MismatchMER\",\"global_index\":{\"mainnet_flag\":true,\"rollup_index\":0,\"leaf_index\":3}}}}}}}}"
+    },
+    {
+        "test_name": "MismatchRER",
+        "certificate_header": "{\"network_id\":1,\"height\":3,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"ProofGenerationError\":{\"generation_type\":\"Native\",\"source\":{\"InvalidImportedBridgeExit\":{\"source\":\"MismatchRER\",\"global_index\":{\"mainnet_flag\":false,\"rollup_index\":1,\"leaf_index\":4}}}}}}}}"
+    },
+    {
+        "test_name": "InvalidMerklePathLeafToLER",
+        "certificate_header": "{\"network_id\":1,\"height\":4,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"ProofGenerationError\":{\"generation_type\":\"Native\",\"source\":{\"InvalidImportedBridgeExit\":{\"source\":\"InvalidMerklePathLeafToLER\",\"global_index\":{\"mainnet_flag\":true,\"rollup_index\":0,\"leaf_index\":5}}}}}}}}"
+    },
+    {
+        "test_name": "InvalidMerklePathLERToRER",
+        "certificate_header": "{\"network_id\":1,\"height\":5,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"ProofGenerationError\":{\"generation_type\":\"Native\",\"source\":{\"InvalidImportedBridgeExit\":{\"source\":\"InvalidMerklePathLERToRER\",\"global_index\":{\"mainnet_flag\":false,\"rollup_index\":2,\"leaf_index\":6}}}}}}}}"
+    },
+    {
+        "test_name": "InvalidMerklePathGERToL1Root",
+        "certificate_header": "{\"network_id\":1,\"height\":6,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"ProofGenerationError\":{\"generation_type\":\"Native\",\"source\":{\"InvalidImportedBridgeExit\":{\"source\":\"InvalidMerklePathGERToL1Root\",\"global_index\":{\"mainnet_flag\":true,\"rollup_index\":0,\"leaf_index\":7}}}}}}}}"
+    },
+    {
+        "test_name": "InvalidExitNetwork",
+        "certificate_header": "{\"network_id\":1,\"height\":7,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"ProofGenerationError\":{\"generation_type\":\"Native\",\"source\":{\"InvalidImportedBridgeExit\":{\"source\":\"InvalidExitNetwork\",\"global_index\":{\"mainnet_flag\":false,\"rollup_index\":1,\"leaf_index\":8}}}}}}}}"
+    },
+    {
+        "test_name": "InvalidExitNetwork_missing_source",
+        "expected_error": "key source not found in map",
+        "certificate_header": "{\"network_id\":1,\"height\":7,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"ProofGenerationError\":{\"generation_type\":\"Native\",\"source\":{\"InvalidImportedBridgeExit\":{\"global_index\":{\"mainnet_flag\":false,\"rollup_index\":1,\"leaf_index\":8}}}}}}}}"
+    },
+    {
+        "test_name": "InvalidExitNetwork_missing_global_index",
+        "expected_error": "key global_index not found in map",
+        "certificate_header": "{\"network_id\":1,\"height\":7,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"ProofGenerationError\":{\"generation_type\":\"Native\",\"source\":{\"InvalidImportedBridgeExit\":{\"source\":\"InvalidExitNetwork\"}}}}}}}"
+    },
+    {
+        "test_name": "UnknownError",
+        "certificate_header": "{\"network_id\":1,\"height\":7,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"ProofGenerationError\":{\"generation_type\":\"Native\",\"source\":{\"InvalidImportedBridgeExit\":{\"source\":\"UnknownError\",\"global_index\":{\"mainnet_flag\":false,\"rollup_index\":1,\"leaf_index\":8}}}}}}}}"
+    }
+]
\ No newline at end of file
diff --git a/agglayer/testdata/proof_generation_errors/invalid_signer_error.json b/agglayer/testdata/proof_generation_errors/invalid_signer_error.json
new file mode 100644
index 00000000..62c5578c
--- /dev/null
+++ b/agglayer/testdata/proof_generation_errors/invalid_signer_error.json
@@ -0,0 +1,21 @@
+[
+    {
+        "test_name": "InvalidSignerError",
+        "certificate_header": "{\"network_id\":1,\"height\":0,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"ProofGenerationError\":{\"generation_type\":\"Native\",\"source\":{\"InvalidSigner\":{\"declared\":\"0x5b06837a43bdc3dd9f114558daf4b26ed49842ed\",\"recovered\":\"0x20e92bfbb55589f7fd0bec3666e3de469526de3e\"}}}}}}}"
+    },
+    {
+        "test_name": "InvalidSignerError_missing_declared",
+        "expected_error": "key declared not found in map",
+        "certificate_header": "{\"network_id\":1,\"height\":0,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"ProofGenerationError\":{\"generation_type\":\"Native\",\"source\":{\"InvalidSigner\":{\"recovered\":\"0x20e92bfbb55589f7fd0bec3666e3de469526de3e\"}}}}}}}"
+    },
+    {
+        "test_name": "InvalidSignerError_missing_recovered",
+        "expected_error": "key recovered not found in map",
+        "certificate_header": "{\"network_id\":1,\"height\":0,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"ProofGenerationError\":{\"generation_type\":\"Native\",\"source\":{\"InvalidSigner\":{\"declared\":\"0x5b06837a43bdc3dd9f114558daf4b26ed49842ed\"}}}}}}}"
+    },
+    {
+        "test_name": "InvalidSignerError_missing_inner_error",
+        "expected_error": "not a map",
+        "certificate_header": "{\"network_id\":1,\"height\":0,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"ProofGenerationError\":{\"generation_type\":\"Native\",\"source\":\"InvalidSigner\"}}}}}"
+    }
+]
\ No newline at end of file
diff --git a/agglayer/testdata/proof_generation_errors/random_unmarshal_errors.json b/agglayer/testdata/proof_generation_errors/random_unmarshal_errors.json
new file mode 100644
index 00000000..680370e2
--- /dev/null
+++ b/agglayer/testdata/proof_generation_errors/random_unmarshal_errors.json
@@ -0,0 +1,12 @@
+[
+    {
+        "test_name": "missing_proof_generation_type",
+        "certificate_header": "{\"network_id\":14,\"height\":1,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"ProofGenerationError\":{\"source\":{\"InvalidImportedExitsRoot\":{\"declared\":\"0x1116837a43bdc3dd9f114558daf4b26ed4eeeeec\",\"computed\":\"0x20222bfbb55589f7fd0bec3666e3de469111ce3c\"}}}}}}}",
+        "expected_error": "key generation_type not found in map"
+    },
+    {
+        "test_name": "missing_source",
+        "certificate_header": "{\"network_id\":14,\"height\":1,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"ProofGenerationError\":{\"generation_type\":\"Native\",\"unknown\":{\"InvalidImportedExitsRoot\":{\"declared\":\"0x1116837a43bdc3dd9f114558daf4b26ed4eeeeec\",\"computed\":\"0x20222bfbb55589f7fd0bec3666e3de469111ce3c\"}}}}}}}",
+        "expected_error": "key source not found in map"
+    }
+]
\ No newline at end of file
diff --git a/agglayer/testdata/proof_verification_errors/errors_with_inner_data.json b/agglayer/testdata/proof_verification_errors/errors_with_inner_data.json
new file mode 100644
index 00000000..2060d2ee
--- /dev/null
+++ b/agglayer/testdata/proof_verification_errors/errors_with_inner_data.json
@@ -0,0 +1,22 @@
+[
+    {
+        "test_name": "VersionMismatch",
+        "certificate_header": "{\"network_id\":1,\"height\":4,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"ProofVerificationError\":{\"VersionMismatch\":\"version1-1\"}}}}}"
+    },
+    {
+        "test_name": "Core",
+        "certificate_header": "{\"network_id\":1,\"height\":4,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"ProofVerificationError\":{\"Core\":\"coreexample\"}}}}}"
+    },
+    {
+        "test_name": "Recursion",
+        "certificate_header": "{\"network_id\":1,\"height\":4,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"ProofVerificationError\":{\"Recursion\":\"recursion error\"}}}}}"
+    },
+    {
+        "test_name": "Plank",
+        "certificate_header":
"{\"network_id\":1,\"height\":4,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"ProofVerificationError\":{\"Plank\":\"plank error\"}}}}}" + }, + { + "test_name": "Groth16", + "certificate_header": "{\"network_id\":1,\"height\":4,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"ProofVerificationError\":{\"Groth16\":\"Groth16 error\"}}}}}" + } +] \ No newline at end of file diff --git a/agglayer/testdata/proof_verification_errors/errors_without_inner_data.json b/agglayer/testdata/proof_verification_errors/errors_without_inner_data.json new file mode 100644 index 00000000..458b07c0 --- /dev/null +++ b/agglayer/testdata/proof_verification_errors/errors_without_inner_data.json @@ -0,0 +1,6 @@ +[ + { + "test_name": "InvalidPublicValues", + "certificate_header": "{\"network_id\":1,\"height\":4,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"ProofVerificationError\":\"InvalidPublicValues\"}}}}" + } +] \ No newline at end of file diff --git a/agglayer/testdata/type_conversion_errors/errors_with_declared_computed_data.json 
b/agglayer/testdata/type_conversion_errors/errors_with_declared_computed_data.json new file mode 100644 index 00000000..348ffa5f --- /dev/null +++ b/agglayer/testdata/type_conversion_errors/errors_with_declared_computed_data.json @@ -0,0 +1,6 @@ +[ + { + "test_name": "MultipleL1InfoRoot", + "certificate_header": "{\"network_id\":1,\"height\":4,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"TypeConversionError\":{\"MismatchNewLocalExitRoot\":{\"declared\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"computed\":\"0x5b06837a43bdc3dd9f114558daf4b26ed49842ee\"}}}}}}" + } +] \ No newline at end of file diff --git a/agglayer/testdata/type_conversion_errors/errors_with_token_info.json b/agglayer/testdata/type_conversion_errors/errors_with_token_info.json new file mode 100644 index 00000000..06d739a9 --- /dev/null +++ b/agglayer/testdata/type_conversion_errors/errors_with_token_info.json @@ -0,0 +1,26 @@ +[ + { + "test_name": "MultipleL1InfoRoot", + "certificate_header": "{\"network_id\":1,\"height\":0,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"TypeConversionError\":{\"MultipleL1InfoRoot\":{\"origin_network\":1,\"origin_token_address\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa2\"}}}}}}" + }, + { + "test_name": "MismatchNewLocalExitRoot", + "certificate_header": 
"{\"network_id\":1,\"height\":1,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"TypeConversionError\":{\"MismatchNewLocalExitRoot\":{\"origin_network\":1,\"origin_token_address\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa2\"}}}}}}" + }, + { + "test_name": "BalanceOverflow", + "certificate_header": "{\"network_id\":1,\"height\":2,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"TypeConversionError\":{\"BalanceOverflow\":{\"origin_network\":1,\"origin_token_address\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa2\"}}}}}}" + }, + { + "test_name": "BalanceUnderflow", + "certificate_header": "{\"network_id\":1,\"height\":3,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"TypeConversionError\":{\"BalanceUnderflow\":{\"origin_network\":1,\"origin_token_address\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa2\"}}}}}}" + }, + { + "test_name": "BalanceProofGenerationFailed - KeyAlreadyPresent", + "certificate_header": 
"{\"network_id\":1,\"height\":4,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"TypeConversionError\":{\"BalanceProofGenerationFailed\":{\"source\":\"KeyAlreadyPresent\",\"token\":{\"origin_network\":1,\"origin_token_address\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa2\"}}}}}}}" + }, + { + "test_name": "BalanceProofGenerationFailed - KeyNotPresent", + "certificate_header": "{\"network_id\":1,\"height\":5,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"TypeConversionError\":{\"BalanceProofGenerationFailed\":{\"source\":\"KeyNotPresent\",\"token\":{\"origin_network\":11,\"origin_token_address\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa2\"}}}}}}}" + } +] \ No newline at end of file diff --git a/agglayer/testdata/type_conversion_errors/errors_without_inner_data.json b/agglayer/testdata/type_conversion_errors/errors_without_inner_data.json new file mode 100644 index 00000000..a92aca80 --- /dev/null +++ b/agglayer/testdata/type_conversion_errors/errors_without_inner_data.json @@ -0,0 +1,6 @@ +[ + { + "test_name": "MultipleL1InfoRoot", + "certificate_header": 
"{\"network_id\":1,\"height\":0,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"TypeConversionError\":\"MultipleL1InfoRoot\"}}}}" + } +] \ No newline at end of file diff --git a/agglayer/testdata/type_conversion_errors/nullifier_path_generation_failed_error.json b/agglayer/testdata/type_conversion_errors/nullifier_path_generation_failed_error.json new file mode 100644 index 00000000..b52cd73f --- /dev/null +++ b/agglayer/testdata/type_conversion_errors/nullifier_path_generation_failed_error.json @@ -0,0 +1,20 @@ +[ + { + "test_name": "NullifierPathGenerationFailed - KeyPresent", + "certificate_header": "{\"network_id\":1,\"height\":6,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"TypeConversionError\":{\"NullifierPathGenerationFailed\":{\"source\":\"KeyPresent\",\"global_index\":{\"mainnet_flag\":true,\"rollup_index\":0,\"leaf_index\":1}}}}}}}" + }, + { + "test_name": "NullifierPathGenerationFailed - DepthOutOfBounds", + "certificate_header": 
"{\"network_id\":1,\"height\":7,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"TypeConversionError\":{\"NullifierPathGenerationFailed\":{\"source\":\"DepthOutOfBounds\",\"global_index\":{\"mainnet_flag\":false,\"rollup_index\":11,\"leaf_index\":123}}}}}}}" + }, + { + "test_name": "NullifierPathGenerationFailed_unknown_SMT_error_code", + "expected_error": "unknown SMT error code", + "certificate_header": "{\"network_id\":1,\"height\":7,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"TypeConversionError\":{\"NullifierPathGenerationFailed\":{\"source\":\"UnknownCode\",\"global_index\":{\"mainnet_flag\":false,\"rollup_index\":11,\"leaf_index\":123}}}}}}}" + }, + { + "test_name": "NullifierPathGenerationFailed_missing_SMT_source", + "expected_error": "error code is not a string", + "certificate_header": "{\"network_id\":1,\"height\":7,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"TypeConversionError\":{\"NullifierPathGenerationFailed\":{\"unknown\":\"DepthOutOfBounds\",\"global_index\":{\"mainnet_flag\":false,\"rollup_index\":11,\"leaf_index\":123}}}}}}}" + } 
+] \ No newline at end of file diff --git a/agglayer/type_conversion_error.go b/agglayer/type_conversion_error.go new file mode 100644 index 00000000..89129253 --- /dev/null +++ b/agglayer/type_conversion_error.go @@ -0,0 +1,255 @@ +package agglayer + +import ( + "errors" + "fmt" +) + +const ( + MultipleL1InfoRootErrorType = "MultipleL1InfoRoot" + MismatchNewLocalExitRootErrorType = "MismatchNewLocalExitRoot" + BalanceOverflowErrorType = "BalanceOverflow" + BalanceUnderflowErrorType = "BalanceUnderflow" + BalanceProofGenerationFailedErrorType = "BalanceProofGenerationFailed" + NullifierPathGenerationFailedErrorType = "NullifierPathGenerationFailed" +) + +// TypeConversionError is an error that is returned when verifying a certficate +// before generating its proof. +type TypeConversionError struct { + InnerErrors []PPError +} + +// String is the implementation of the Error interface +func (p *TypeConversionError) String() string { + return fmt.Sprintf("Type conversion error: %v", p.InnerErrors) +} + +// Unmarshal unmarshals the data from a map into a ProofGenerationError struct. 
+func (p *TypeConversionError) Unmarshal(data interface{}) error { + getPPErrFn := func(key string, value interface{}) (PPError, error) { + switch key { + case MultipleL1InfoRootErrorType: + p.InnerErrors = append(p.InnerErrors, &MultipleL1InfoRoot{}) + case MismatchNewLocalExitRootErrorType: + p.InnerErrors = append(p.InnerErrors, NewMismatchNewLocalExitRoot()) + case BalanceOverflowErrorType: + balanceOverflow := NewBalanceOverflow() + if err := balanceOverflow.UnmarshalFromMap(value); err != nil { + return nil, err + } + return balanceOverflow, nil + case BalanceUnderflowErrorType: + balanceUnderflow := NewBalanceUnderflow() + if err := balanceUnderflow.UnmarshalFromMap(value); err != nil { + return nil, err + } + return balanceUnderflow, nil + case BalanceProofGenerationFailedErrorType: + balanceProofGenerationFailed := NewBalanceProofGenerationFailed() + if err := balanceProofGenerationFailed.UnmarshalFromMap(value); err != nil { + return nil, err + } + return balanceProofGenerationFailed, nil + case NullifierPathGenerationFailedErrorType: + nullifierPathGenerationFailed := NewNullifierPathGenerationFailed() + if err := nullifierPathGenerationFailed.UnmarshalFromMap(value); err != nil { + return nil, err + } + return nullifierPathGenerationFailed, nil + default: + return nil, fmt.Errorf("unknown type conversion error type: %v", key) + } + + return nil, nil + } + + getAndAddInnerErrorFn := func(key string, value interface{}) error { + ppErr, err := getPPErrFn(key, value) + if err != nil { + return err + } + + if ppErr != nil { + p.InnerErrors = append(p.InnerErrors, ppErr) + } + + return nil + } + + errorSourceMap, ok := data.(map[string]interface{}) + if !ok { + // it can be a single error + return getAndAddInnerErrorFn(data.(string), nil) //nolint:forcetypeassert + } + + for key, value := range errorSourceMap { + if err := getAndAddInnerErrorFn(key, value); err != nil { + return err + } + } + + return nil +} + +// MultipleL1InfoRoot is an error that is 
returned when the imported bridge exits +// refer to different L1 info roots. +type MultipleL1InfoRoot struct{} + +// String is the implementation of the Error interface +func (e *MultipleL1InfoRoot) String() string { + return fmt.Sprintf(`%s: The imported bridge exits should refer to one and the same L1 info root.`, + MultipleL1InfoRootErrorType) +} + +// MissingNewLocalExitRoot is an error that is returned when the certificate refers to +// a new local exit root which differ from the one computed by the agglayer. +type MismatchNewLocalExitRoot struct { + *DeclaredComputedError +} + +func NewMismatchNewLocalExitRoot() *MismatchNewLocalExitRoot { + return &MismatchNewLocalExitRoot{ + DeclaredComputedError: &DeclaredComputedError{ErrType: MismatchNewLocalExitRootErrorType}, + } +} + +// BalanceOverflow is an error that is returned when the given token balance cannot overflow. +type BalanceOverflow struct { + *TokenInfoError +} + +// NewBalanceOverflow returns a new BalanceOverflow error. +func NewBalanceOverflow() *BalanceOverflow { + return &BalanceOverflow{ + TokenInfoError: &TokenInfoError{}, + } +} + +// String is the implementation of the Error interface +func (e *BalanceOverflow) String() string { + return fmt.Sprintf("%s: The given token balance cannot overflow. %s", + BalanceOverflowErrorType, e.TokenInfo.String()) +} + +// BalanceUnderflow is an error that is returned when the given token balance cannot be negative. +type BalanceUnderflow struct { + *TokenInfoError +} + +// NewBalanceOverflow returns a new BalanceOverflow error. +func NewBalanceUnderflow() *BalanceUnderflow { + return &BalanceUnderflow{ + TokenInfoError: &TokenInfoError{}, + } +} + +// String is the implementation of the Error interface +func (e *BalanceUnderflow) String() string { + return fmt.Sprintf("%s: The given token balance cannot be negative. 
%s", + BalanceUnderflowErrorType, e.TokenInfo.String()) +} + +// SmtError is a type that is inherited by all errors that occur during SMT operations. +type SmtError struct { + ErrorCode string + Error string +} + +func (e *SmtError) Unmarshal(data interface{}) error { + errCode, ok := data.(string) + if !ok { + return errors.New("error code is not a string") + } + + e.ErrorCode = errCode + + switch errCode { + case "KeyAlreadyPresent": + e.Error = "trying to insert a key already in the SMT" + case "KeyNotPresent": + e.Error = "trying to generate a Merkle proof for a key not in the SMT" + case "KeyPresent": + e.Error = "trying to generate a non-inclusion proof for a key present in the SMT" + case "DepthOutOfBounds": + e.Error = "depth out of bounds" + default: + return fmt.Errorf("unknown SMT error code: %s", errCode) + } + + return nil +} + +// BalanceProofGenerationFailed is a struct that represents an error that occurs when +// the balance proof for the given token cannot be generated. +type BalanceProofGenerationFailed struct { + *TokenInfoError + *SmtError +} + +func NewBalanceProofGenerationFailed() *BalanceProofGenerationFailed { + return &BalanceProofGenerationFailed{ + TokenInfoError: &TokenInfoError{}, + SmtError: &SmtError{}, + } +} + +// String is the implementation of the Error interface +func (e *BalanceProofGenerationFailed) String() string { + return fmt.Sprintf("%s: The balance proof for the given token cannot be generated. TokenInfo: %s. Error type: %s. 
%s", + BalanceProofGenerationFailedErrorType, e.TokenInfo.String(), + e.SmtError.ErrorCode, e.SmtError.Error) +} + +func (e *BalanceProofGenerationFailed) UnmarshalFromMap(data interface{}) error { + dataMap, ok := data.(map[string]interface{}) + if !ok { + return errNotMap + } + + if err := e.TokenInfoError.UnmarshalFromMap(dataMap["token"]); err != nil { + return err + } + + return e.SmtError.Unmarshal(dataMap["source"]) +} + +// NullifierPathGenerationFailed is a struct that represents an error that occurs when +// the nullifier path for the given imported bridge exit cannot be generated.. +type NullifierPathGenerationFailed struct { + GlobalIndex *GlobalIndex `json:"global_index"` + *SmtError +} + +func NewNullifierPathGenerationFailed() *NullifierPathGenerationFailed { + return &NullifierPathGenerationFailed{ + SmtError: &SmtError{}, + } +} + +// String is the implementation of the Error interface +func (e *NullifierPathGenerationFailed) String() string { + return fmt.Sprintf("%s: The nullifier path for the given imported bridge exit cannot be generated. "+ + "GlobalIndex: %s. Error type: %s. 
%s", + NullifierPathGenerationFailedErrorType, e.GlobalIndex.String(), + e.SmtError.ErrorCode, e.SmtError.Error) +} + +func (e *NullifierPathGenerationFailed) UnmarshalFromMap(data interface{}) error { + dataMap, ok := data.(map[string]interface{}) + if !ok { + return errNotMap + } + + if err := e.SmtError.Unmarshal(dataMap["source"]); err != nil { + return err + } + + globalIndexMap, err := convertMapValue[map[string]interface{}](dataMap, "global_index") + if err != nil { + return err + } + + e.GlobalIndex = &GlobalIndex{} + return e.GlobalIndex.UnmarshalFromMap(globalIndexMap) +} diff --git a/agglayer/types.go b/agglayer/types.go index 9350e791..b6a3198e 100644 --- a/agglayer/types.go +++ b/agglayer/types.go @@ -2,6 +2,7 @@ package agglayer import ( "encoding/json" + "errors" "fmt" "math/big" "strings" @@ -36,10 +37,7 @@ func (c *CertificateStatus) UnmarshalJSON(data []byte) error { if strings.Contains(dataStr, "InError") { status = "InError" } else { - err := json.Unmarshal(data, &status) - if err != nil { - return err - } + status = string(data) } switch status { @@ -199,6 +197,7 @@ type TokenInfo struct { OriginTokenAddress common.Address `json:"origin_token_address"` } +// String returns a string representation of the TokenInfo struct func (t *TokenInfo) String() string { return fmt.Sprintf("OriginNetwork: %d, OriginTokenAddress: %s", t.OriginNetwork, t.OriginTokenAddress.String()) } @@ -210,6 +209,11 @@ type GlobalIndex struct { LeafIndex uint32 `json:"leaf_index"` } +// String returns a string representation of the GlobalIndex struct +func (g *GlobalIndex) String() string { + return fmt.Sprintf("MainnetFlag: %t, RollupIndex: %d, LeafIndex: %d", g.MainnetFlag, g.RollupIndex, g.LeafIndex) +} + func (g *GlobalIndex) Hash() common.Hash { return crypto.Keccak256Hash( cdkcommon.BigIntToLittleEndianBytes( @@ -218,9 +222,27 @@ func (g *GlobalIndex) Hash() common.Hash { ) } -func (g *GlobalIndex) String() string { - return fmt.Sprintf("MainnetFlag: %t, RollupIndex: 
%d, LeafIndex: %d", - g.MainnetFlag, g.RollupIndex, g.LeafIndex) +func (g *GlobalIndex) UnmarshalFromMap(data map[string]interface{}) error { + rollupIndex, err := convertMapValue[uint32](data, "rollup_index") + if err != nil { + return err + } + + leafIndex, err := convertMapValue[uint32](data, "leaf_index") + if err != nil { + return err + } + + mainnetFlag, err := convertMapValue[bool](data, "mainnet_flag") + if err != nil { + return err + } + + g.RollupIndex = rollupIndex + g.LeafIndex = leafIndex + g.MainnetFlag = mainnetFlag + + return nil } // BridgeExit represents a token bridge exit @@ -525,9 +547,96 @@ type CertificateHeader struct { NewLocalExitRoot common.Hash `json:"new_local_exit_root"` Status CertificateStatus `json:"status"` Metadata common.Hash `json:"metadata"` + Error PPError `json:"-"` } func (c CertificateHeader) String() string { - return fmt.Sprintf("Height: %d, CertificateID: %s, NewLocalExitRoot: %s", - c.Height, c.CertificateID.String(), c.NewLocalExitRoot.String()) + errors := "" + if c.Error != nil { + errors = c.Error.String() + } + + return fmt.Sprintf("Height: %d, CertificateID: %s, NewLocalExitRoot: %s. Status: %s. 
Errors: %s", + c.Height, c.CertificateID.String(), c.NewLocalExitRoot.String(), c.Status.String(), errors) +} + +func (c *CertificateHeader) UnmarshalJSON(data []byte) error { + // we define an alias to avoid infinite recursion + type Alias CertificateHeader + aux := &struct { + Status interface{} `json:"status"` + *Alias + }{ + Alias: (*Alias)(c), + } + + if err := json.Unmarshal(data, &aux); err != nil { + return err + } + + // Process Status field + switch status := aux.Status.(type) { + case string: // certificate not InError + if err := c.Status.UnmarshalJSON([]byte(status)); err != nil { + return err + } + case map[string]interface{}: // certificate has errors + inErrMap, err := convertMapValue[map[string]interface{}](status, "InError") + if err != nil { + return err + } + + inErrDataMap, err := convertMapValue[map[string]interface{}](inErrMap, "error") + if err != nil { + return err + } + + var ppError PPError + + for key, value := range inErrDataMap { + switch key { + case "ProofGenerationError": + p := &ProofGenerationError{} + if err := p.Unmarshal(value); err != nil { + return err + } + + ppError = p + case "TypeConversionError": + t := &TypeConversionError{} + if err := t.Unmarshal(value); err != nil { + return err + } + + ppError = t + case "ProofVerificationError": + p := &ProofVerificationError{} + if err := p.Unmarshal(value); err != nil { + return err + } + + ppError = p + default: + return fmt.Errorf("invalid error type: %s", key) + } + } + + c.Status = InError + c.Error = ppError + default: + return errors.New("invalid status type") + } + + return nil +} + +// ClockConfiguration represents the configuration of the epoch clock +// returned by the interop_GetEpochConfiguration RPC call +type ClockConfiguration struct { + EpochDuration uint64 `json:"epoch_duration"` + GenesisBlock uint64 `json:"genesis_block"` +} + +func (c ClockConfiguration) String() string { + return fmt.Sprintf("EpochDuration: %d, GenesisBlock: %d", c.EpochDuration, 
c.GenesisBlock) } diff --git a/agglayer/types_test.go b/agglayer/types_test.go index 95033141..f2133923 100644 --- a/agglayer/types_test.go +++ b/agglayer/types_test.go @@ -152,3 +152,102 @@ func TestSignedCertificate_Copy(t *testing.T) { require.Empty(t, certificateCopy.ImportedBridgeExits) }) } + +func TestGlobalIndex_UnmarshalFromMap(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + data map[string]interface{} + want *GlobalIndex + wantErr bool + }{ + { + name: "valid data", + data: map[string]interface{}{ + "rollup_index": uint32(0), + "leaf_index": uint32(2), + "mainnet_flag": true, + }, + want: &GlobalIndex{ + RollupIndex: 0, + LeafIndex: 2, + MainnetFlag: true, + }, + wantErr: false, + }, + { + name: "missing rollup_index", + data: map[string]interface{}{ + "leaf_index": uint32(2), + "mainnet_flag": true, + }, + want: &GlobalIndex{}, + wantErr: true, + }, + { + name: "invalid rollup_index type", + data: map[string]interface{}{ + "rollup_index": "invalid", + "leaf_index": uint32(2), + "mainnet_flag": true, + }, + want: &GlobalIndex{}, + wantErr: true, + }, + { + name: "missing leaf_index", + data: map[string]interface{}{ + "rollup_index": uint32(1), + "mainnet_flag": true, + }, + want: &GlobalIndex{}, + wantErr: true, + }, + { + name: "invalid leaf_index type", + data: map[string]interface{}{ + "rollup_index": uint32(1), + "leaf_index": "invalid", + "mainnet_flag": true, + }, + want: &GlobalIndex{}, + wantErr: true, + }, + { + name: "missing mainnet_flag", + data: map[string]interface{}{ + "rollup_index": uint32(1), + "leaf_index": uint32(2), + }, + want: &GlobalIndex{}, + wantErr: true, + }, + { + name: "invalid mainnet_flag type", + data: map[string]interface{}{ + "rollup_index": uint32(1), + "leaf_index": uint32(2), + "mainnet_flag": "invalid", + }, + want: &GlobalIndex{}, + wantErr: true, + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + g := &GlobalIndex{} + err := 
g.UnmarshalFromMap(tt.data) + if tt.wantErr { + require.Error(t, err) + } else { + require.NoError(t, err) + require.Equal(t, tt.want, g) + } + }) + } +} diff --git a/aggsender/aggsender.go b/aggsender/aggsender.go index e3242bdf..dcbbc268 100644 --- a/aggsender/aggsender.go +++ b/aggsender/aggsender.go @@ -8,11 +8,12 @@ import ( "fmt" "math/big" "os" + "slices" "time" "github.com/0xPolygon/cdk/agglayer" "github.com/0xPolygon/cdk/aggsender/db" - aggsendertypes "github.com/0xPolygon/cdk/aggsender/types" + "github.com/0xPolygon/cdk/aggsender/types" "github.com/0xPolygon/cdk/bridgesync" cdkcommon "github.com/0xPolygon/cdk/common" "github.com/0xPolygon/cdk/l1infotreesync" @@ -33,10 +34,11 @@ var ( // AggSender is a component that will send certificates to the aggLayer type AggSender struct { - log aggsendertypes.Logger + log types.Logger - l2Syncer aggsendertypes.L2BridgeSyncer - l1infoTreeSyncer aggsendertypes.L1InfoTreeSyncer + l2Syncer types.L2BridgeSyncer + l1infoTreeSyncer types.L1InfoTreeSyncer + epochNotifier types.EpochNotifier storage db.AggSenderStorage aggLayerClient agglayer.AgglayerClientInterface @@ -53,7 +55,8 @@ func New( cfg Config, aggLayerClient agglayer.AgglayerClientInterface, l1InfoTreeSyncer *l1infotreesync.L1InfoTreeSync, - l2Syncer *bridgesync.BridgeSync) (*AggSender, error) { + l2Syncer *bridgesync.BridgeSync, + epochNotifier types.EpochNotifier) (*AggSender, error) { storage, err := db.NewAggSenderSQLStorage(logger, cfg.StoragePath) if err != nil { return nil, err @@ -74,24 +77,30 @@ func New( aggLayerClient: aggLayerClient, l1infoTreeSyncer: l1InfoTreeSyncer, sequencerKey: sequencerPrivateKey, + epochNotifier: epochNotifier, }, nil } // Start starts the AggSender func (a *AggSender) Start(ctx context.Context) { - go a.sendCertificates(ctx) - go a.checkIfCertificatesAreSettled(ctx) + a.sendCertificates(ctx) } // sendCertificates sends certificates to the aggLayer func (a *AggSender) sendCertificates(ctx context.Context) { - ticker := 
time.NewTicker(a.cfg.BlockGetInterval.Duration) - + chEpoch := a.epochNotifier.Subscribe("aggsender") for { select { - case <-ticker.C: - if _, err := a.sendCertificate(ctx); err != nil { - log.Error(err) + case epoch := <-chEpoch: + a.log.Infof("Epoch received: %s", epoch.String()) + thereArePendingCerts, err := a.checkPendingCertificatesStatus(ctx) + if err == nil && !thereArePendingCerts { + if _, err := a.sendCertificate(ctx); err != nil { + log.Error(err) + } + } else { + log.Warnf("Skipping epoch %s because there are pending certificates %v or error: %w", + epoch.String(), thereArePendingCerts, err) } case <-ctx.Done(): a.log.Info("AggSender stopped") @@ -183,7 +192,7 @@ func (a *AggSender) sendCertificate(ctx context.Context) (*agglayer.SignedCertif } createdTime := time.Now().UTC().UnixMilli() - certInfo := aggsendertypes.CertificateInfo{ + certInfo := types.CertificateInfo{ Height: certificate.Height, CertificateID: certificateHash, NewLocalExitRoot: certificate.NewLocalExitRoot, @@ -224,7 +233,7 @@ func (a *AggSender) saveCertificateToFile(signedCertificate *agglayer.SignedCert // getNextHeightAndPreviousLER returns the height and previous LER for the new certificate func (a *AggSender) getNextHeightAndPreviousLER( - lastSentCertificateInfo *aggsendertypes.CertificateInfo) (uint64, common.Hash) { + lastSentCertificateInfo *types.CertificateInfo) (uint64, common.Hash) { height := lastSentCertificateInfo.Height + 1 if lastSentCertificateInfo.Status == agglayer.InError { // previous certificate was in error, so we need to resend it @@ -247,7 +256,7 @@ func (a *AggSender) getNextHeightAndPreviousLER( func (a *AggSender) buildCertificate(ctx context.Context, bridges []bridgesync.Bridge, claims []bridgesync.Claim, - lastSentCertificateInfo aggsendertypes.CertificateInfo, + lastSentCertificateInfo types.CertificateInfo, toBlock uint64) (*agglayer.Certificate, error) { if len(bridges) == 0 && len(claims) == 0 { return nil, errNoBridgesAndClaims @@ -475,34 +484,30 
@@ func (a *AggSender) signCertificate(certificate *agglayer.Certificate) (*agglaye }, nil } -// checkIfCertificatesAreSettled checks if certificates are settled -func (a *AggSender) checkIfCertificatesAreSettled(ctx context.Context) { - ticker := time.NewTicker(a.cfg.CheckSettledInterval.Duration) - for { - select { - case <-ticker.C: - a.checkPendingCertificatesStatus(ctx) - case <-ctx.Done(): - return - } - } -} - // checkPendingCertificatesStatus checks the status of pending certificates // and updates in the storage if it changed on agglayer -func (a *AggSender) checkPendingCertificatesStatus(ctx context.Context) { +// It returns: +// bool -> if there are pending certificates +// error -> if there was an error +func (a *AggSender) checkPendingCertificatesStatus(ctx context.Context) (bool, error) { pendingCertificates, err := a.storage.GetCertificatesByStatus(nonSettledStatuses) if err != nil { - a.log.Errorf("error getting pending certificates: %w", err) - return + err = fmt.Errorf("error getting pending certificates: %w", err) + a.log.Error(err) + return true, err } + thereArePendingCertificates := false a.log.Debugf("checkPendingCertificatesStatus num of pendingCertificates: %d", len(pendingCertificates)) for _, certificate := range pendingCertificates { certificateHeader, err := a.aggLayerClient.GetCertificateHeader(certificate.CertificateID) if err != nil { - a.log.Errorf("error getting certificate header of %s from agglayer: %w", - certificate.String(), err) - continue + err = fmt.Errorf("error getting certificate header of %d/%s from agglayer: %w", + certificate.Height, certificate.String(), err) + a.log.Error(err) + return true, err + } + if slices.Contains(nonSettledStatuses, certificateHeader.Status) { + thereArePendingCertificates = true } a.log.Debugf("aggLayerClient.GetCertificateHeader status [%s] of certificate %s ", certificateHeader.Status, @@ -516,11 +521,13 @@ func (a *AggSender) checkPendingCertificatesStatus(ctx context.Context) { 
certificate.UpdatedAt = time.Now().UTC().UnixMilli() if err := a.storage.UpdateCertificateStatus(ctx, *certificate); err != nil { - a.log.Errorf("error updating certificate %s status in storage: %w", certificateHeader.String(), err) - continue + err = fmt.Errorf("error updating certificate %s status in storage: %w", certificateHeader.String(), err) + a.log.Error(err) + return true, err } } } + return thereArePendingCertificates, nil } // shouldSendCertificate checks if a certificate should be sent at given time diff --git a/aggsender/aggsender_test.go b/aggsender/aggsender_test.go index e55422e0..0d071e76 100644 --- a/aggsender/aggsender_test.go +++ b/aggsender/aggsender_test.go @@ -27,12 +27,19 @@ import ( func TestExploratoryGetCertificateHeader(t *testing.T) { t.Skip("This test is exploratory and should be skipped") - aggLayerClient := agglayer.NewAggLayerClient("http://localhost:32795") + aggLayerClient := agglayer.NewAggLayerClient("http://localhost:32796") certificateID := common.HexToHash("0xf153e75e24591432ac5deafaeaafba3fec0fd851261c86051b9c0d540b38c369") certificateHeader, err := aggLayerClient.GetCertificateHeader(certificateID) require.NoError(t, err) fmt.Print(certificateHeader) } +func TestExploratoryGetEpochConfiguration(t *testing.T) { + t.Skip("This test is exploratory and should be skipped") + aggLayerClient := agglayer.NewAggLayerClient("http://localhost:32796") + clockConfig, err := aggLayerClient.GetEpochConfiguration() + require.NoError(t, err) + fmt.Print(clockConfig) +} func TestConfigString(t *testing.T) { config := Config{ @@ -42,6 +49,8 @@ func TestConfigString(t *testing.T) { CheckSettledInterval: types.Duration{Duration: 20 * time.Second}, AggsenderPrivateKey: types.KeystoreFileConfig{Path: "/path/to/key", Password: "password"}, URLRPCL2: "http://l2.rpc.url", + BlockFinality: "latestBlock", + EpochNotificationPercentage: 50, SaveCertificatesToFilesPath: "/path/to/certificates", } @@ -52,6 +61,8 @@ func TestConfigString(t *testing.T) { 
"AggsenderPrivateKeyPath: /path/to/key\n" + "AggsenderPrivateKeyPassword: password\n" + "URLRPCL2: http://l2.rpc.url\n" + + "BlockFinality: latestBlock\n" + + "EpochNotificationPercentage: 50\n" + "SaveCertificatesToFilesPath: /path/to/certificates\n" require.Equal(t, expected, config.String()) @@ -274,7 +285,8 @@ func TestGetImportedBridgeExits(t *testing.T) { t.Parallel() mockProof := generateTestProof(t) - mockL1InfoTreeSyncer := mocks.NewL1InfoTreeSyncerMock(t) + + mockL1InfoTreeSyncer := mocks.NewL1InfoTreeSyncer(t) mockL1InfoTreeSyncer.On("GetInfoByGlobalExitRoot", mock.Anything).Return(&l1infotreesync.L1InfoTreeLeaf{ L1InfoTreeIndex: 1, Timestamp: 123456789, @@ -507,8 +519,8 @@ func TestGetImportedBridgeExits(t *testing.T) { } func TestBuildCertificate(t *testing.T) { - mockL2BridgeSyncer := mocks.NewL2BridgeSyncerMock(t) - mockL1InfoTreeSyncer := mocks.NewL1InfoTreeSyncerMock(t) + mockL2BridgeSyncer := mocks.NewL2BridgeSyncer(t) + mockL1InfoTreeSyncer := mocks.NewL1InfoTreeSyncer(t) mockProof := generateTestProof(t) tests := []struct { @@ -738,17 +750,17 @@ func generateTestProof(t *testing.T) treeTypes.Proof { } func TestCheckIfCertificatesAreSettled(t *testing.T) { - t.Parallel() - tests := []struct { - name string - pendingCertificates []*aggsendertypes.CertificateInfo - certificateHeaders map[common.Hash]*agglayer.CertificateHeader - getFromDBError error - clientError error - updateDBError error - expectedErrorLogMessages []string - expectedInfoMessages []string + name string + pendingCertificates []*aggsendertypes.CertificateInfo + certificateHeaders map[common.Hash]*agglayer.CertificateHeader + getFromDBError error + clientError error + updateDBError error + expectedErrorLogMessages []string + expectedInfoMessages []string + expectedThereArePendingCerts bool + expectedError bool }{ { name: "All certificates settled - update successful", @@ -784,6 +796,8 @@ func TestCheckIfCertificatesAreSettled(t *testing.T) { expectedErrorLogMessages: []string{ 
"error getting pending certificates: %w", }, + expectedThereArePendingCerts: true, + expectedError: true, }, { name: "Error getting certificate header", @@ -797,6 +811,8 @@ func TestCheckIfCertificatesAreSettled(t *testing.T) { expectedErrorLogMessages: []string{ "error getting header of certificate %s with height: %d from agglayer: %w", }, + expectedThereArePendingCerts: true, + expectedError: true, }, { name: "Error updating certificate status", @@ -813,6 +829,8 @@ func TestCheckIfCertificatesAreSettled(t *testing.T) { expectedInfoMessages: []string{ "certificate %s changed status to %s", }, + expectedThereArePendingCerts: true, + expectedError: true, }, } @@ -820,9 +838,7 @@ func TestCheckIfCertificatesAreSettled(t *testing.T) { tt := tt t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - mockStorage := mocks.NewAggSenderStorageMock(t) + mockStorage := mocks.NewAggSenderStorage(t) mockAggLayerClient := agglayer.NewAgglayerClientMock(t) mockLogger := log.WithFields("test", "unittest") @@ -847,14 +863,10 @@ func TestCheckIfCertificatesAreSettled(t *testing.T) { }, } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - go aggSender.checkIfCertificatesAreSettled(ctx) - - time.Sleep(2 * time.Second) - cancel() - + ctx := context.TODO() + thereArePendingCerts, err := aggSender.checkPendingCertificatesStatus(ctx) + require.Equal(t, tt.expectedThereArePendingCerts, thereArePendingCerts) + require.Equal(t, tt.expectedError, err != nil) mockAggLayerClient.AssertExpectations(t) mockStorage.AssertExpectations(t) }) @@ -885,23 +897,23 @@ func TestSendCertificate(t *testing.T) { expectedError string } - setupTest := func(cfg testCfg) (*AggSender, *mocks.AggSenderStorageMock, *mocks.L2BridgeSyncerMock, - *agglayer.AgglayerClientMock, *mocks.L1InfoTreeSyncerMock) { + setupTest := func(cfg testCfg) (*AggSender, *mocks.AggSenderStorage, *mocks.L2BridgeSyncer, + *agglayer.AgglayerClientMock, *mocks.L1InfoTreeSyncer) { var ( aggsender = &AggSender{ log: 
log.WithFields("aggsender", 1), cfg: Config{}, sequencerKey: cfg.sequencerKey, } - mockStorage *mocks.AggSenderStorageMock - mockL2Syncer *mocks.L2BridgeSyncerMock + mockStorage *mocks.AggSenderStorage + mockL2Syncer *mocks.L2BridgeSyncer mockAggLayerClient *agglayer.AgglayerClientMock - mockL1InfoTreeSyncer *mocks.L1InfoTreeSyncerMock + mockL1InfoTreeSyncer *mocks.L1InfoTreeSyncer ) if cfg.shouldSendCertificate != nil || cfg.getLastSentCertificate != nil || cfg.saveLastSentCertificate != nil { - mockStorage = mocks.NewAggSenderStorageMock(t) + mockStorage = mocks.NewAggSenderStorage(t) mockStorage.On("GetCertificatesByStatus", nonSettledStatuses). Return(cfg.shouldSendCertificate...).Once() @@ -918,7 +930,7 @@ func TestSendCertificate(t *testing.T) { if cfg.lastL2BlockProcessed != nil || cfg.originNetwork != nil || cfg.getBridges != nil || cfg.getClaims != nil || cfg.getInfoByGlobalExitRoot != nil { - mockL2Syncer = mocks.NewL2BridgeSyncerMock(t) + mockL2Syncer = mocks.NewL2BridgeSyncer(t) mockL2Syncer.On("GetLastProcessedBlock", mock.Anything).Return(cfg.lastL2BlockProcessed...).Once() @@ -950,7 +962,7 @@ func TestSendCertificate(t *testing.T) { if cfg.getInfoByGlobalExitRoot != nil || cfg.getL1InfoTreeRootByIndex != nil || cfg.getL1InfoTreeMerkleProofFromIndexToRoot != nil { - mockL1InfoTreeSyncer = mocks.NewL1InfoTreeSyncerMock(t) + mockL1InfoTreeSyncer = mocks.NewL1InfoTreeSyncer(t) mockL1InfoTreeSyncer.On("GetInfoByGlobalExitRoot", mock.Anything).Return(cfg.getInfoByGlobalExitRoot...).Once() if cfg.getL1InfoTreeRootByIndex != nil { @@ -1481,10 +1493,10 @@ func TestSendCertificate_NoClaims(t *testing.T) { require.NoError(t, err) ctx := context.Background() - mockStorage := mocks.NewAggSenderStorageMock(t) - mockL2Syncer := mocks.NewL2BridgeSyncerMock(t) + mockStorage := mocks.NewAggSenderStorage(t) + mockL2Syncer := mocks.NewL2BridgeSyncer(t) mockAggLayerClient := agglayer.NewAgglayerClientMock(t) - mockL1InfoTreeSyncer := mocks.NewL1InfoTreeSyncerMock(t) + 
mockL1InfoTreeSyncer := mocks.NewL1InfoTreeSyncer(t) aggSender := &AggSender{ log: log.WithFields("aggsender-test", "no claims test"), diff --git a/aggsender/block_notifier_polling.go b/aggsender/block_notifier_polling.go new file mode 100644 index 00000000..17dafefa --- /dev/null +++ b/aggsender/block_notifier_polling.go @@ -0,0 +1,219 @@ +package aggsender + +import ( + "context" + "fmt" + "math/big" + "sync" + "time" + + "github.com/0xPolygon/cdk/aggsender/types" + "github.com/0xPolygon/cdk/etherman" +) + +var ( + timeNowFunc = time.Now +) + +const ( + AutomaticBlockInterval = time.Second * 0 + // minBlockInterval is the minimum interval at which the AggSender will check for new blocks + minBlockInterval = time.Second + // maxBlockInterval is the maximum interval at which the AggSender will check for new blocks + maxBlockInterval = time.Minute +) + +type ConfigBlockNotifierPolling struct { + // BlockFinalityType is the finality of the block to be notified + BlockFinalityType etherman.BlockNumberFinality + // CheckNewBlockInterval is the interval at which the AggSender will check for new blocks + // if is 0 it will be calculated automatically + CheckNewBlockInterval time.Duration +} + +type BlockNotifierPolling struct { + ethClient types.EthClient + blockFinality *big.Int + logger types.Logger + config ConfigBlockNotifierPolling + mu sync.Mutex + lastStatus *blockNotifierPollingInternalStatus + types.GenericSubscriber[types.EventNewBlock] +} + +// NewBlockNotifierPolling creates a new BlockNotifierPolling. +// if param `subscriber` is nil a new GenericSubscriberImpl[types.EventNewBlock] will be created. +// To use this class you need to subscribe and each time that a new block appear the subscriber +// will be notified through the channel. 
(check unit tests TestExploratoryBlockNotifierPolling +// for more information) +func NewBlockNotifierPolling(ethClient types.EthClient, + config ConfigBlockNotifierPolling, + logger types.Logger, + subscriber types.GenericSubscriber[types.EventNewBlock]) (*BlockNotifierPolling, error) { + if subscriber == nil { + subscriber = NewGenericSubscriberImpl[types.EventNewBlock]() + } + finality, err := config.BlockFinalityType.ToBlockNum() + if err != nil { + return nil, fmt.Errorf("failed to convert block finality type to block number: %w", err) + } + + return &BlockNotifierPolling{ + ethClient: ethClient, + blockFinality: finality, + logger: logger, + config: config, + GenericSubscriber: subscriber, + }, nil +} + +func (b *BlockNotifierPolling) String() string { + status := b.getGlobalStatus() + res := fmt.Sprintf("BlockNotifierPolling: finality=%s", b.config.BlockFinalityType) + if status != nil { + res += fmt.Sprintf(" lastBlockSeen=%d", status.lastBlockSeen) + } else { + res += " lastBlockSeen=none" + } + return res +} + +// Start starts the BlockNotifierPolling blocking the current goroutine +func (b *BlockNotifierPolling) Start(ctx context.Context) { + ticker := time.NewTimer(b.config.CheckNewBlockInterval) + defer ticker.Stop() + + var status *blockNotifierPollingInternalStatus = nil + + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + delay, newStatus, event := b.step(ctx, status) + status = newStatus + b.setGlobalStatus(status) + if event != nil { + b.Publish(*event) + } + ticker.Reset(delay) + } + } +} + +func (b *BlockNotifierPolling) setGlobalStatus(status *blockNotifierPollingInternalStatus) { + b.mu.Lock() + defer b.mu.Unlock() + b.lastStatus = status +} + +func (b *BlockNotifierPolling) getGlobalStatus() *blockNotifierPollingInternalStatus { + b.mu.Lock() + defer b.mu.Unlock() + if b.lastStatus == nil { + return nil + } + copyStatus := *b.lastStatus + return ©Status +} + +// step is the main function of the BlockNotifierPolling, it 
checks if there is a new block +// it returns: +// - the delay for the next check +// - the new status +// - the new even to emit or nil +func (b *BlockNotifierPolling) step(ctx context.Context, + previousState *blockNotifierPollingInternalStatus) (time.Duration, + *blockNotifierPollingInternalStatus, *types.EventNewBlock) { + currentBlock, err := b.ethClient.HeaderByNumber(ctx, b.blockFinality) + if err == nil && currentBlock == nil { + err = fmt.Errorf("failed to get block number: return a nil block") + } + if err != nil { + b.logger.Errorf("Failed to get block number: %v", err) + newState := previousState.clear() + return b.nextBlockRequestDelay(nil, err), newState, nil + } + if previousState == nil { + newState := previousState.intialBlock(currentBlock.Number.Uint64()) + return b.nextBlockRequestDelay(previousState, nil), newState, nil + } + if currentBlock.Number.Uint64() == previousState.lastBlockSeen { + // No new block, so no changes on state + return b.nextBlockRequestDelay(previousState, nil), previousState, nil + } + // New blockNumber! + eventToEmit := &types.EventNewBlock{ + BlockNumber: currentBlock.Number.Uint64(), + BlockFinalityType: b.config.BlockFinalityType, + } + + if currentBlock.Number.Uint64()-previousState.lastBlockSeen != 1 { + b.logger.Warnf("Missed block(s) [finality:%s]: %d -> %d", + b.config.BlockFinalityType, previousState.lastBlockSeen, currentBlock.Number.Uint64()) + // It start from scratch because something fails in calculation of block period + newState := previousState.intialBlock(currentBlock.Number.Uint64()) + return b.nextBlockRequestDelay(nil, nil), newState, eventToEmit + } + newState := previousState.incommingNewBlock(currentBlock.Number.Uint64()) + b.logger.Debugf("New block seen [finality:%s]: %d. 
blockRate:%s", + b.config.BlockFinalityType, currentBlock.Number.Uint64(), newState.previousBlockTime) + + return b.nextBlockRequestDelay(newState, nil), newState, eventToEmit +} + +func (b *BlockNotifierPolling) nextBlockRequestDelay(status *blockNotifierPollingInternalStatus, + err error) time.Duration { + if b.config.CheckNewBlockInterval == AutomaticBlockInterval { + return b.config.CheckNewBlockInterval + } + // Initial stages wait the minimum interval to increas accuracy + if status == nil || status.previousBlockTime == nil { + return minBlockInterval + } + if err != nil { + // If error we wait twice the min interval + return minBlockInterval * 2 //nolint:mnd // 2 times the interval + } + // we have a previous block time so we can calculate the interval + now := timeNowFunc() + expectedTimeNextBlock := status.lastBlockTime.Add(*status.previousBlockTime) + distanceToNextBlock := expectedTimeNextBlock.Sub(now) + interval := distanceToNextBlock * 4 / 5 //nolint:mnd // 80% of for reach the next block + return max(minBlockInterval, min(maxBlockInterval, interval)) +} + +type blockNotifierPollingInternalStatus struct { + lastBlockSeen uint64 + lastBlockTime time.Time // first appear of block lastBlockSeen + previousBlockTime *time.Duration // time of the previous block to appear +} + +func (s *blockNotifierPollingInternalStatus) String() string { + if s == nil { + return "nil" + } + return fmt.Sprintf("lastBlockSeen=%d lastBlockTime=%s previousBlockTime=%s", + s.lastBlockSeen, s.lastBlockTime, s.previousBlockTime) +} + +func (s *blockNotifierPollingInternalStatus) clear() *blockNotifierPollingInternalStatus { + return &blockNotifierPollingInternalStatus{} +} + +func (s *blockNotifierPollingInternalStatus) intialBlock(block uint64) *blockNotifierPollingInternalStatus { + return &blockNotifierPollingInternalStatus{ + lastBlockSeen: block, + lastBlockTime: timeNowFunc(), + } +} + +func (s *blockNotifierPollingInternalStatus) incommingNewBlock(block uint64) 
*blockNotifierPollingInternalStatus { + now := timeNowFunc() + timePreviousBlock := now.Sub(s.lastBlockTime) + return &blockNotifierPollingInternalStatus{ + lastBlockSeen: block, + lastBlockTime: now, + previousBlockTime: &timePreviousBlock, + } +} diff --git a/aggsender/block_notifier_polling_test.go b/aggsender/block_notifier_polling_test.go new file mode 100644 index 00000000..83b3b643 --- /dev/null +++ b/aggsender/block_notifier_polling_test.go @@ -0,0 +1,211 @@ +package aggsender + +import ( + "context" + "fmt" + "math/big" + "os" + "testing" + "time" + + "github.com/0xPolygon/cdk/aggsender/mocks" + aggsendertypes "github.com/0xPolygon/cdk/aggsender/types" + "github.com/0xPolygon/cdk/etherman" + "github.com/0xPolygon/cdk/log" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestExploratoryBlockNotifierPolling(t *testing.T) { + t.Skip() + urlRPCL1 := os.Getenv("L1URL") + fmt.Println("URL=", urlRPCL1) + ethClient, err := ethclient.Dial(urlRPCL1) + require.NoError(t, err) + + sut, errSut := NewBlockNotifierPolling(ethClient, + ConfigBlockNotifierPolling{ + BlockFinalityType: etherman.LatestBlock, + }, log.WithFields("test", "test"), nil) + require.NoError(t, errSut) + go sut.Start(context.Background()) + ch := sut.Subscribe("test") + for { + select { + case block := <-ch: + fmt.Println(block) + } + } +} + +func TestBlockNotifierPollingStep(t *testing.T) { + time0 := time.Unix(1731322117, 0) + period0 := time.Second * 10 + period0_80percent := time.Second * 8 + time1 := time0.Add(period0) + tests := []struct { + name string + previousStatus *blockNotifierPollingInternalStatus + HeaderByNumberError bool + HeaderByNumberErrorNumber uint64 + forcedTime time.Time + expectedStatus *blockNotifierPollingInternalStatus + expectedDelay time.Duration + expectedEvent *aggsendertypes.EventNewBlock + }{ + { + name: "initial->receive block", + 
previousStatus: nil, + HeaderByNumberError: false, + HeaderByNumberErrorNumber: 100, + forcedTime: time0, + expectedStatus: &blockNotifierPollingInternalStatus{ + lastBlockSeen: 100, + lastBlockTime: time0, + }, + expectedDelay: minBlockInterval, + expectedEvent: nil, + }, + { + name: "received block->error", + previousStatus: nil, + HeaderByNumberError: true, + forcedTime: time0, + expectedStatus: &blockNotifierPollingInternalStatus{}, + expectedDelay: minBlockInterval, + expectedEvent: nil, + }, + + { + name: "have block period->receive new block", + previousStatus: &blockNotifierPollingInternalStatus{ + lastBlockSeen: 100, + lastBlockTime: time0, + previousBlockTime: &period0, + }, + HeaderByNumberError: false, + HeaderByNumberErrorNumber: 101, + forcedTime: time1, + expectedStatus: &blockNotifierPollingInternalStatus{ + lastBlockSeen: 101, + lastBlockTime: time1, + previousBlockTime: &period0, + }, + expectedDelay: period0_80percent, + expectedEvent: &aggsendertypes.EventNewBlock{ + BlockNumber: 101, + }, + }, + } + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + testData := newBlockNotifierPollingTestData(t, nil) + + timeNowFunc = func() time.Time { + return tt.forcedTime + } + + if tt.HeaderByNumberError == false { + hdr1 := &types.Header{ + Number: big.NewInt(int64(tt.HeaderByNumberErrorNumber)), + } + testData.ethClientMock.EXPECT().HeaderByNumber(mock.Anything, mock.Anything).Return(hdr1, nil).Once() + } else { + testData.ethClientMock.EXPECT().HeaderByNumber(mock.Anything, mock.Anything).Return(nil, fmt.Errorf("error")).Once() + } + delay, newStatus, event := testData.sut.step(context.TODO(), tt.previousStatus) + require.Equal(t, tt.expectedDelay, delay, "delay") + require.Equal(t, tt.expectedStatus, newStatus, "new_status") + if tt.expectedEvent == nil { + require.Nil(t, event, "send_event") + } else { + require.Equal(t, tt.expectedEvent.BlockNumber, event.BlockNumber, "send_event") + } + }) + } +} + +func 
TestDelayNoPreviousBLock(t *testing.T) { + testData := newBlockNotifierPollingTestData(t, nil) + status := blockNotifierPollingInternalStatus{ + lastBlockSeen: 100, + } + delay := testData.sut.nextBlockRequestDelay(&status, nil) + require.Equal(t, minBlockInterval, delay) +} + +func TestDelayBLock(t *testing.T) { + testData := newBlockNotifierPollingTestData(t, nil) + pt := time.Second * 10 + status := blockNotifierPollingInternalStatus{ + lastBlockSeen: 100, + previousBlockTime: &pt, + } + delay := testData.sut.nextBlockRequestDelay(&status, nil) + require.Equal(t, minBlockInterval, delay) +} + +func TestNewBlockNotifierPolling(t *testing.T) { + testData := newBlockNotifierPollingTestData(t, nil) + require.NotNil(t, testData.sut) + _, err := NewBlockNotifierPolling(testData.ethClientMock, ConfigBlockNotifierPolling{ + BlockFinalityType: etherman.BlockNumberFinality("invalid"), + }, log.WithFields("test", "test"), nil) + require.Error(t, err) +} + +func TestBlockNotifierPollingString(t *testing.T) { + testData := newBlockNotifierPollingTestData(t, nil) + require.NotEmpty(t, testData.sut.String()) + testData.sut.lastStatus = &blockNotifierPollingInternalStatus{ + lastBlockSeen: 100, + } + require.NotEmpty(t, testData.sut.String()) +} + +func TestBlockNotifierPollingStart(t *testing.T) { + testData := newBlockNotifierPollingTestData(t, nil) + ch := testData.sut.Subscribe("test") + hdr1 := &types.Header{ + Number: big.NewInt(100), + } + testData.ethClientMock.EXPECT().HeaderByNumber(mock.Anything, mock.Anything).Return(hdr1, nil).Once() + hdr2 := &types.Header{ + Number: big.NewInt(101), + } + testData.ethClientMock.EXPECT().HeaderByNumber(mock.Anything, mock.Anything).Return(hdr2, nil).Once() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go testData.sut.Start(ctx) + block := <-ch + require.NotNil(t, block) + require.Equal(t, uint64(101), block.BlockNumber) +} + +type blockNotifierPollingTestData struct { + sut *BlockNotifierPolling + 
ethClientMock *mocks.EthClient + ctx context.Context +} + +func newBlockNotifierPollingTestData(t *testing.T, config *ConfigBlockNotifierPolling) blockNotifierPollingTestData { + t.Helper() + if config == nil { + config = &ConfigBlockNotifierPolling{ + BlockFinalityType: etherman.LatestBlock, + CheckNewBlockInterval: time.Second, + } + } + EthClientMock := mocks.NewEthClient(t) + logger := log.WithFields("test", "BlockNotifierPolling") + sut, err := NewBlockNotifierPolling(EthClientMock, *config, logger, nil) + require.NoError(t, err) + return blockNotifierPollingTestData{ + sut: sut, + ethClientMock: EthClientMock, + ctx: context.TODO(), + } +} diff --git a/aggsender/config.go b/aggsender/config.go index 4ff78f96..8ae0b759 100644 --- a/aggsender/config.go +++ b/aggsender/config.go @@ -1,6 +1,8 @@ package aggsender import ( + "fmt" + "github.com/0xPolygon/cdk/config/types" ) @@ -18,6 +20,13 @@ type Config struct { AggsenderPrivateKey types.KeystoreFileConfig `mapstructure:"AggsenderPrivateKey"` // URLRPCL2 is the URL of the L2 RPC node URLRPCL2 string `mapstructure:"URLRPCL2"` + // BlockFinality indicates which finality follows AggLayer + BlockFinality string `jsonschema:"enum=LatestBlock, enum=SafeBlock, enum=PendingBlock, enum=FinalizedBlock, enum=EarliestBlock" mapstructure:"BlockFinality"` //nolint:lll + // EpochNotificationPercentage indicates the percentage of the epoch + // the AggSender should send the certificate + // 0 -> Begin + // 50 -> Middle + EpochNotificationPercentage uint `mapstructure:"EpochNotificationPercentage"` // SaveCertificatesToFilesPath if != "" tells the AggSender to save the certificates to a file in this path SaveCertificatesToFilesPath string `mapstructure:"SaveCertificatesToFilesPath"` } @@ -31,5 +40,7 @@ func (c Config) String() string { "AggsenderPrivateKeyPath: " + c.AggsenderPrivateKey.Path + "\n" + "AggsenderPrivateKeyPassword: " + c.AggsenderPrivateKey.Password + "\n" + "URLRPCL2: " + c.URLRPCL2 + "\n" + + "BlockFinality: " + 
c.BlockFinality + "\n" + + "EpochNotificationPercentage: " + fmt.Sprintf("%d", c.EpochNotificationPercentage) + "\n" + "SaveCertificatesToFilesPath: " + c.SaveCertificatesToFilesPath + "\n" } diff --git a/aggsender/epoch_notifier_per_block.go b/aggsender/epoch_notifier_per_block.go new file mode 100644 index 00000000..3b560731 --- /dev/null +++ b/aggsender/epoch_notifier_per_block.go @@ -0,0 +1,204 @@ +package aggsender + +import ( + "context" + "fmt" + + "github.com/0xPolygon/cdk/agglayer" + "github.com/0xPolygon/cdk/aggsender/types" +) + +const ( + maxPercent = 100.0 +) + +type ExtraInfoEventEpoch struct { + PendingBlocks int +} + +func (e *ExtraInfoEventEpoch) String() string { + return fmt.Sprintf("ExtraInfoEventEpoch: pendingBlocks=%d", e.PendingBlocks) +} + +type ConfigEpochNotifierPerBlock struct { + StartingEpochBlock uint64 + NumBlockPerEpoch uint + + // EpochNotificationPercentage + // 0 -> begin new Epoch + // 50 -> middle of epoch + // 100 -> end of epoch (same as 0) + EpochNotificationPercentage uint +} + +func NewConfigEpochNotifierPerBlock(aggLayer agglayer.AggLayerClientGetEpochConfiguration, + epochNotificationPercentage uint) (*ConfigEpochNotifierPerBlock, error) { + if aggLayer == nil { + return nil, fmt.Errorf("newConfigEpochNotifierPerBlock: aggLayerClient is required") + } + clockConfig, err := aggLayer.GetEpochConfiguration() + if err != nil { + return nil, fmt.Errorf("newConfigEpochNotifierPerBlock: error getting clock configuration from AggLayer: %w", err) + } + return &ConfigEpochNotifierPerBlock{ + StartingEpochBlock: clockConfig.GenesisBlock, + NumBlockPerEpoch: uint(clockConfig.EpochDuration), + EpochNotificationPercentage: epochNotificationPercentage, + }, nil +} + +func (c *ConfigEpochNotifierPerBlock) Validate() error { + if c.NumBlockPerEpoch == 0 { + return fmt.Errorf("numBlockPerEpoch: num block per epoch is required > 0 ") + } + if c.EpochNotificationPercentage >= maxPercent { + return fmt.Errorf("epochNotificationPercentage: 
must be between 0 and 99") + } + return nil +} + +type EpochNotifierPerBlock struct { + blockNotifier types.BlockNotifier + logger types.Logger + + lastStartingEpochBlock uint64 + + Config ConfigEpochNotifierPerBlock + types.GenericSubscriber[types.EpochEvent] +} + +func NewEpochNotifierPerBlock(blockNotifier types.BlockNotifier, + logger types.Logger, + config ConfigEpochNotifierPerBlock, + subscriber types.GenericSubscriber[types.EpochEvent]) (*EpochNotifierPerBlock, error) { + if subscriber == nil { + subscriber = NewGenericSubscriberImpl[types.EpochEvent]() + } + + err := config.Validate() + if err != nil { + return nil, fmt.Errorf("invalid config: %w", err) + } + return &EpochNotifierPerBlock{ + blockNotifier: blockNotifier, + logger: logger, + lastStartingEpochBlock: config.StartingEpochBlock, + Config: config, + GenericSubscriber: subscriber, + }, nil +} + +func (e *EpochNotifierPerBlock) String() string { + return fmt.Sprintf("EpochNotifierPerBlock: startingEpochBlock=%d, numBlockPerEpoch=%d,"+ + " EpochNotificationPercentage=%d", + e.Config.StartingEpochBlock, e.Config.NumBlockPerEpoch, e.Config.EpochNotificationPercentage) +} + +// StartAsync starts the notifier in a goroutine +func (e *EpochNotifierPerBlock) StartAsync(ctx context.Context) { + eventNewBlockChannel := e.blockNotifier.Subscribe("EpochNotifierPerBlock") + go e.startInternal(ctx, eventNewBlockChannel) +} + +// Start starts the notifier synchronously +func (e *EpochNotifierPerBlock) Start(ctx context.Context) { + eventNewBlockChannel := e.blockNotifier.Subscribe("EpochNotifierPerBlock") + e.startInternal(ctx, eventNewBlockChannel) +} + +func (e *EpochNotifierPerBlock) startInternal(ctx context.Context, eventNewBlockChannel <-chan types.EventNewBlock) { + status := internalStatus{ + lastBlockSeen: e.Config.StartingEpochBlock, + waitingForEpoch: e.epochNumber(e.Config.StartingEpochBlock), + } + for { + select { + case <-ctx.Done(): + return + case newBlock := <-eventNewBlockChannel: + var event 
*types.EpochEvent + status, event = e.step(status, newBlock) + if event != nil { + e.logger.Debugf("new Epoch Event: %s", event.String()) + e.GenericSubscriber.Publish(*event) + } + } + } +} + +type internalStatus struct { + lastBlockSeen uint64 + waitingForEpoch uint64 +} + +func (e *EpochNotifierPerBlock) step(status internalStatus, + newBlock types.EventNewBlock) (internalStatus, *types.EpochEvent) { + currentBlock := newBlock.BlockNumber + if currentBlock < e.Config.StartingEpochBlock { + // This is a bit strange, the first epoch is in the future + e.logger.Warnf("Block number %d is before the starting first epoch block %d."+ + " Please check your config", currentBlock, e.Config.StartingEpochBlock) + return status, nil + } + // No new block + if currentBlock <= status.lastBlockSeen { + return status, nil + } + status.lastBlockSeen = currentBlock + + needNotify, closingEpoch := e.isNotificationRequired(currentBlock, status.waitingForEpoch) + if needNotify { + // Notify the epoch has started + info := e.infoEpoch(currentBlock, closingEpoch) + status.waitingForEpoch = closingEpoch + 1 + return status, &types.EpochEvent{ + Epoch: closingEpoch, + ExtraInfo: info, + } + } + return status, nil +} + +func (e *EpochNotifierPerBlock) infoEpoch(currentBlock, newEpochNotified uint64) *ExtraInfoEventEpoch { + nextBlockStartingEpoch := e.endBlockEpoch(newEpochNotified) + return &ExtraInfoEventEpoch{ + PendingBlocks: int(nextBlockStartingEpoch - currentBlock), + } +} +func (e *EpochNotifierPerBlock) percentEpoch(currentBlock uint64) float64 { + epoch := e.epochNumber(currentBlock) + startingBlock := e.startingBlockEpoch(epoch) + elapsedBlocks := currentBlock - startingBlock + return float64(elapsedBlocks) / float64(e.Config.NumBlockPerEpoch) +} +func (e *EpochNotifierPerBlock) isNotificationRequired(currentBlock, lastEpochNotified uint64) (bool, uint64) { + percentEpoch := e.percentEpoch(currentBlock) + thresholdPercent := float64(e.Config.EpochNotificationPercentage) / 
maxPercent + maxTresholdPercent := float64(e.Config.NumBlockPerEpoch-1) / float64(e.Config.NumBlockPerEpoch) + if thresholdPercent > maxTresholdPercent { + thresholdPercent = maxTresholdPercent + } + if percentEpoch < thresholdPercent { + e.logger.Debugf("Block %d is at %f%% of the epoch no notify", currentBlock, percentEpoch*maxPercent) + return false, e.epochNumber(currentBlock) + } + nextEpoch := e.epochNumber(currentBlock) + 1 + return nextEpoch > lastEpochNotified, e.epochNumber(currentBlock) +} + +func (e *EpochNotifierPerBlock) startingBlockEpoch(epoch uint64) uint64 { + if epoch == 0 { + return e.Config.StartingEpochBlock - 1 + } + return e.Config.StartingEpochBlock + ((epoch - 1) * uint64(e.Config.NumBlockPerEpoch)) +} + +func (e *EpochNotifierPerBlock) endBlockEpoch(epoch uint64) uint64 { + return e.startingBlockEpoch(epoch + 1) +} +func (e *EpochNotifierPerBlock) epochNumber(currentBlock uint64) uint64 { + if currentBlock < e.Config.StartingEpochBlock { + return 0 + } + return 1 + ((currentBlock - e.Config.StartingEpochBlock) / uint64(e.Config.NumBlockPerEpoch)) +} diff --git a/aggsender/epoch_notifier_per_block_test.go b/aggsender/epoch_notifier_per_block_test.go new file mode 100644 index 00000000..203116d0 --- /dev/null +++ b/aggsender/epoch_notifier_per_block_test.go @@ -0,0 +1,219 @@ +package aggsender + +import ( + "context" + "fmt" + "testing" + + "github.com/0xPolygon/cdk/agglayer" + "github.com/0xPolygon/cdk/aggsender/mocks" + "github.com/0xPolygon/cdk/aggsender/types" + "github.com/0xPolygon/cdk/etherman" + "github.com/0xPolygon/cdk/log" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestStartingBlockEpoch(t *testing.T) { + testData := newNotifierPerBlockTestData(t, &ConfigEpochNotifierPerBlock{ + StartingEpochBlock: 9, + NumBlockPerEpoch: 10, + EpochNotificationPercentage: 80, + }) + // EPOCH: ---0 ----+----1 -----+----2 ----+----3 ----+----4 ----+----5 ----+---- + // BLOCK: 9 19 29 39 49 + 
require.Equal(t, uint64(8), testData.sut.startingBlockEpoch(0)) + require.Equal(t, uint64(9), testData.sut.startingBlockEpoch(1)) + require.Equal(t, uint64(19), testData.sut.startingBlockEpoch(2)) +} + +func TestEpochNotifyPercentageEdgeCase0(t *testing.T) { + testData := newNotifierPerBlockTestData(t, nil) + testData.sut.Config.EpochNotificationPercentage = 0 + notify, epoch := testData.sut.isNotificationRequired(9, 0) + require.True(t, notify) + require.Equal(t, uint64(1), epoch) +} + +// if percent is 99 means at end of epoch, so in a config 0, epoch-size=10, +// 99% means last block of epoch +func TestEpochNotifyPercentageEdgeCase99(t *testing.T) { + testData := newNotifierPerBlockTestData(t, nil) + testData.sut.Config.EpochNotificationPercentage = 99 + notify, epoch := testData.sut.isNotificationRequired(9, 0) + require.True(t, notify) + require.Equal(t, uint64(1), epoch) +} + +func TestEpochStep(t *testing.T) { + testData := newNotifierPerBlockTestData(t, &ConfigEpochNotifierPerBlock{ + StartingEpochBlock: 9, + NumBlockPerEpoch: 10, + EpochNotificationPercentage: 50, + }) + // EPOCH: ---0 ----+----1 -----+----2 ----+----3 ----+----4 ----+----5 ----+---- + // BLOCK: 9 19 29 39 49 + // start EPOCH#1 -> 9 + // end EPOCH#1 -> 19 + // start EPOCH#2 -> 19 + + tests := []struct { + name string + initialStatus internalStatus + blockNumber uint64 + expectedEvent bool + expectedEventEpoch uint64 + expectedEventPendingBlocks int + }{ + { + name: "First block of epoch, no notification until close to end", + initialStatus: internalStatus{lastBlockSeen: 8, waitingForEpoch: 0}, + blockNumber: 9, + expectedEvent: false, + expectedEventEpoch: 1, + expectedEventPendingBlocks: 0, + }, + { + name: "epoch#1 close to end, notify it!", + initialStatus: internalStatus{lastBlockSeen: 17, waitingForEpoch: 0}, + blockNumber: 18, + expectedEvent: true, + expectedEventEpoch: 1, // Finishing epoch 0 + expectedEventPendingBlocks: 1, // 19 - 18 + }, + { + name: "epoch#1 close to end, but 
already notified", + initialStatus: internalStatus{lastBlockSeen: 17, waitingForEpoch: 2}, + blockNumber: 18, + expectedEvent: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + _, event := testData.sut.step(tt.initialStatus, types.EventNewBlock{BlockNumber: tt.blockNumber, BlockFinalityType: etherman.LatestBlock}) + require.Equal(t, tt.expectedEvent, event != nil) + if event != nil { + require.Equal(t, tt.expectedEventEpoch, event.Epoch, "Epoch") + extraInfo, ok := event.ExtraInfo.(*ExtraInfoEventEpoch) + require.True(t, ok, "ExtraInfo") + require.Equal(t, tt.expectedEventPendingBlocks, extraInfo.PendingBlocks, "PendingBlocks") + } + }) + } +} + +func TestNewConfigEpochNotifierPerBlock(t *testing.T) { + _, err := NewConfigEpochNotifierPerBlock(nil, 1) + require.Error(t, err) + aggLayerMock := agglayer.NewAgglayerClientMock(t) + aggLayerMock.On("GetEpochConfiguration").Return(nil, fmt.Errorf("error")).Once() + _, err = NewConfigEpochNotifierPerBlock(aggLayerMock, 1) + require.Error(t, err) + cfgAggLayer := &agglayer.ClockConfiguration{ + GenesisBlock: 123, + EpochDuration: 456, + } + aggLayerMock.On("GetEpochConfiguration").Return(cfgAggLayer, nil).Once() + cfg, err := NewConfigEpochNotifierPerBlock(aggLayerMock, 1) + require.NoError(t, err) + require.Equal(t, uint64(123), cfg.StartingEpochBlock) + require.Equal(t, uint(456), cfg.NumBlockPerEpoch) +} + +func TestNotifyEpoch(t *testing.T) { + testData := newNotifierPerBlockTestData(t, nil) + ch := testData.sut.Subscribe("test") + chBlocks := make(chan types.EventNewBlock) + testData.blockNotifierMock.EXPECT().Subscribe(mock.Anything).Return(chBlocks) + testData.sut.StartAsync(testData.ctx) + chBlocks <- types.EventNewBlock{BlockNumber: 109, BlockFinalityType: etherman.LatestBlock} + epochEvent := <-ch + require.Equal(t, uint64(11), epochEvent.Epoch) + testData.ctx.Done() +} + +func TestStepSameEpoch(t *testing.T) { + testData := newNotifierPerBlockTestData(t, nil) + status := 
internalStatus{ + lastBlockSeen: 100, + waitingForEpoch: testData.sut.epochNumber(100), + } + newStatus, _ := testData.sut.step(status, types.EventNewBlock{BlockNumber: 103, BlockFinalityType: etherman.LatestBlock}) + require.Equal(t, uint64(103), newStatus.lastBlockSeen) + require.Equal(t, status.waitingForEpoch, newStatus.waitingForEpoch) +} + +func TestStepNotifyEpoch(t *testing.T) { + testData := newNotifierPerBlockTestData(t, nil) + status := internalStatus{ + lastBlockSeen: 100, + waitingForEpoch: testData.sut.epochNumber(100), + } + status, _ = testData.sut.step(status, types.EventNewBlock{BlockNumber: 109, BlockFinalityType: etherman.LatestBlock}) + require.Equal(t, uint64(109), status.lastBlockSeen) + require.Equal(t, uint64(12), status.waitingForEpoch) +} + +func TestBlockEpochNumber(t *testing.T) { + testData := newNotifierPerBlockTestData(t, &ConfigEpochNotifierPerBlock{ + StartingEpochBlock: 105, + NumBlockPerEpoch: 10, + EpochNotificationPercentage: 1, + }) + require.Equal(t, uint64(0), testData.sut.epochNumber(0)) + require.Equal(t, uint64(0), testData.sut.epochNumber(104)) + require.Equal(t, uint64(1), testData.sut.epochNumber(105)) + require.Equal(t, uint64(1), testData.sut.epochNumber(114)) + require.Equal(t, uint64(2), testData.sut.epochNumber(115)) + require.Equal(t, uint64(2), testData.sut.epochNumber(116)) + require.Equal(t, uint64(2), testData.sut.epochNumber(124)) + require.Equal(t, uint64(3), testData.sut.epochNumber(125)) +} + +func TestBlockBeforeEpoch(t *testing.T) { + testData := newNotifierPerBlockTestData(t, &ConfigEpochNotifierPerBlock{ + StartingEpochBlock: 105, + NumBlockPerEpoch: 10, + EpochNotificationPercentage: 1, + }) + status := internalStatus{ + lastBlockSeen: 104, + waitingForEpoch: testData.sut.epochNumber(104), + } + newStatus, _ := testData.sut.step(status, types.EventNewBlock{BlockNumber: 104, BlockFinalityType: etherman.LatestBlock}) + // We are previous block of first epoch, so we should do nothing + require.Equal(t, 
// GenericSubscriberImpl is a fan-out publish/subscribe helper: every value
// handed to Publish is delivered to each channel obtained via Subscribe.
type GenericSubscriberImpl[T any] struct {
	// subs maps each subscriber channel to the name it registered with.
	subs map[chan T]string
	mu   sync.RWMutex
}

// NewGenericSubscriberImpl returns an empty, ready-to-use subscriber registry.
func NewGenericSubscriberImpl[T any]() *GenericSubscriberImpl[T] {
	return &GenericSubscriberImpl[T]{subs: make(map[chan T]string)}
}

// Subscribe registers a new named subscriber and returns the (unbuffered)
// channel on which it will receive every value published afterwards.
func (g *GenericSubscriberImpl[T]) Subscribe(subscriberName string) <-chan T {
	g.mu.Lock()
	defer g.mu.Unlock()
	sub := make(chan T)
	g.subs[sub] = subscriberName
	return sub
}

// Publish delivers data to every registered subscriber, each in its own
// goroutine so that a slow receiver cannot block the publisher.
// NOTE(review): a subscriber that never reads leaks one goroutine per publish,
// and ordering across successive Publish calls is not guaranteed — confirm
// callers tolerate both.
func (g *GenericSubscriberImpl[T]) Publish(data T) {
	g.mu.RLock()
	defer g.mu.RUnlock()
	for sub := range g.subs {
		go func(target chan T) {
			target <- data
		}(sub)
	}
}
certificateID)} +} + +func (_c *AggSenderStorage_DeleteCertificate_Call) Run(run func(ctx context.Context, certificateID common.Hash)) *AggSenderStorage_DeleteCertificate_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(common.Hash)) + }) + return _c +} + +func (_c *AggSenderStorage_DeleteCertificate_Call) Return(_a0 error) *AggSenderStorage_DeleteCertificate_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *AggSenderStorage_DeleteCertificate_Call) RunAndReturn(run func(context.Context, common.Hash) error) *AggSenderStorage_DeleteCertificate_Call { + _c.Call.Return(run) + return _c +} + +// GetCertificateByHeight provides a mock function with given fields: height +func (_m *AggSenderStorage) GetCertificateByHeight(height uint64) (types.CertificateInfo, error) { + ret := _m.Called(height) + + if len(ret) == 0 { + panic("no return value specified for GetCertificateByHeight") + } + + var r0 types.CertificateInfo + var r1 error + if rf, ok := ret.Get(0).(func(uint64) (types.CertificateInfo, error)); ok { + return rf(height) + } + if rf, ok := ret.Get(0).(func(uint64) types.CertificateInfo); ok { + r0 = rf(height) + } else { + r0 = ret.Get(0).(types.CertificateInfo) + } + + if rf, ok := ret.Get(1).(func(uint64) error); ok { + r1 = rf(height) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// AggSenderStorage_GetCertificateByHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetCertificateByHeight' +type AggSenderStorage_GetCertificateByHeight_Call struct { + *mock.Call +} + +// GetCertificateByHeight is a helper method to define mock.On call +// - height uint64 +func (_e *AggSenderStorage_Expecter) GetCertificateByHeight(height interface{}) *AggSenderStorage_GetCertificateByHeight_Call { + return &AggSenderStorage_GetCertificateByHeight_Call{Call: _e.mock.On("GetCertificateByHeight", height)} +} + +func (_c *AggSenderStorage_GetCertificateByHeight_Call) Run(run 
func(height uint64)) *AggSenderStorage_GetCertificateByHeight_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(uint64)) + }) + return _c +} + +func (_c *AggSenderStorage_GetCertificateByHeight_Call) Return(_a0 types.CertificateInfo, _a1 error) *AggSenderStorage_GetCertificateByHeight_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *AggSenderStorage_GetCertificateByHeight_Call) RunAndReturn(run func(uint64) (types.CertificateInfo, error)) *AggSenderStorage_GetCertificateByHeight_Call { + _c.Call.Return(run) + return _c +} + +// GetCertificatesByStatus provides a mock function with given fields: status +func (_m *AggSenderStorage) GetCertificatesByStatus(status []agglayer.CertificateStatus) ([]*types.CertificateInfo, error) { + ret := _m.Called(status) + + if len(ret) == 0 { + panic("no return value specified for GetCertificatesByStatus") + } + + var r0 []*types.CertificateInfo + var r1 error + if rf, ok := ret.Get(0).(func([]agglayer.CertificateStatus) ([]*types.CertificateInfo, error)); ok { + return rf(status) + } + if rf, ok := ret.Get(0).(func([]agglayer.CertificateStatus) []*types.CertificateInfo); ok { + r0 = rf(status) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*types.CertificateInfo) + } + } + + if rf, ok := ret.Get(1).(func([]agglayer.CertificateStatus) error); ok { + r1 = rf(status) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// AggSenderStorage_GetCertificatesByStatus_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetCertificatesByStatus' +type AggSenderStorage_GetCertificatesByStatus_Call struct { + *mock.Call +} + +// GetCertificatesByStatus is a helper method to define mock.On call +// - status []agglayer.CertificateStatus +func (_e *AggSenderStorage_Expecter) GetCertificatesByStatus(status interface{}) *AggSenderStorage_GetCertificatesByStatus_Call { + return &AggSenderStorage_GetCertificatesByStatus_Call{Call: _e.mock.On("GetCertificatesByStatus", 
status)} +} + +func (_c *AggSenderStorage_GetCertificatesByStatus_Call) Run(run func(status []agglayer.CertificateStatus)) *AggSenderStorage_GetCertificatesByStatus_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].([]agglayer.CertificateStatus)) + }) + return _c +} + +func (_c *AggSenderStorage_GetCertificatesByStatus_Call) Return(_a0 []*types.CertificateInfo, _a1 error) *AggSenderStorage_GetCertificatesByStatus_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *AggSenderStorage_GetCertificatesByStatus_Call) RunAndReturn(run func([]agglayer.CertificateStatus) ([]*types.CertificateInfo, error)) *AggSenderStorage_GetCertificatesByStatus_Call { + _c.Call.Return(run) + return _c +} + +// GetLastSentCertificate provides a mock function with given fields: +func (_m *AggSenderStorage) GetLastSentCertificate() (types.CertificateInfo, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetLastSentCertificate") + } + + var r0 types.CertificateInfo + var r1 error + if rf, ok := ret.Get(0).(func() (types.CertificateInfo, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() types.CertificateInfo); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(types.CertificateInfo) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// AggSenderStorage_GetLastSentCertificate_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLastSentCertificate' +type AggSenderStorage_GetLastSentCertificate_Call struct { + *mock.Call +} + +// GetLastSentCertificate is a helper method to define mock.On call +func (_e *AggSenderStorage_Expecter) GetLastSentCertificate() *AggSenderStorage_GetLastSentCertificate_Call { + return &AggSenderStorage_GetLastSentCertificate_Call{Call: _e.mock.On("GetLastSentCertificate")} +} + +func (_c *AggSenderStorage_GetLastSentCertificate_Call) Run(run func()) 
*AggSenderStorage_GetLastSentCertificate_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *AggSenderStorage_GetLastSentCertificate_Call) Return(_a0 types.CertificateInfo, _a1 error) *AggSenderStorage_GetLastSentCertificate_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *AggSenderStorage_GetLastSentCertificate_Call) RunAndReturn(run func() (types.CertificateInfo, error)) *AggSenderStorage_GetLastSentCertificate_Call { + _c.Call.Return(run) + return _c +} + +// SaveLastSentCertificate provides a mock function with given fields: ctx, certificate +func (_m *AggSenderStorage) SaveLastSentCertificate(ctx context.Context, certificate types.CertificateInfo) error { + ret := _m.Called(ctx, certificate) + + if len(ret) == 0 { + panic("no return value specified for SaveLastSentCertificate") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, types.CertificateInfo) error); ok { + r0 = rf(ctx, certificate) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// AggSenderStorage_SaveLastSentCertificate_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SaveLastSentCertificate' +type AggSenderStorage_SaveLastSentCertificate_Call struct { + *mock.Call +} + +// SaveLastSentCertificate is a helper method to define mock.On call +// - ctx context.Context +// - certificate types.CertificateInfo +func (_e *AggSenderStorage_Expecter) SaveLastSentCertificate(ctx interface{}, certificate interface{}) *AggSenderStorage_SaveLastSentCertificate_Call { + return &AggSenderStorage_SaveLastSentCertificate_Call{Call: _e.mock.On("SaveLastSentCertificate", ctx, certificate)} +} + +func (_c *AggSenderStorage_SaveLastSentCertificate_Call) Run(run func(ctx context.Context, certificate types.CertificateInfo)) *AggSenderStorage_SaveLastSentCertificate_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(types.CertificateInfo)) + }) + return _c +} + +func 
(_c *AggSenderStorage_SaveLastSentCertificate_Call) Return(_a0 error) *AggSenderStorage_SaveLastSentCertificate_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *AggSenderStorage_SaveLastSentCertificate_Call) RunAndReturn(run func(context.Context, types.CertificateInfo) error) *AggSenderStorage_SaveLastSentCertificate_Call { + _c.Call.Return(run) + return _c +} + +// UpdateCertificateStatus provides a mock function with given fields: ctx, certificate +func (_m *AggSenderStorage) UpdateCertificateStatus(ctx context.Context, certificate types.CertificateInfo) error { + ret := _m.Called(ctx, certificate) + + if len(ret) == 0 { + panic("no return value specified for UpdateCertificateStatus") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, types.CertificateInfo) error); ok { + r0 = rf(ctx, certificate) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// AggSenderStorage_UpdateCertificateStatus_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateCertificateStatus' +type AggSenderStorage_UpdateCertificateStatus_Call struct { + *mock.Call +} + +// UpdateCertificateStatus is a helper method to define mock.On call +// - ctx context.Context +// - certificate types.CertificateInfo +func (_e *AggSenderStorage_Expecter) UpdateCertificateStatus(ctx interface{}, certificate interface{}) *AggSenderStorage_UpdateCertificateStatus_Call { + return &AggSenderStorage_UpdateCertificateStatus_Call{Call: _e.mock.On("UpdateCertificateStatus", ctx, certificate)} +} + +func (_c *AggSenderStorage_UpdateCertificateStatus_Call) Run(run func(ctx context.Context, certificate types.CertificateInfo)) *AggSenderStorage_UpdateCertificateStatus_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(types.CertificateInfo)) + }) + return _c +} + +func (_c *AggSenderStorage_UpdateCertificateStatus_Call) Return(_a0 error) *AggSenderStorage_UpdateCertificateStatus_Call { + _c.Call.Return(_a0) 
+ return _c +} + +func (_c *AggSenderStorage_UpdateCertificateStatus_Call) RunAndReturn(run func(context.Context, types.CertificateInfo) error) *AggSenderStorage_UpdateCertificateStatus_Call { + _c.Call.Return(run) + return _c +} + +// NewAggSenderStorage creates a new instance of AggSenderStorage. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewAggSenderStorage(t interface { + mock.TestingT + Cleanup(func()) +}) *AggSenderStorage { + mock := &AggSenderStorage{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/aggsender/mocks/block_notifier.go b/aggsender/mocks/block_notifier.go new file mode 100644 index 00000000..f8fc556d --- /dev/null +++ b/aggsender/mocks/block_notifier.go @@ -0,0 +1,128 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + types "github.com/0xPolygon/cdk/aggsender/types" + mock "github.com/stretchr/testify/mock" +) + +// BlockNotifier is an autogenerated mock type for the BlockNotifier type +type BlockNotifier struct { + mock.Mock +} + +type BlockNotifier_Expecter struct { + mock *mock.Mock +} + +func (_m *BlockNotifier) EXPECT() *BlockNotifier_Expecter { + return &BlockNotifier_Expecter{mock: &_m.Mock} +} + +// String provides a mock function with given fields: +func (_m *BlockNotifier) String() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for String") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// BlockNotifier_String_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'String' +type BlockNotifier_String_Call struct { + *mock.Call +} + +// String is a helper method to define mock.On call +func (_e *BlockNotifier_Expecter) String() *BlockNotifier_String_Call { + 
return &BlockNotifier_String_Call{Call: _e.mock.On("String")} +} + +func (_c *BlockNotifier_String_Call) Run(run func()) *BlockNotifier_String_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *BlockNotifier_String_Call) Return(_a0 string) *BlockNotifier_String_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *BlockNotifier_String_Call) RunAndReturn(run func() string) *BlockNotifier_String_Call { + _c.Call.Return(run) + return _c +} + +// Subscribe provides a mock function with given fields: id +func (_m *BlockNotifier) Subscribe(id string) <-chan types.EventNewBlock { + ret := _m.Called(id) + + if len(ret) == 0 { + panic("no return value specified for Subscribe") + } + + var r0 <-chan types.EventNewBlock + if rf, ok := ret.Get(0).(func(string) <-chan types.EventNewBlock); ok { + r0 = rf(id) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan types.EventNewBlock) + } + } + + return r0 +} + +// BlockNotifier_Subscribe_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Subscribe' +type BlockNotifier_Subscribe_Call struct { + *mock.Call +} + +// Subscribe is a helper method to define mock.On call +// - id string +func (_e *BlockNotifier_Expecter) Subscribe(id interface{}) *BlockNotifier_Subscribe_Call { + return &BlockNotifier_Subscribe_Call{Call: _e.mock.On("Subscribe", id)} +} + +func (_c *BlockNotifier_Subscribe_Call) Run(run func(id string)) *BlockNotifier_Subscribe_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string)) + }) + return _c +} + +func (_c *BlockNotifier_Subscribe_Call) Return(_a0 <-chan types.EventNewBlock) *BlockNotifier_Subscribe_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *BlockNotifier_Subscribe_Call) RunAndReturn(run func(string) <-chan types.EventNewBlock) *BlockNotifier_Subscribe_Call { + _c.Call.Return(run) + return _c +} + +// NewBlockNotifier creates a new instance of BlockNotifier. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewBlockNotifier(t interface { + mock.TestingT + Cleanup(func()) +}) *BlockNotifier { + mock := &BlockNotifier{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/aggsender/mocks/epoch_notifier.go b/aggsender/mocks/epoch_notifier.go new file mode 100644 index 00000000..fb8bf35f --- /dev/null +++ b/aggsender/mocks/epoch_notifier.go @@ -0,0 +1,163 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + context "context" + + types "github.com/0xPolygon/cdk/aggsender/types" + mock "github.com/stretchr/testify/mock" +) + +// EpochNotifier is an autogenerated mock type for the EpochNotifier type +type EpochNotifier struct { + mock.Mock +} + +type EpochNotifier_Expecter struct { + mock *mock.Mock +} + +func (_m *EpochNotifier) EXPECT() *EpochNotifier_Expecter { + return &EpochNotifier_Expecter{mock: &_m.Mock} +} + +// Start provides a mock function with given fields: ctx +func (_m *EpochNotifier) Start(ctx context.Context) { + _m.Called(ctx) +} + +// EpochNotifier_Start_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Start' +type EpochNotifier_Start_Call struct { + *mock.Call +} + +// Start is a helper method to define mock.On call +// - ctx context.Context +func (_e *EpochNotifier_Expecter) Start(ctx interface{}) *EpochNotifier_Start_Call { + return &EpochNotifier_Start_Call{Call: _e.mock.On("Start", ctx)} +} + +func (_c *EpochNotifier_Start_Call) Run(run func(ctx context.Context)) *EpochNotifier_Start_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *EpochNotifier_Start_Call) Return() *EpochNotifier_Start_Call { + _c.Call.Return() + return _c +} + +func (_c *EpochNotifier_Start_Call) RunAndReturn(run 
func(context.Context)) *EpochNotifier_Start_Call { + _c.Call.Return(run) + return _c +} + +// String provides a mock function with given fields: +func (_m *EpochNotifier) String() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for String") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// EpochNotifier_String_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'String' +type EpochNotifier_String_Call struct { + *mock.Call +} + +// String is a helper method to define mock.On call +func (_e *EpochNotifier_Expecter) String() *EpochNotifier_String_Call { + return &EpochNotifier_String_Call{Call: _e.mock.On("String")} +} + +func (_c *EpochNotifier_String_Call) Run(run func()) *EpochNotifier_String_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *EpochNotifier_String_Call) Return(_a0 string) *EpochNotifier_String_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *EpochNotifier_String_Call) RunAndReturn(run func() string) *EpochNotifier_String_Call { + _c.Call.Return(run) + return _c +} + +// Subscribe provides a mock function with given fields: id +func (_m *EpochNotifier) Subscribe(id string) <-chan types.EpochEvent { + ret := _m.Called(id) + + if len(ret) == 0 { + panic("no return value specified for Subscribe") + } + + var r0 <-chan types.EpochEvent + if rf, ok := ret.Get(0).(func(string) <-chan types.EpochEvent); ok { + r0 = rf(id) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan types.EpochEvent) + } + } + + return r0 +} + +// EpochNotifier_Subscribe_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Subscribe' +type EpochNotifier_Subscribe_Call struct { + *mock.Call +} + +// Subscribe is a helper method to define mock.On call +// - id string +func (_e *EpochNotifier_Expecter) Subscribe(id 
interface{}) *EpochNotifier_Subscribe_Call { + return &EpochNotifier_Subscribe_Call{Call: _e.mock.On("Subscribe", id)} +} + +func (_c *EpochNotifier_Subscribe_Call) Run(run func(id string)) *EpochNotifier_Subscribe_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string)) + }) + return _c +} + +func (_c *EpochNotifier_Subscribe_Call) Return(_a0 <-chan types.EpochEvent) *EpochNotifier_Subscribe_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *EpochNotifier_Subscribe_Call) RunAndReturn(run func(string) <-chan types.EpochEvent) *EpochNotifier_Subscribe_Call { + _c.Call.Return(run) + return _c +} + +// NewEpochNotifier creates a new instance of EpochNotifier. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewEpochNotifier(t interface { + mock.TestingT + Cleanup(func()) +}) *EpochNotifier { + mock := &EpochNotifier{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/aggsender/mocks/mock_eth_client.go b/aggsender/mocks/eth_client.go similarity index 50% rename from aggsender/mocks/mock_eth_client.go rename to aggsender/mocks/eth_client.go index ebf618bf..6a68de41 100644 --- a/aggsender/mocks/mock_eth_client.go +++ b/aggsender/mocks/eth_client.go @@ -11,21 +11,21 @@ import ( mock "github.com/stretchr/testify/mock" ) -// EthClientMock is an autogenerated mock type for the EthClient type -type EthClientMock struct { +// EthClient is an autogenerated mock type for the EthClient type +type EthClient struct { mock.Mock } -type EthClientMock_Expecter struct { +type EthClient_Expecter struct { mock *mock.Mock } -func (_m *EthClientMock) EXPECT() *EthClientMock_Expecter { - return &EthClientMock_Expecter{mock: &_m.Mock} +func (_m *EthClient) EXPECT() *EthClient_Expecter { + return &EthClient_Expecter{mock: &_m.Mock} } // BlockNumber provides a mock function with given fields: ctx 
-func (_m *EthClientMock) BlockNumber(ctx context.Context) (uint64, error) { +func (_m *EthClient) BlockNumber(ctx context.Context) (uint64, error) { ret := _m.Called(ctx) if len(ret) == 0 { @@ -52,36 +52,36 @@ func (_m *EthClientMock) BlockNumber(ctx context.Context) (uint64, error) { return r0, r1 } -// EthClientMock_BlockNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BlockNumber' -type EthClientMock_BlockNumber_Call struct { +// EthClient_BlockNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BlockNumber' +type EthClient_BlockNumber_Call struct { *mock.Call } // BlockNumber is a helper method to define mock.On call // - ctx context.Context -func (_e *EthClientMock_Expecter) BlockNumber(ctx interface{}) *EthClientMock_BlockNumber_Call { - return &EthClientMock_BlockNumber_Call{Call: _e.mock.On("BlockNumber", ctx)} +func (_e *EthClient_Expecter) BlockNumber(ctx interface{}) *EthClient_BlockNumber_Call { + return &EthClient_BlockNumber_Call{Call: _e.mock.On("BlockNumber", ctx)} } -func (_c *EthClientMock_BlockNumber_Call) Run(run func(ctx context.Context)) *EthClientMock_BlockNumber_Call { +func (_c *EthClient_BlockNumber_Call) Run(run func(ctx context.Context)) *EthClient_BlockNumber_Call { _c.Call.Run(func(args mock.Arguments) { run(args[0].(context.Context)) }) return _c } -func (_c *EthClientMock_BlockNumber_Call) Return(_a0 uint64, _a1 error) *EthClientMock_BlockNumber_Call { +func (_c *EthClient_BlockNumber_Call) Return(_a0 uint64, _a1 error) *EthClient_BlockNumber_Call { _c.Call.Return(_a0, _a1) return _c } -func (_c *EthClientMock_BlockNumber_Call) RunAndReturn(run func(context.Context) (uint64, error)) *EthClientMock_BlockNumber_Call { +func (_c *EthClient_BlockNumber_Call) RunAndReturn(run func(context.Context) (uint64, error)) *EthClient_BlockNumber_Call { _c.Call.Return(run) return _c } // HeaderByNumber provides a mock function with given fields: ctx, 
number -func (_m *EthClientMock) HeaderByNumber(ctx context.Context, number *big.Int) (*coretypes.Header, error) { +func (_m *EthClient) HeaderByNumber(ctx context.Context, number *big.Int) (*coretypes.Header, error) { ret := _m.Called(ctx, number) if len(ret) == 0 { @@ -110,42 +110,42 @@ func (_m *EthClientMock) HeaderByNumber(ctx context.Context, number *big.Int) (* return r0, r1 } -// EthClientMock_HeaderByNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'HeaderByNumber' -type EthClientMock_HeaderByNumber_Call struct { +// EthClient_HeaderByNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'HeaderByNumber' +type EthClient_HeaderByNumber_Call struct { *mock.Call } // HeaderByNumber is a helper method to define mock.On call // - ctx context.Context // - number *big.Int -func (_e *EthClientMock_Expecter) HeaderByNumber(ctx interface{}, number interface{}) *EthClientMock_HeaderByNumber_Call { - return &EthClientMock_HeaderByNumber_Call{Call: _e.mock.On("HeaderByNumber", ctx, number)} +func (_e *EthClient_Expecter) HeaderByNumber(ctx interface{}, number interface{}) *EthClient_HeaderByNumber_Call { + return &EthClient_HeaderByNumber_Call{Call: _e.mock.On("HeaderByNumber", ctx, number)} } -func (_c *EthClientMock_HeaderByNumber_Call) Run(run func(ctx context.Context, number *big.Int)) *EthClientMock_HeaderByNumber_Call { +func (_c *EthClient_HeaderByNumber_Call) Run(run func(ctx context.Context, number *big.Int)) *EthClient_HeaderByNumber_Call { _c.Call.Run(func(args mock.Arguments) { run(args[0].(context.Context), args[1].(*big.Int)) }) return _c } -func (_c *EthClientMock_HeaderByNumber_Call) Return(_a0 *coretypes.Header, _a1 error) *EthClientMock_HeaderByNumber_Call { +func (_c *EthClient_HeaderByNumber_Call) Return(_a0 *coretypes.Header, _a1 error) *EthClient_HeaderByNumber_Call { _c.Call.Return(_a0, _a1) return _c } -func (_c *EthClientMock_HeaderByNumber_Call) 
RunAndReturn(run func(context.Context, *big.Int) (*coretypes.Header, error)) *EthClientMock_HeaderByNumber_Call { +func (_c *EthClient_HeaderByNumber_Call) RunAndReturn(run func(context.Context, *big.Int) (*coretypes.Header, error)) *EthClient_HeaderByNumber_Call { _c.Call.Return(run) return _c } -// NewEthClientMock creates a new instance of EthClientMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// NewEthClient creates a new instance of EthClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. -func NewEthClientMock(t interface { +func NewEthClient(t interface { mock.TestingT Cleanup(func()) -}) *EthClientMock { - mock := &EthClientMock{} +}) *EthClient { + mock := &EthClient{} mock.Mock.Test(t) t.Cleanup(func() { mock.AssertExpectations(t) }) diff --git a/aggsender/mocks/generic_subscriber.go b/aggsender/mocks/generic_subscriber.go new file mode 100644 index 00000000..b4bee4b4 --- /dev/null +++ b/aggsender/mocks/generic_subscriber.go @@ -0,0 +1,113 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mocks + +import mock "github.com/stretchr/testify/mock" + +// GenericSubscriber is an autogenerated mock type for the GenericSubscriber type +type GenericSubscriber[T interface{}] struct { + mock.Mock +} + +type GenericSubscriber_Expecter[T interface{}] struct { + mock *mock.Mock +} + +func (_m *GenericSubscriber[T]) EXPECT() *GenericSubscriber_Expecter[T] { + return &GenericSubscriber_Expecter[T]{mock: &_m.Mock} +} + +// Publish provides a mock function with given fields: data +func (_m *GenericSubscriber[T]) Publish(data T) { + _m.Called(data) +} + +// GenericSubscriber_Publish_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Publish' +type GenericSubscriber_Publish_Call[T interface{}] struct { + *mock.Call +} + +// Publish is a helper method to define mock.On call +// - data T +func (_e *GenericSubscriber_Expecter[T]) Publish(data interface{}) *GenericSubscriber_Publish_Call[T] { + return &GenericSubscriber_Publish_Call[T]{Call: _e.mock.On("Publish", data)} +} + +func (_c *GenericSubscriber_Publish_Call[T]) Run(run func(data T)) *GenericSubscriber_Publish_Call[T] { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(T)) + }) + return _c +} + +func (_c *GenericSubscriber_Publish_Call[T]) Return() *GenericSubscriber_Publish_Call[T] { + _c.Call.Return() + return _c +} + +func (_c *GenericSubscriber_Publish_Call[T]) RunAndReturn(run func(T)) *GenericSubscriber_Publish_Call[T] { + _c.Call.Return(run) + return _c +} + +// Subscribe provides a mock function with given fields: subscriberName +func (_m *GenericSubscriber[T]) Subscribe(subscriberName string) <-chan T { + ret := _m.Called(subscriberName) + + if len(ret) == 0 { + panic("no return value specified for Subscribe") + } + + var r0 <-chan T + if rf, ok := ret.Get(0).(func(string) <-chan T); ok { + r0 = rf(subscriberName) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan T) + } + } + + return r0 +} + +// GenericSubscriber_Subscribe_Call is a 
*mock.Call that shadows Run/Return methods with type explicit version for method 'Subscribe' +type GenericSubscriber_Subscribe_Call[T interface{}] struct { + *mock.Call +} + +// Subscribe is a helper method to define mock.On call +// - subscriberName string +func (_e *GenericSubscriber_Expecter[T]) Subscribe(subscriberName interface{}) *GenericSubscriber_Subscribe_Call[T] { + return &GenericSubscriber_Subscribe_Call[T]{Call: _e.mock.On("Subscribe", subscriberName)} +} + +func (_c *GenericSubscriber_Subscribe_Call[T]) Run(run func(subscriberName string)) *GenericSubscriber_Subscribe_Call[T] { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string)) + }) + return _c +} + +func (_c *GenericSubscriber_Subscribe_Call[T]) Return(_a0 <-chan T) *GenericSubscriber_Subscribe_Call[T] { + _c.Call.Return(_a0) + return _c +} + +func (_c *GenericSubscriber_Subscribe_Call[T]) RunAndReturn(run func(string) <-chan T) *GenericSubscriber_Subscribe_Call[T] { + _c.Call.Return(run) + return _c +} + +// NewGenericSubscriber creates a new instance of GenericSubscriber. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewGenericSubscriber[T interface{}](t interface { + mock.TestingT + Cleanup(func()) +}) *GenericSubscriber[T] { + mock := &GenericSubscriber[T]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/aggsender/mocks/l1_info_tree_syncer.go b/aggsender/mocks/l1_info_tree_syncer.go new file mode 100644 index 00000000..70ac97de --- /dev/null +++ b/aggsender/mocks/l1_info_tree_syncer.go @@ -0,0 +1,217 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + common "github.com/ethereum/go-ethereum/common" + + l1infotreesync "github.com/0xPolygon/cdk/l1infotreesync" + + mock "github.com/stretchr/testify/mock" + + treetypes "github.com/0xPolygon/cdk/tree/types" +) + +// L1InfoTreeSyncer is an autogenerated mock type for the L1InfoTreeSyncer type +type L1InfoTreeSyncer struct { + mock.Mock +} + +type L1InfoTreeSyncer_Expecter struct { + mock *mock.Mock +} + +func (_m *L1InfoTreeSyncer) EXPECT() *L1InfoTreeSyncer_Expecter { + return &L1InfoTreeSyncer_Expecter{mock: &_m.Mock} +} + +// GetInfoByGlobalExitRoot provides a mock function with given fields: globalExitRoot +func (_m *L1InfoTreeSyncer) GetInfoByGlobalExitRoot(globalExitRoot common.Hash) (*l1infotreesync.L1InfoTreeLeaf, error) { + ret := _m.Called(globalExitRoot) + + if len(ret) == 0 { + panic("no return value specified for GetInfoByGlobalExitRoot") + } + + var r0 *l1infotreesync.L1InfoTreeLeaf + var r1 error + if rf, ok := ret.Get(0).(func(common.Hash) (*l1infotreesync.L1InfoTreeLeaf, error)); ok { + return rf(globalExitRoot) + } + if rf, ok := ret.Get(0).(func(common.Hash) *l1infotreesync.L1InfoTreeLeaf); ok { + r0 = rf(globalExitRoot) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*l1infotreesync.L1InfoTreeLeaf) + } + } + + if rf, ok := ret.Get(1).(func(common.Hash) error); ok { + r1 = rf(globalExitRoot) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// L1InfoTreeSyncer_GetInfoByGlobalExitRoot_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetInfoByGlobalExitRoot' +type L1InfoTreeSyncer_GetInfoByGlobalExitRoot_Call struct { + *mock.Call +} + +// GetInfoByGlobalExitRoot is a helper method to define mock.On call +// - globalExitRoot common.Hash +func (_e *L1InfoTreeSyncer_Expecter) GetInfoByGlobalExitRoot(globalExitRoot interface{}) *L1InfoTreeSyncer_GetInfoByGlobalExitRoot_Call { + return &L1InfoTreeSyncer_GetInfoByGlobalExitRoot_Call{Call: 
_e.mock.On("GetInfoByGlobalExitRoot", globalExitRoot)} +} + +func (_c *L1InfoTreeSyncer_GetInfoByGlobalExitRoot_Call) Run(run func(globalExitRoot common.Hash)) *L1InfoTreeSyncer_GetInfoByGlobalExitRoot_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(common.Hash)) + }) + return _c +} + +func (_c *L1InfoTreeSyncer_GetInfoByGlobalExitRoot_Call) Return(_a0 *l1infotreesync.L1InfoTreeLeaf, _a1 error) *L1InfoTreeSyncer_GetInfoByGlobalExitRoot_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L1InfoTreeSyncer_GetInfoByGlobalExitRoot_Call) RunAndReturn(run func(common.Hash) (*l1infotreesync.L1InfoTreeLeaf, error)) *L1InfoTreeSyncer_GetInfoByGlobalExitRoot_Call { + _c.Call.Return(run) + return _c +} + +// GetL1InfoTreeMerkleProofFromIndexToRoot provides a mock function with given fields: ctx, index, root +func (_m *L1InfoTreeSyncer) GetL1InfoTreeMerkleProofFromIndexToRoot(ctx context.Context, index uint32, root common.Hash) (treetypes.Proof, error) { + ret := _m.Called(ctx, index, root) + + if len(ret) == 0 { + panic("no return value specified for GetL1InfoTreeMerkleProofFromIndexToRoot") + } + + var r0 treetypes.Proof + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint32, common.Hash) (treetypes.Proof, error)); ok { + return rf(ctx, index, root) + } + if rf, ok := ret.Get(0).(func(context.Context, uint32, common.Hash) treetypes.Proof); ok { + r0 = rf(ctx, index, root) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(treetypes.Proof) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint32, common.Hash) error); ok { + r1 = rf(ctx, index, root) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// L1InfoTreeSyncer_GetL1InfoTreeMerkleProofFromIndexToRoot_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetL1InfoTreeMerkleProofFromIndexToRoot' +type L1InfoTreeSyncer_GetL1InfoTreeMerkleProofFromIndexToRoot_Call struct { + *mock.Call +} + +// 
GetL1InfoTreeMerkleProofFromIndexToRoot is a helper method to define mock.On call +// - ctx context.Context +// - index uint32 +// - root common.Hash +func (_e *L1InfoTreeSyncer_Expecter) GetL1InfoTreeMerkleProofFromIndexToRoot(ctx interface{}, index interface{}, root interface{}) *L1InfoTreeSyncer_GetL1InfoTreeMerkleProofFromIndexToRoot_Call { + return &L1InfoTreeSyncer_GetL1InfoTreeMerkleProofFromIndexToRoot_Call{Call: _e.mock.On("GetL1InfoTreeMerkleProofFromIndexToRoot", ctx, index, root)} +} + +func (_c *L1InfoTreeSyncer_GetL1InfoTreeMerkleProofFromIndexToRoot_Call) Run(run func(ctx context.Context, index uint32, root common.Hash)) *L1InfoTreeSyncer_GetL1InfoTreeMerkleProofFromIndexToRoot_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint32), args[2].(common.Hash)) + }) + return _c +} + +func (_c *L1InfoTreeSyncer_GetL1InfoTreeMerkleProofFromIndexToRoot_Call) Return(_a0 treetypes.Proof, _a1 error) *L1InfoTreeSyncer_GetL1InfoTreeMerkleProofFromIndexToRoot_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L1InfoTreeSyncer_GetL1InfoTreeMerkleProofFromIndexToRoot_Call) RunAndReturn(run func(context.Context, uint32, common.Hash) (treetypes.Proof, error)) *L1InfoTreeSyncer_GetL1InfoTreeMerkleProofFromIndexToRoot_Call { + _c.Call.Return(run) + return _c +} + +// GetL1InfoTreeRootByIndex provides a mock function with given fields: ctx, index +func (_m *L1InfoTreeSyncer) GetL1InfoTreeRootByIndex(ctx context.Context, index uint32) (treetypes.Root, error) { + ret := _m.Called(ctx, index) + + if len(ret) == 0 { + panic("no return value specified for GetL1InfoTreeRootByIndex") + } + + var r0 treetypes.Root + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint32) (treetypes.Root, error)); ok { + return rf(ctx, index) + } + if rf, ok := ret.Get(0).(func(context.Context, uint32) treetypes.Root); ok { + r0 = rf(ctx, index) + } else { + r0 = ret.Get(0).(treetypes.Root) + } + + if rf, ok := 
ret.Get(1).(func(context.Context, uint32) error); ok { + r1 = rf(ctx, index) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// L1InfoTreeSyncer_GetL1InfoTreeRootByIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetL1InfoTreeRootByIndex' +type L1InfoTreeSyncer_GetL1InfoTreeRootByIndex_Call struct { + *mock.Call +} + +// GetL1InfoTreeRootByIndex is a helper method to define mock.On call +// - ctx context.Context +// - index uint32 +func (_e *L1InfoTreeSyncer_Expecter) GetL1InfoTreeRootByIndex(ctx interface{}, index interface{}) *L1InfoTreeSyncer_GetL1InfoTreeRootByIndex_Call { + return &L1InfoTreeSyncer_GetL1InfoTreeRootByIndex_Call{Call: _e.mock.On("GetL1InfoTreeRootByIndex", ctx, index)} +} + +func (_c *L1InfoTreeSyncer_GetL1InfoTreeRootByIndex_Call) Run(run func(ctx context.Context, index uint32)) *L1InfoTreeSyncer_GetL1InfoTreeRootByIndex_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint32)) + }) + return _c +} + +func (_c *L1InfoTreeSyncer_GetL1InfoTreeRootByIndex_Call) Return(_a0 treetypes.Root, _a1 error) *L1InfoTreeSyncer_GetL1InfoTreeRootByIndex_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L1InfoTreeSyncer_GetL1InfoTreeRootByIndex_Call) RunAndReturn(run func(context.Context, uint32) (treetypes.Root, error)) *L1InfoTreeSyncer_GetL1InfoTreeRootByIndex_Call { + _c.Call.Return(run) + return _c +} + +// NewL1InfoTreeSyncer creates a new instance of L1InfoTreeSyncer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewL1InfoTreeSyncer(t interface { + mock.TestingT + Cleanup(func()) +}) *L1InfoTreeSyncer { + mock := &L1InfoTreeSyncer{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/aggsender/mocks/l2_bridge_syncer.go b/aggsender/mocks/l2_bridge_syncer.go new file mode 100644 index 00000000..800007ff --- /dev/null +++ b/aggsender/mocks/l2_bridge_syncer.go @@ -0,0 +1,423 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + bridgesync "github.com/0xPolygon/cdk/bridgesync" + common "github.com/ethereum/go-ethereum/common" + + context "context" + + etherman "github.com/0xPolygon/cdk/etherman" + + mock "github.com/stretchr/testify/mock" + + treetypes "github.com/0xPolygon/cdk/tree/types" +) + +// L2BridgeSyncer is an autogenerated mock type for the L2BridgeSyncer type +type L2BridgeSyncer struct { + mock.Mock +} + +type L2BridgeSyncer_Expecter struct { + mock *mock.Mock +} + +func (_m *L2BridgeSyncer) EXPECT() *L2BridgeSyncer_Expecter { + return &L2BridgeSyncer_Expecter{mock: &_m.Mock} +} + +// BlockFinality provides a mock function with given fields: +func (_m *L2BridgeSyncer) BlockFinality() etherman.BlockNumberFinality { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for BlockFinality") + } + + var r0 etherman.BlockNumberFinality + if rf, ok := ret.Get(0).(func() etherman.BlockNumberFinality); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(etherman.BlockNumberFinality) + } + + return r0 +} + +// L2BridgeSyncer_BlockFinality_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BlockFinality' +type L2BridgeSyncer_BlockFinality_Call struct { + *mock.Call +} + +// BlockFinality is a helper method to define mock.On call +func (_e *L2BridgeSyncer_Expecter) BlockFinality() *L2BridgeSyncer_BlockFinality_Call { + return &L2BridgeSyncer_BlockFinality_Call{Call: _e.mock.On("BlockFinality")} +} + +func (_c 
*L2BridgeSyncer_BlockFinality_Call) Run(run func()) *L2BridgeSyncer_BlockFinality_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *L2BridgeSyncer_BlockFinality_Call) Return(_a0 etherman.BlockNumberFinality) *L2BridgeSyncer_BlockFinality_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *L2BridgeSyncer_BlockFinality_Call) RunAndReturn(run func() etherman.BlockNumberFinality) *L2BridgeSyncer_BlockFinality_Call { + _c.Call.Return(run) + return _c +} + +// GetBlockByLER provides a mock function with given fields: ctx, ler +func (_m *L2BridgeSyncer) GetBlockByLER(ctx context.Context, ler common.Hash) (uint64, error) { + ret := _m.Called(ctx, ler) + + if len(ret) == 0 { + panic("no return value specified for GetBlockByLER") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (uint64, error)); ok { + return rf(ctx, ler) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) uint64); ok { + r0 = rf(ctx, ler) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { + r1 = rf(ctx, ler) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// L2BridgeSyncer_GetBlockByLER_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetBlockByLER' +type L2BridgeSyncer_GetBlockByLER_Call struct { + *mock.Call +} + +// GetBlockByLER is a helper method to define mock.On call +// - ctx context.Context +// - ler common.Hash +func (_e *L2BridgeSyncer_Expecter) GetBlockByLER(ctx interface{}, ler interface{}) *L2BridgeSyncer_GetBlockByLER_Call { + return &L2BridgeSyncer_GetBlockByLER_Call{Call: _e.mock.On("GetBlockByLER", ctx, ler)} +} + +func (_c *L2BridgeSyncer_GetBlockByLER_Call) Run(run func(ctx context.Context, ler common.Hash)) *L2BridgeSyncer_GetBlockByLER_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(common.Hash)) + }) + return _c +} 
+ +func (_c *L2BridgeSyncer_GetBlockByLER_Call) Return(_a0 uint64, _a1 error) *L2BridgeSyncer_GetBlockByLER_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L2BridgeSyncer_GetBlockByLER_Call) RunAndReturn(run func(context.Context, common.Hash) (uint64, error)) *L2BridgeSyncer_GetBlockByLER_Call { + _c.Call.Return(run) + return _c +} + +// GetBridgesPublished provides a mock function with given fields: ctx, fromBlock, toBlock +func (_m *L2BridgeSyncer) GetBridgesPublished(ctx context.Context, fromBlock uint64, toBlock uint64) ([]bridgesync.Bridge, error) { + ret := _m.Called(ctx, fromBlock, toBlock) + + if len(ret) == 0 { + panic("no return value specified for GetBridgesPublished") + } + + var r0 []bridgesync.Bridge + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64) ([]bridgesync.Bridge, error)); ok { + return rf(ctx, fromBlock, toBlock) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64) []bridgesync.Bridge); ok { + r0 = rf(ctx, fromBlock, toBlock) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]bridgesync.Bridge) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64, uint64) error); ok { + r1 = rf(ctx, fromBlock, toBlock) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// L2BridgeSyncer_GetBridgesPublished_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetBridgesPublished' +type L2BridgeSyncer_GetBridgesPublished_Call struct { + *mock.Call +} + +// GetBridgesPublished is a helper method to define mock.On call +// - ctx context.Context +// - fromBlock uint64 +// - toBlock uint64 +func (_e *L2BridgeSyncer_Expecter) GetBridgesPublished(ctx interface{}, fromBlock interface{}, toBlock interface{}) *L2BridgeSyncer_GetBridgesPublished_Call { + return &L2BridgeSyncer_GetBridgesPublished_Call{Call: _e.mock.On("GetBridgesPublished", ctx, fromBlock, toBlock)} +} + +func (_c *L2BridgeSyncer_GetBridgesPublished_Call) Run(run func(ctx 
context.Context, fromBlock uint64, toBlock uint64)) *L2BridgeSyncer_GetBridgesPublished_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64), args[2].(uint64)) + }) + return _c +} + +func (_c *L2BridgeSyncer_GetBridgesPublished_Call) Return(_a0 []bridgesync.Bridge, _a1 error) *L2BridgeSyncer_GetBridgesPublished_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L2BridgeSyncer_GetBridgesPublished_Call) RunAndReturn(run func(context.Context, uint64, uint64) ([]bridgesync.Bridge, error)) *L2BridgeSyncer_GetBridgesPublished_Call { + _c.Call.Return(run) + return _c +} + +// GetClaims provides a mock function with given fields: ctx, fromBlock, toBlock +func (_m *L2BridgeSyncer) GetClaims(ctx context.Context, fromBlock uint64, toBlock uint64) ([]bridgesync.Claim, error) { + ret := _m.Called(ctx, fromBlock, toBlock) + + if len(ret) == 0 { + panic("no return value specified for GetClaims") + } + + var r0 []bridgesync.Claim + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64) ([]bridgesync.Claim, error)); ok { + return rf(ctx, fromBlock, toBlock) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64) []bridgesync.Claim); ok { + r0 = rf(ctx, fromBlock, toBlock) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]bridgesync.Claim) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64, uint64) error); ok { + r1 = rf(ctx, fromBlock, toBlock) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// L2BridgeSyncer_GetClaims_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetClaims' +type L2BridgeSyncer_GetClaims_Call struct { + *mock.Call +} + +// GetClaims is a helper method to define mock.On call +// - ctx context.Context +// - fromBlock uint64 +// - toBlock uint64 +func (_e *L2BridgeSyncer_Expecter) GetClaims(ctx interface{}, fromBlock interface{}, toBlock interface{}) *L2BridgeSyncer_GetClaims_Call { + return 
&L2BridgeSyncer_GetClaims_Call{Call: _e.mock.On("GetClaims", ctx, fromBlock, toBlock)} +} + +func (_c *L2BridgeSyncer_GetClaims_Call) Run(run func(ctx context.Context, fromBlock uint64, toBlock uint64)) *L2BridgeSyncer_GetClaims_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64), args[2].(uint64)) + }) + return _c +} + +func (_c *L2BridgeSyncer_GetClaims_Call) Return(_a0 []bridgesync.Claim, _a1 error) *L2BridgeSyncer_GetClaims_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L2BridgeSyncer_GetClaims_Call) RunAndReturn(run func(context.Context, uint64, uint64) ([]bridgesync.Claim, error)) *L2BridgeSyncer_GetClaims_Call { + _c.Call.Return(run) + return _c +} + +// GetExitRootByIndex provides a mock function with given fields: ctx, index +func (_m *L2BridgeSyncer) GetExitRootByIndex(ctx context.Context, index uint32) (treetypes.Root, error) { + ret := _m.Called(ctx, index) + + if len(ret) == 0 { + panic("no return value specified for GetExitRootByIndex") + } + + var r0 treetypes.Root + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint32) (treetypes.Root, error)); ok { + return rf(ctx, index) + } + if rf, ok := ret.Get(0).(func(context.Context, uint32) treetypes.Root); ok { + r0 = rf(ctx, index) + } else { + r0 = ret.Get(0).(treetypes.Root) + } + + if rf, ok := ret.Get(1).(func(context.Context, uint32) error); ok { + r1 = rf(ctx, index) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// L2BridgeSyncer_GetExitRootByIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetExitRootByIndex' +type L2BridgeSyncer_GetExitRootByIndex_Call struct { + *mock.Call +} + +// GetExitRootByIndex is a helper method to define mock.On call +// - ctx context.Context +// - index uint32 +func (_e *L2BridgeSyncer_Expecter) GetExitRootByIndex(ctx interface{}, index interface{}) *L2BridgeSyncer_GetExitRootByIndex_Call { + return 
&L2BridgeSyncer_GetExitRootByIndex_Call{Call: _e.mock.On("GetExitRootByIndex", ctx, index)} +} + +func (_c *L2BridgeSyncer_GetExitRootByIndex_Call) Run(run func(ctx context.Context, index uint32)) *L2BridgeSyncer_GetExitRootByIndex_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint32)) + }) + return _c +} + +func (_c *L2BridgeSyncer_GetExitRootByIndex_Call) Return(_a0 treetypes.Root, _a1 error) *L2BridgeSyncer_GetExitRootByIndex_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L2BridgeSyncer_GetExitRootByIndex_Call) RunAndReturn(run func(context.Context, uint32) (treetypes.Root, error)) *L2BridgeSyncer_GetExitRootByIndex_Call { + _c.Call.Return(run) + return _c +} + +// GetLastProcessedBlock provides a mock function with given fields: ctx +func (_m *L2BridgeSyncer) GetLastProcessedBlock(ctx context.Context) (uint64, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GetLastProcessedBlock") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (uint64, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) uint64); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// L2BridgeSyncer_GetLastProcessedBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLastProcessedBlock' +type L2BridgeSyncer_GetLastProcessedBlock_Call struct { + *mock.Call +} + +// GetLastProcessedBlock is a helper method to define mock.On call +// - ctx context.Context +func (_e *L2BridgeSyncer_Expecter) GetLastProcessedBlock(ctx interface{}) *L2BridgeSyncer_GetLastProcessedBlock_Call { + return &L2BridgeSyncer_GetLastProcessedBlock_Call{Call: _e.mock.On("GetLastProcessedBlock", ctx)} +} + +func (_c *L2BridgeSyncer_GetLastProcessedBlock_Call) 
Run(run func(ctx context.Context)) *L2BridgeSyncer_GetLastProcessedBlock_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *L2BridgeSyncer_GetLastProcessedBlock_Call) Return(_a0 uint64, _a1 error) *L2BridgeSyncer_GetLastProcessedBlock_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L2BridgeSyncer_GetLastProcessedBlock_Call) RunAndReturn(run func(context.Context) (uint64, error)) *L2BridgeSyncer_GetLastProcessedBlock_Call { + _c.Call.Return(run) + return _c +} + +// OriginNetwork provides a mock function with given fields: +func (_m *L2BridgeSyncer) OriginNetwork() uint32 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for OriginNetwork") + } + + var r0 uint32 + if rf, ok := ret.Get(0).(func() uint32); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint32) + } + + return r0 +} + +// L2BridgeSyncer_OriginNetwork_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'OriginNetwork' +type L2BridgeSyncer_OriginNetwork_Call struct { + *mock.Call +} + +// OriginNetwork is a helper method to define mock.On call +func (_e *L2BridgeSyncer_Expecter) OriginNetwork() *L2BridgeSyncer_OriginNetwork_Call { + return &L2BridgeSyncer_OriginNetwork_Call{Call: _e.mock.On("OriginNetwork")} +} + +func (_c *L2BridgeSyncer_OriginNetwork_Call) Run(run func()) *L2BridgeSyncer_OriginNetwork_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *L2BridgeSyncer_OriginNetwork_Call) Return(_a0 uint32) *L2BridgeSyncer_OriginNetwork_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *L2BridgeSyncer_OriginNetwork_Call) RunAndReturn(run func() uint32) *L2BridgeSyncer_OriginNetwork_Call { + _c.Call.Return(run) + return _c +} + +// NewL2BridgeSyncer creates a new instance of L2BridgeSyncer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+// The first argument is typically a *testing.T value. +func NewL2BridgeSyncer(t interface { + mock.TestingT + Cleanup(func()) +}) *L2BridgeSyncer { + mock := &L2BridgeSyncer{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/aggsender/mocks/logger.go b/aggsender/mocks/logger.go new file mode 100644 index 00000000..bb26739e --- /dev/null +++ b/aggsender/mocks/logger.go @@ -0,0 +1,376 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import mock "github.com/stretchr/testify/mock" + +// Logger is an autogenerated mock type for the Logger type +type Logger struct { + mock.Mock +} + +type Logger_Expecter struct { + mock *mock.Mock +} + +func (_m *Logger) EXPECT() *Logger_Expecter { + return &Logger_Expecter{mock: &_m.Mock} +} + +// Debug provides a mock function with given fields: args +func (_m *Logger) Debug(args ...interface{}) { + var _ca []interface{} + _ca = append(_ca, args...) + _m.Called(_ca...) +} + +// Logger_Debug_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Debug' +type Logger_Debug_Call struct { + *mock.Call +} + +// Debug is a helper method to define mock.On call +// - args ...interface{} +func (_e *Logger_Expecter) Debug(args ...interface{}) *Logger_Debug_Call { + return &Logger_Debug_Call{Call: _e.mock.On("Debug", + append([]interface{}{}, args...)...)} +} + +func (_c *Logger_Debug_Call) Run(run func(args ...interface{})) *Logger_Debug_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]interface{}, len(args)-0) + for i, a := range args[0:] { + if a != nil { + variadicArgs[i] = a.(interface{}) + } + } + run(variadicArgs...) 
+ }) + return _c +} + +func (_c *Logger_Debug_Call) Return() *Logger_Debug_Call { + _c.Call.Return() + return _c +} + +func (_c *Logger_Debug_Call) RunAndReturn(run func(...interface{})) *Logger_Debug_Call { + _c.Call.Return(run) + return _c +} + +// Debugf provides a mock function with given fields: format, args +func (_m *Logger) Debugf(format string, args ...interface{}) { + var _ca []interface{} + _ca = append(_ca, format) + _ca = append(_ca, args...) + _m.Called(_ca...) +} + +// Logger_Debugf_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Debugf' +type Logger_Debugf_Call struct { + *mock.Call +} + +// Debugf is a helper method to define mock.On call +// - format string +// - args ...interface{} +func (_e *Logger_Expecter) Debugf(format interface{}, args ...interface{}) *Logger_Debugf_Call { + return &Logger_Debugf_Call{Call: _e.mock.On("Debugf", + append([]interface{}{format}, args...)...)} +} + +func (_c *Logger_Debugf_Call) Run(run func(format string, args ...interface{})) *Logger_Debugf_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]interface{}, len(args)-1) + for i, a := range args[1:] { + if a != nil { + variadicArgs[i] = a.(interface{}) + } + } + run(args[0].(string), variadicArgs...) + }) + return _c +} + +func (_c *Logger_Debugf_Call) Return() *Logger_Debugf_Call { + _c.Call.Return() + return _c +} + +func (_c *Logger_Debugf_Call) RunAndReturn(run func(string, ...interface{})) *Logger_Debugf_Call { + _c.Call.Return(run) + return _c +} + +// Error provides a mock function with given fields: args +func (_m *Logger) Error(args ...interface{}) { + var _ca []interface{} + _ca = append(_ca, args...) + _m.Called(_ca...) 
+} + +// Logger_Error_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Error' +type Logger_Error_Call struct { + *mock.Call +} + +// Error is a helper method to define mock.On call +// - args ...interface{} +func (_e *Logger_Expecter) Error(args ...interface{}) *Logger_Error_Call { + return &Logger_Error_Call{Call: _e.mock.On("Error", + append([]interface{}{}, args...)...)} +} + +func (_c *Logger_Error_Call) Run(run func(args ...interface{})) *Logger_Error_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]interface{}, len(args)-0) + for i, a := range args[0:] { + if a != nil { + variadicArgs[i] = a.(interface{}) + } + } + run(variadicArgs...) + }) + return _c +} + +func (_c *Logger_Error_Call) Return() *Logger_Error_Call { + _c.Call.Return() + return _c +} + +func (_c *Logger_Error_Call) RunAndReturn(run func(...interface{})) *Logger_Error_Call { + _c.Call.Return(run) + return _c +} + +// Errorf provides a mock function with given fields: format, args +func (_m *Logger) Errorf(format string, args ...interface{}) { + var _ca []interface{} + _ca = append(_ca, format) + _ca = append(_ca, args...) + _m.Called(_ca...) 
+} + +// Logger_Errorf_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Errorf' +type Logger_Errorf_Call struct { + *mock.Call +} + +// Errorf is a helper method to define mock.On call +// - format string +// - args ...interface{} +func (_e *Logger_Expecter) Errorf(format interface{}, args ...interface{}) *Logger_Errorf_Call { + return &Logger_Errorf_Call{Call: _e.mock.On("Errorf", + append([]interface{}{format}, args...)...)} +} + +func (_c *Logger_Errorf_Call) Run(run func(format string, args ...interface{})) *Logger_Errorf_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]interface{}, len(args)-1) + for i, a := range args[1:] { + if a != nil { + variadicArgs[i] = a.(interface{}) + } + } + run(args[0].(string), variadicArgs...) + }) + return _c +} + +func (_c *Logger_Errorf_Call) Return() *Logger_Errorf_Call { + _c.Call.Return() + return _c +} + +func (_c *Logger_Errorf_Call) RunAndReturn(run func(string, ...interface{})) *Logger_Errorf_Call { + _c.Call.Return(run) + return _c +} + +// Info provides a mock function with given fields: args +func (_m *Logger) Info(args ...interface{}) { + var _ca []interface{} + _ca = append(_ca, args...) + _m.Called(_ca...) +} + +// Logger_Info_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Info' +type Logger_Info_Call struct { + *mock.Call +} + +// Info is a helper method to define mock.On call +// - args ...interface{} +func (_e *Logger_Expecter) Info(args ...interface{}) *Logger_Info_Call { + return &Logger_Info_Call{Call: _e.mock.On("Info", + append([]interface{}{}, args...)...)} +} + +func (_c *Logger_Info_Call) Run(run func(args ...interface{})) *Logger_Info_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]interface{}, len(args)-0) + for i, a := range args[0:] { + if a != nil { + variadicArgs[i] = a.(interface{}) + } + } + run(variadicArgs...) 
+ }) + return _c +} + +func (_c *Logger_Info_Call) Return() *Logger_Info_Call { + _c.Call.Return() + return _c +} + +func (_c *Logger_Info_Call) RunAndReturn(run func(...interface{})) *Logger_Info_Call { + _c.Call.Return(run) + return _c +} + +// Infof provides a mock function with given fields: format, args +func (_m *Logger) Infof(format string, args ...interface{}) { + var _ca []interface{} + _ca = append(_ca, format) + _ca = append(_ca, args...) + _m.Called(_ca...) +} + +// Logger_Infof_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Infof' +type Logger_Infof_Call struct { + *mock.Call +} + +// Infof is a helper method to define mock.On call +// - format string +// - args ...interface{} +func (_e *Logger_Expecter) Infof(format interface{}, args ...interface{}) *Logger_Infof_Call { + return &Logger_Infof_Call{Call: _e.mock.On("Infof", + append([]interface{}{format}, args...)...)} +} + +func (_c *Logger_Infof_Call) Run(run func(format string, args ...interface{})) *Logger_Infof_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]interface{}, len(args)-1) + for i, a := range args[1:] { + if a != nil { + variadicArgs[i] = a.(interface{}) + } + } + run(args[0].(string), variadicArgs...) + }) + return _c +} + +func (_c *Logger_Infof_Call) Return() *Logger_Infof_Call { + _c.Call.Return() + return _c +} + +func (_c *Logger_Infof_Call) RunAndReturn(run func(string, ...interface{})) *Logger_Infof_Call { + _c.Call.Return(run) + return _c +} + +// Warn provides a mock function with given fields: args +func (_m *Logger) Warn(args ...interface{}) { + var _ca []interface{} + _ca = append(_ca, args...) + _m.Called(_ca...) 
+} + +// Logger_Warn_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Warn' +type Logger_Warn_Call struct { + *mock.Call +} + +// Warn is a helper method to define mock.On call +// - args ...interface{} +func (_e *Logger_Expecter) Warn(args ...interface{}) *Logger_Warn_Call { + return &Logger_Warn_Call{Call: _e.mock.On("Warn", + append([]interface{}{}, args...)...)} +} + +func (_c *Logger_Warn_Call) Run(run func(args ...interface{})) *Logger_Warn_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]interface{}, len(args)-0) + for i, a := range args[0:] { + if a != nil { + variadicArgs[i] = a.(interface{}) + } + } + run(variadicArgs...) + }) + return _c +} + +func (_c *Logger_Warn_Call) Return() *Logger_Warn_Call { + _c.Call.Return() + return _c +} + +func (_c *Logger_Warn_Call) RunAndReturn(run func(...interface{})) *Logger_Warn_Call { + _c.Call.Return(run) + return _c +} + +// Warnf provides a mock function with given fields: format, args +func (_m *Logger) Warnf(format string, args ...interface{}) { + var _ca []interface{} + _ca = append(_ca, format) + _ca = append(_ca, args...) + _m.Called(_ca...) +} + +// Logger_Warnf_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Warnf' +type Logger_Warnf_Call struct { + *mock.Call +} + +// Warnf is a helper method to define mock.On call +// - format string +// - args ...interface{} +func (_e *Logger_Expecter) Warnf(format interface{}, args ...interface{}) *Logger_Warnf_Call { + return &Logger_Warnf_Call{Call: _e.mock.On("Warnf", + append([]interface{}{format}, args...)...)} +} + +func (_c *Logger_Warnf_Call) Run(run func(format string, args ...interface{})) *Logger_Warnf_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]interface{}, len(args)-1) + for i, a := range args[1:] { + if a != nil { + variadicArgs[i] = a.(interface{}) + } + } + run(args[0].(string), variadicArgs...) 
+ }) + return _c +} + +func (_c *Logger_Warnf_Call) Return() *Logger_Warnf_Call { + _c.Call.Return() + return _c +} + +func (_c *Logger_Warnf_Call) RunAndReturn(run func(string, ...interface{})) *Logger_Warnf_Call { + _c.Call.Return(run) + return _c +} + +// NewLogger creates a new instance of Logger. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewLogger(t interface { + mock.TestingT + Cleanup(func()) +}) *Logger { + mock := &Logger{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/aggsender/mocks/mock_aggsender_storage.go b/aggsender/mocks/mock_aggsender_storage.go deleted file mode 100644 index 17f8d227..00000000 --- a/aggsender/mocks/mock_aggsender_storage.go +++ /dev/null @@ -1,351 +0,0 @@ -// Code generated by mockery. DO NOT EDIT. - -package mocks - -import ( - agglayer "github.com/0xPolygon/cdk/agglayer" - common "github.com/ethereum/go-ethereum/common" - - context "context" - - mock "github.com/stretchr/testify/mock" - - types "github.com/0xPolygon/cdk/aggsender/types" -) - -// AggSenderStorageMock is an autogenerated mock type for the AggSenderStorage type -type AggSenderStorageMock struct { - mock.Mock -} - -type AggSenderStorageMock_Expecter struct { - mock *mock.Mock -} - -func (_m *AggSenderStorageMock) EXPECT() *AggSenderStorageMock_Expecter { - return &AggSenderStorageMock_Expecter{mock: &_m.Mock} -} - -// DeleteCertificate provides a mock function with given fields: ctx, certificateID -func (_m *AggSenderStorageMock) DeleteCertificate(ctx context.Context, certificateID common.Hash) error { - ret := _m.Called(ctx, certificateID) - - if len(ret) == 0 { - panic("no return value specified for DeleteCertificate") - } - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) error); ok { - r0 = rf(ctx, certificateID) - } else { - r0 = ret.Error(0) - } - - 
return r0 -} - -// AggSenderStorageMock_DeleteCertificate_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteCertificate' -type AggSenderStorageMock_DeleteCertificate_Call struct { - *mock.Call -} - -// DeleteCertificate is a helper method to define mock.On call -// - ctx context.Context -// - certificateID common.Hash -func (_e *AggSenderStorageMock_Expecter) DeleteCertificate(ctx interface{}, certificateID interface{}) *AggSenderStorageMock_DeleteCertificate_Call { - return &AggSenderStorageMock_DeleteCertificate_Call{Call: _e.mock.On("DeleteCertificate", ctx, certificateID)} -} - -func (_c *AggSenderStorageMock_DeleteCertificate_Call) Run(run func(ctx context.Context, certificateID common.Hash)) *AggSenderStorageMock_DeleteCertificate_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(common.Hash)) - }) - return _c -} - -func (_c *AggSenderStorageMock_DeleteCertificate_Call) Return(_a0 error) *AggSenderStorageMock_DeleteCertificate_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *AggSenderStorageMock_DeleteCertificate_Call) RunAndReturn(run func(context.Context, common.Hash) error) *AggSenderStorageMock_DeleteCertificate_Call { - _c.Call.Return(run) - return _c -} - -// GetCertificateByHeight provides a mock function with given fields: height -func (_m *AggSenderStorageMock) GetCertificateByHeight(height uint64) (types.CertificateInfo, error) { - ret := _m.Called(height) - - if len(ret) == 0 { - panic("no return value specified for GetCertificateByHeight") - } - - var r0 types.CertificateInfo - var r1 error - if rf, ok := ret.Get(0).(func(uint64) (types.CertificateInfo, error)); ok { - return rf(height) - } - if rf, ok := ret.Get(0).(func(uint64) types.CertificateInfo); ok { - r0 = rf(height) - } else { - r0 = ret.Get(0).(types.CertificateInfo) - } - - if rf, ok := ret.Get(1).(func(uint64) error); ok { - r1 = rf(height) - } else { - r1 = ret.Error(1) - } - - return r0, 
r1 -} - -// AggSenderStorageMock_GetCertificateByHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetCertificateByHeight' -type AggSenderStorageMock_GetCertificateByHeight_Call struct { - *mock.Call -} - -// GetCertificateByHeight is a helper method to define mock.On call -// - height uint64 -func (_e *AggSenderStorageMock_Expecter) GetCertificateByHeight(height interface{}) *AggSenderStorageMock_GetCertificateByHeight_Call { - return &AggSenderStorageMock_GetCertificateByHeight_Call{Call: _e.mock.On("GetCertificateByHeight", height)} -} - -func (_c *AggSenderStorageMock_GetCertificateByHeight_Call) Run(run func(height uint64)) *AggSenderStorageMock_GetCertificateByHeight_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(uint64)) - }) - return _c -} - -func (_c *AggSenderStorageMock_GetCertificateByHeight_Call) Return(_a0 types.CertificateInfo, _a1 error) *AggSenderStorageMock_GetCertificateByHeight_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *AggSenderStorageMock_GetCertificateByHeight_Call) RunAndReturn(run func(uint64) (types.CertificateInfo, error)) *AggSenderStorageMock_GetCertificateByHeight_Call { - _c.Call.Return(run) - return _c -} - -// GetCertificatesByStatus provides a mock function with given fields: status -func (_m *AggSenderStorageMock) GetCertificatesByStatus(status []agglayer.CertificateStatus) ([]*types.CertificateInfo, error) { - ret := _m.Called(status) - - if len(ret) == 0 { - panic("no return value specified for GetCertificatesByStatus") - } - - var r0 []*types.CertificateInfo - var r1 error - if rf, ok := ret.Get(0).(func([]agglayer.CertificateStatus) ([]*types.CertificateInfo, error)); ok { - return rf(status) - } - if rf, ok := ret.Get(0).(func([]agglayer.CertificateStatus) []*types.CertificateInfo); ok { - r0 = rf(status) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]*types.CertificateInfo) - } - } - - if rf, ok := 
ret.Get(1).(func([]agglayer.CertificateStatus) error); ok { - r1 = rf(status) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// AggSenderStorageMock_GetCertificatesByStatus_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetCertificatesByStatus' -type AggSenderStorageMock_GetCertificatesByStatus_Call struct { - *mock.Call -} - -// GetCertificatesByStatus is a helper method to define mock.On call -// - status []agglayer.CertificateStatus -func (_e *AggSenderStorageMock_Expecter) GetCertificatesByStatus(status interface{}) *AggSenderStorageMock_GetCertificatesByStatus_Call { - return &AggSenderStorageMock_GetCertificatesByStatus_Call{Call: _e.mock.On("GetCertificatesByStatus", status)} -} - -func (_c *AggSenderStorageMock_GetCertificatesByStatus_Call) Run(run func(status []agglayer.CertificateStatus)) *AggSenderStorageMock_GetCertificatesByStatus_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].([]agglayer.CertificateStatus)) - }) - return _c -} - -func (_c *AggSenderStorageMock_GetCertificatesByStatus_Call) Return(_a0 []*types.CertificateInfo, _a1 error) *AggSenderStorageMock_GetCertificatesByStatus_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *AggSenderStorageMock_GetCertificatesByStatus_Call) RunAndReturn(run func([]agglayer.CertificateStatus) ([]*types.CertificateInfo, error)) *AggSenderStorageMock_GetCertificatesByStatus_Call { - _c.Call.Return(run) - return _c -} - -// GetLastSentCertificate provides a mock function with given fields: -func (_m *AggSenderStorageMock) GetLastSentCertificate() (types.CertificateInfo, error) { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for GetLastSentCertificate") - } - - var r0 types.CertificateInfo - var r1 error - if rf, ok := ret.Get(0).(func() (types.CertificateInfo, error)); ok { - return rf() - } - if rf, ok := ret.Get(0).(func() types.CertificateInfo); ok { - r0 = rf() - } else { - r0 = 
ret.Get(0).(types.CertificateInfo) - } - - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// AggSenderStorageMock_GetLastSentCertificate_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLastSentCertificate' -type AggSenderStorageMock_GetLastSentCertificate_Call struct { - *mock.Call -} - -// GetLastSentCertificate is a helper method to define mock.On call -func (_e *AggSenderStorageMock_Expecter) GetLastSentCertificate() *AggSenderStorageMock_GetLastSentCertificate_Call { - return &AggSenderStorageMock_GetLastSentCertificate_Call{Call: _e.mock.On("GetLastSentCertificate")} -} - -func (_c *AggSenderStorageMock_GetLastSentCertificate_Call) Run(run func()) *AggSenderStorageMock_GetLastSentCertificate_Call { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *AggSenderStorageMock_GetLastSentCertificate_Call) Return(_a0 types.CertificateInfo, _a1 error) *AggSenderStorageMock_GetLastSentCertificate_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *AggSenderStorageMock_GetLastSentCertificate_Call) RunAndReturn(run func() (types.CertificateInfo, error)) *AggSenderStorageMock_GetLastSentCertificate_Call { - _c.Call.Return(run) - return _c -} - -// SaveLastSentCertificate provides a mock function with given fields: ctx, certificate -func (_m *AggSenderStorageMock) SaveLastSentCertificate(ctx context.Context, certificate types.CertificateInfo) error { - ret := _m.Called(ctx, certificate) - - if len(ret) == 0 { - panic("no return value specified for SaveLastSentCertificate") - } - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, types.CertificateInfo) error); ok { - r0 = rf(ctx, certificate) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// AggSenderStorageMock_SaveLastSentCertificate_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SaveLastSentCertificate' 
-type AggSenderStorageMock_SaveLastSentCertificate_Call struct { - *mock.Call -} - -// SaveLastSentCertificate is a helper method to define mock.On call -// - ctx context.Context -// - certificate types.CertificateInfo -func (_e *AggSenderStorageMock_Expecter) SaveLastSentCertificate(ctx interface{}, certificate interface{}) *AggSenderStorageMock_SaveLastSentCertificate_Call { - return &AggSenderStorageMock_SaveLastSentCertificate_Call{Call: _e.mock.On("SaveLastSentCertificate", ctx, certificate)} -} - -func (_c *AggSenderStorageMock_SaveLastSentCertificate_Call) Run(run func(ctx context.Context, certificate types.CertificateInfo)) *AggSenderStorageMock_SaveLastSentCertificate_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(types.CertificateInfo)) - }) - return _c -} - -func (_c *AggSenderStorageMock_SaveLastSentCertificate_Call) Return(_a0 error) *AggSenderStorageMock_SaveLastSentCertificate_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *AggSenderStorageMock_SaveLastSentCertificate_Call) RunAndReturn(run func(context.Context, types.CertificateInfo) error) *AggSenderStorageMock_SaveLastSentCertificate_Call { - _c.Call.Return(run) - return _c -} - -// UpdateCertificateStatus provides a mock function with given fields: ctx, certificate -func (_m *AggSenderStorageMock) UpdateCertificateStatus(ctx context.Context, certificate types.CertificateInfo) error { - ret := _m.Called(ctx, certificate) - - if len(ret) == 0 { - panic("no return value specified for UpdateCertificateStatus") - } - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, types.CertificateInfo) error); ok { - r0 = rf(ctx, certificate) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// AggSenderStorageMock_UpdateCertificateStatus_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateCertificateStatus' -type AggSenderStorageMock_UpdateCertificateStatus_Call struct { - *mock.Call -} - -// 
UpdateCertificateStatus is a helper method to define mock.On call -// - ctx context.Context -// - certificate types.CertificateInfo -func (_e *AggSenderStorageMock_Expecter) UpdateCertificateStatus(ctx interface{}, certificate interface{}) *AggSenderStorageMock_UpdateCertificateStatus_Call { - return &AggSenderStorageMock_UpdateCertificateStatus_Call{Call: _e.mock.On("UpdateCertificateStatus", ctx, certificate)} -} - -func (_c *AggSenderStorageMock_UpdateCertificateStatus_Call) Run(run func(ctx context.Context, certificate types.CertificateInfo)) *AggSenderStorageMock_UpdateCertificateStatus_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(types.CertificateInfo)) - }) - return _c -} - -func (_c *AggSenderStorageMock_UpdateCertificateStatus_Call) Return(_a0 error) *AggSenderStorageMock_UpdateCertificateStatus_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *AggSenderStorageMock_UpdateCertificateStatus_Call) RunAndReturn(run func(context.Context, types.CertificateInfo) error) *AggSenderStorageMock_UpdateCertificateStatus_Call { - _c.Call.Return(run) - return _c -} - -// NewAggSenderStorageMock creates a new instance of AggSenderStorageMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewAggSenderStorageMock(t interface { - mock.TestingT - Cleanup(func()) -}) *AggSenderStorageMock { - mock := &AggSenderStorageMock{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/aggsender/mocks/mock_l1infotree_syncer.go b/aggsender/mocks/mock_l1infotree_syncer.go deleted file mode 100644 index e113d4ed..00000000 --- a/aggsender/mocks/mock_l1infotree_syncer.go +++ /dev/null @@ -1,217 +0,0 @@ -// Code generated by mockery. DO NOT EDIT. 
- -package mocks - -import ( - context "context" - - common "github.com/ethereum/go-ethereum/common" - - l1infotreesync "github.com/0xPolygon/cdk/l1infotreesync" - - mock "github.com/stretchr/testify/mock" - - treetypes "github.com/0xPolygon/cdk/tree/types" -) - -// L1InfoTreeSyncerMock is an autogenerated mock type for the L1InfoTreeSyncer type -type L1InfoTreeSyncerMock struct { - mock.Mock -} - -type L1InfoTreeSyncerMock_Expecter struct { - mock *mock.Mock -} - -func (_m *L1InfoTreeSyncerMock) EXPECT() *L1InfoTreeSyncerMock_Expecter { - return &L1InfoTreeSyncerMock_Expecter{mock: &_m.Mock} -} - -// GetInfoByGlobalExitRoot provides a mock function with given fields: globalExitRoot -func (_m *L1InfoTreeSyncerMock) GetInfoByGlobalExitRoot(globalExitRoot common.Hash) (*l1infotreesync.L1InfoTreeLeaf, error) { - ret := _m.Called(globalExitRoot) - - if len(ret) == 0 { - panic("no return value specified for GetInfoByGlobalExitRoot") - } - - var r0 *l1infotreesync.L1InfoTreeLeaf - var r1 error - if rf, ok := ret.Get(0).(func(common.Hash) (*l1infotreesync.L1InfoTreeLeaf, error)); ok { - return rf(globalExitRoot) - } - if rf, ok := ret.Get(0).(func(common.Hash) *l1infotreesync.L1InfoTreeLeaf); ok { - r0 = rf(globalExitRoot) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*l1infotreesync.L1InfoTreeLeaf) - } - } - - if rf, ok := ret.Get(1).(func(common.Hash) error); ok { - r1 = rf(globalExitRoot) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// L1InfoTreeSyncerMock_GetInfoByGlobalExitRoot_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetInfoByGlobalExitRoot' -type L1InfoTreeSyncerMock_GetInfoByGlobalExitRoot_Call struct { - *mock.Call -} - -// GetInfoByGlobalExitRoot is a helper method to define mock.On call -// - globalExitRoot common.Hash -func (_e *L1InfoTreeSyncerMock_Expecter) GetInfoByGlobalExitRoot(globalExitRoot interface{}) *L1InfoTreeSyncerMock_GetInfoByGlobalExitRoot_Call { - return 
&L1InfoTreeSyncerMock_GetInfoByGlobalExitRoot_Call{Call: _e.mock.On("GetInfoByGlobalExitRoot", globalExitRoot)} -} - -func (_c *L1InfoTreeSyncerMock_GetInfoByGlobalExitRoot_Call) Run(run func(globalExitRoot common.Hash)) *L1InfoTreeSyncerMock_GetInfoByGlobalExitRoot_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(common.Hash)) - }) - return _c -} - -func (_c *L1InfoTreeSyncerMock_GetInfoByGlobalExitRoot_Call) Return(_a0 *l1infotreesync.L1InfoTreeLeaf, _a1 error) *L1InfoTreeSyncerMock_GetInfoByGlobalExitRoot_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *L1InfoTreeSyncerMock_GetInfoByGlobalExitRoot_Call) RunAndReturn(run func(common.Hash) (*l1infotreesync.L1InfoTreeLeaf, error)) *L1InfoTreeSyncerMock_GetInfoByGlobalExitRoot_Call { - _c.Call.Return(run) - return _c -} - -// GetL1InfoTreeMerkleProofFromIndexToRoot provides a mock function with given fields: ctx, index, root -func (_m *L1InfoTreeSyncerMock) GetL1InfoTreeMerkleProofFromIndexToRoot(ctx context.Context, index uint32, root common.Hash) (treetypes.Proof, error) { - ret := _m.Called(ctx, index, root) - - if len(ret) == 0 { - panic("no return value specified for GetL1InfoTreeMerkleProofFromIndexToRoot") - } - - var r0 treetypes.Proof - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, uint32, common.Hash) (treetypes.Proof, error)); ok { - return rf(ctx, index, root) - } - if rf, ok := ret.Get(0).(func(context.Context, uint32, common.Hash) treetypes.Proof); ok { - r0 = rf(ctx, index, root) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(treetypes.Proof) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, uint32, common.Hash) error); ok { - r1 = rf(ctx, index, root) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// L1InfoTreeSyncerMock_GetL1InfoTreeMerkleProofFromIndexToRoot_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetL1InfoTreeMerkleProofFromIndexToRoot' -type 
L1InfoTreeSyncerMock_GetL1InfoTreeMerkleProofFromIndexToRoot_Call struct { - *mock.Call -} - -// GetL1InfoTreeMerkleProofFromIndexToRoot is a helper method to define mock.On call -// - ctx context.Context -// - index uint32 -// - root common.Hash -func (_e *L1InfoTreeSyncerMock_Expecter) GetL1InfoTreeMerkleProofFromIndexToRoot(ctx interface{}, index interface{}, root interface{}) *L1InfoTreeSyncerMock_GetL1InfoTreeMerkleProofFromIndexToRoot_Call { - return &L1InfoTreeSyncerMock_GetL1InfoTreeMerkleProofFromIndexToRoot_Call{Call: _e.mock.On("GetL1InfoTreeMerkleProofFromIndexToRoot", ctx, index, root)} -} - -func (_c *L1InfoTreeSyncerMock_GetL1InfoTreeMerkleProofFromIndexToRoot_Call) Run(run func(ctx context.Context, index uint32, root common.Hash)) *L1InfoTreeSyncerMock_GetL1InfoTreeMerkleProofFromIndexToRoot_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(uint32), args[2].(common.Hash)) - }) - return _c -} - -func (_c *L1InfoTreeSyncerMock_GetL1InfoTreeMerkleProofFromIndexToRoot_Call) Return(_a0 treetypes.Proof, _a1 error) *L1InfoTreeSyncerMock_GetL1InfoTreeMerkleProofFromIndexToRoot_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *L1InfoTreeSyncerMock_GetL1InfoTreeMerkleProofFromIndexToRoot_Call) RunAndReturn(run func(context.Context, uint32, common.Hash) (treetypes.Proof, error)) *L1InfoTreeSyncerMock_GetL1InfoTreeMerkleProofFromIndexToRoot_Call { - _c.Call.Return(run) - return _c -} - -// GetL1InfoTreeRootByIndex provides a mock function with given fields: ctx, index -func (_m *L1InfoTreeSyncerMock) GetL1InfoTreeRootByIndex(ctx context.Context, index uint32) (treetypes.Root, error) { - ret := _m.Called(ctx, index) - - if len(ret) == 0 { - panic("no return value specified for GetL1InfoTreeRootByIndex") - } - - var r0 treetypes.Root - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, uint32) (treetypes.Root, error)); ok { - return rf(ctx, index) - } - if rf, ok := ret.Get(0).(func(context.Context, 
uint32) treetypes.Root); ok { - r0 = rf(ctx, index) - } else { - r0 = ret.Get(0).(treetypes.Root) - } - - if rf, ok := ret.Get(1).(func(context.Context, uint32) error); ok { - r1 = rf(ctx, index) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// L1InfoTreeSyncerMock_GetL1InfoTreeRootByIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetL1InfoTreeRootByIndex' -type L1InfoTreeSyncerMock_GetL1InfoTreeRootByIndex_Call struct { - *mock.Call -} - -// GetL1InfoTreeRootByIndex is a helper method to define mock.On call -// - ctx context.Context -// - index uint32 -func (_e *L1InfoTreeSyncerMock_Expecter) GetL1InfoTreeRootByIndex(ctx interface{}, index interface{}) *L1InfoTreeSyncerMock_GetL1InfoTreeRootByIndex_Call { - return &L1InfoTreeSyncerMock_GetL1InfoTreeRootByIndex_Call{Call: _e.mock.On("GetL1InfoTreeRootByIndex", ctx, index)} -} - -func (_c *L1InfoTreeSyncerMock_GetL1InfoTreeRootByIndex_Call) Run(run func(ctx context.Context, index uint32)) *L1InfoTreeSyncerMock_GetL1InfoTreeRootByIndex_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(uint32)) - }) - return _c -} - -func (_c *L1InfoTreeSyncerMock_GetL1InfoTreeRootByIndex_Call) Return(_a0 treetypes.Root, _a1 error) *L1InfoTreeSyncerMock_GetL1InfoTreeRootByIndex_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *L1InfoTreeSyncerMock_GetL1InfoTreeRootByIndex_Call) RunAndReturn(run func(context.Context, uint32) (treetypes.Root, error)) *L1InfoTreeSyncerMock_GetL1InfoTreeRootByIndex_Call { - _c.Call.Return(run) - return _c -} - -// NewL1InfoTreeSyncerMock creates a new instance of L1InfoTreeSyncerMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. 
-func NewL1InfoTreeSyncerMock(t interface { - mock.TestingT - Cleanup(func()) -}) *L1InfoTreeSyncerMock { - mock := &L1InfoTreeSyncerMock{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/aggsender/mocks/mock_l2bridge_syncer.go b/aggsender/mocks/mock_l2bridge_syncer.go deleted file mode 100644 index 725184c3..00000000 --- a/aggsender/mocks/mock_l2bridge_syncer.go +++ /dev/null @@ -1,423 +0,0 @@ -// Code generated by mockery. DO NOT EDIT. - -package mocks - -import ( - bridgesync "github.com/0xPolygon/cdk/bridgesync" - common "github.com/ethereum/go-ethereum/common" - - context "context" - - etherman "github.com/0xPolygon/cdk/etherman" - - mock "github.com/stretchr/testify/mock" - - treetypes "github.com/0xPolygon/cdk/tree/types" -) - -// L2BridgeSyncerMock is an autogenerated mock type for the L2BridgeSyncer type -type L2BridgeSyncerMock struct { - mock.Mock -} - -type L2BridgeSyncerMock_Expecter struct { - mock *mock.Mock -} - -func (_m *L2BridgeSyncerMock) EXPECT() *L2BridgeSyncerMock_Expecter { - return &L2BridgeSyncerMock_Expecter{mock: &_m.Mock} -} - -// BlockFinality provides a mock function with given fields: -func (_m *L2BridgeSyncerMock) BlockFinality() etherman.BlockNumberFinality { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for BlockFinality") - } - - var r0 etherman.BlockNumberFinality - if rf, ok := ret.Get(0).(func() etherman.BlockNumberFinality); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(etherman.BlockNumberFinality) - } - - return r0 -} - -// L2BridgeSyncerMock_BlockFinality_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BlockFinality' -type L2BridgeSyncerMock_BlockFinality_Call struct { - *mock.Call -} - -// BlockFinality is a helper method to define mock.On call -func (_e *L2BridgeSyncerMock_Expecter) BlockFinality() *L2BridgeSyncerMock_BlockFinality_Call { - return 
&L2BridgeSyncerMock_BlockFinality_Call{Call: _e.mock.On("BlockFinality")} -} - -func (_c *L2BridgeSyncerMock_BlockFinality_Call) Run(run func()) *L2BridgeSyncerMock_BlockFinality_Call { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *L2BridgeSyncerMock_BlockFinality_Call) Return(_a0 etherman.BlockNumberFinality) *L2BridgeSyncerMock_BlockFinality_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *L2BridgeSyncerMock_BlockFinality_Call) RunAndReturn(run func() etherman.BlockNumberFinality) *L2BridgeSyncerMock_BlockFinality_Call { - _c.Call.Return(run) - return _c -} - -// GetBlockByLER provides a mock function with given fields: ctx, ler -func (_m *L2BridgeSyncerMock) GetBlockByLER(ctx context.Context, ler common.Hash) (uint64, error) { - ret := _m.Called(ctx, ler) - - if len(ret) == 0 { - panic("no return value specified for GetBlockByLER") - } - - var r0 uint64 - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (uint64, error)); ok { - return rf(ctx, ler) - } - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) uint64); ok { - r0 = rf(ctx, ler) - } else { - r0 = ret.Get(0).(uint64) - } - - if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { - r1 = rf(ctx, ler) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// L2BridgeSyncerMock_GetBlockByLER_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetBlockByLER' -type L2BridgeSyncerMock_GetBlockByLER_Call struct { - *mock.Call -} - -// GetBlockByLER is a helper method to define mock.On call -// - ctx context.Context -// - ler common.Hash -func (_e *L2BridgeSyncerMock_Expecter) GetBlockByLER(ctx interface{}, ler interface{}) *L2BridgeSyncerMock_GetBlockByLER_Call { - return &L2BridgeSyncerMock_GetBlockByLER_Call{Call: _e.mock.On("GetBlockByLER", ctx, ler)} -} - -func (_c *L2BridgeSyncerMock_GetBlockByLER_Call) Run(run func(ctx context.Context, ler common.Hash)) 
*L2BridgeSyncerMock_GetBlockByLER_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(common.Hash)) - }) - return _c -} - -func (_c *L2BridgeSyncerMock_GetBlockByLER_Call) Return(_a0 uint64, _a1 error) *L2BridgeSyncerMock_GetBlockByLER_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *L2BridgeSyncerMock_GetBlockByLER_Call) RunAndReturn(run func(context.Context, common.Hash) (uint64, error)) *L2BridgeSyncerMock_GetBlockByLER_Call { - _c.Call.Return(run) - return _c -} - -// GetBridgesPublished provides a mock function with given fields: ctx, fromBlock, toBlock -func (_m *L2BridgeSyncerMock) GetBridgesPublished(ctx context.Context, fromBlock uint64, toBlock uint64) ([]bridgesync.Bridge, error) { - ret := _m.Called(ctx, fromBlock, toBlock) - - if len(ret) == 0 { - panic("no return value specified for GetBridgesPublished") - } - - var r0 []bridgesync.Bridge - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64) ([]bridgesync.Bridge, error)); ok { - return rf(ctx, fromBlock, toBlock) - } - if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64) []bridgesync.Bridge); ok { - r0 = rf(ctx, fromBlock, toBlock) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]bridgesync.Bridge) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, uint64, uint64) error); ok { - r1 = rf(ctx, fromBlock, toBlock) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// L2BridgeSyncerMock_GetBridgesPublished_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetBridgesPublished' -type L2BridgeSyncerMock_GetBridgesPublished_Call struct { - *mock.Call -} - -// GetBridgesPublished is a helper method to define mock.On call -// - ctx context.Context -// - fromBlock uint64 -// - toBlock uint64 -func (_e *L2BridgeSyncerMock_Expecter) GetBridgesPublished(ctx interface{}, fromBlock interface{}, toBlock interface{}) *L2BridgeSyncerMock_GetBridgesPublished_Call { - 
return &L2BridgeSyncerMock_GetBridgesPublished_Call{Call: _e.mock.On("GetBridgesPublished", ctx, fromBlock, toBlock)} -} - -func (_c *L2BridgeSyncerMock_GetBridgesPublished_Call) Run(run func(ctx context.Context, fromBlock uint64, toBlock uint64)) *L2BridgeSyncerMock_GetBridgesPublished_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(uint64), args[2].(uint64)) - }) - return _c -} - -func (_c *L2BridgeSyncerMock_GetBridgesPublished_Call) Return(_a0 []bridgesync.Bridge, _a1 error) *L2BridgeSyncerMock_GetBridgesPublished_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *L2BridgeSyncerMock_GetBridgesPublished_Call) RunAndReturn(run func(context.Context, uint64, uint64) ([]bridgesync.Bridge, error)) *L2BridgeSyncerMock_GetBridgesPublished_Call { - _c.Call.Return(run) - return _c -} - -// GetClaims provides a mock function with given fields: ctx, fromBlock, toBlock -func (_m *L2BridgeSyncerMock) GetClaims(ctx context.Context, fromBlock uint64, toBlock uint64) ([]bridgesync.Claim, error) { - ret := _m.Called(ctx, fromBlock, toBlock) - - if len(ret) == 0 { - panic("no return value specified for GetClaims") - } - - var r0 []bridgesync.Claim - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64) ([]bridgesync.Claim, error)); ok { - return rf(ctx, fromBlock, toBlock) - } - if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64) []bridgesync.Claim); ok { - r0 = rf(ctx, fromBlock, toBlock) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]bridgesync.Claim) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, uint64, uint64) error); ok { - r1 = rf(ctx, fromBlock, toBlock) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// L2BridgeSyncerMock_GetClaims_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetClaims' -type L2BridgeSyncerMock_GetClaims_Call struct { - *mock.Call -} - -// GetClaims is a helper method to define mock.On 
call -// - ctx context.Context -// - fromBlock uint64 -// - toBlock uint64 -func (_e *L2BridgeSyncerMock_Expecter) GetClaims(ctx interface{}, fromBlock interface{}, toBlock interface{}) *L2BridgeSyncerMock_GetClaims_Call { - return &L2BridgeSyncerMock_GetClaims_Call{Call: _e.mock.On("GetClaims", ctx, fromBlock, toBlock)} -} - -func (_c *L2BridgeSyncerMock_GetClaims_Call) Run(run func(ctx context.Context, fromBlock uint64, toBlock uint64)) *L2BridgeSyncerMock_GetClaims_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(uint64), args[2].(uint64)) - }) - return _c -} - -func (_c *L2BridgeSyncerMock_GetClaims_Call) Return(_a0 []bridgesync.Claim, _a1 error) *L2BridgeSyncerMock_GetClaims_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *L2BridgeSyncerMock_GetClaims_Call) RunAndReturn(run func(context.Context, uint64, uint64) ([]bridgesync.Claim, error)) *L2BridgeSyncerMock_GetClaims_Call { - _c.Call.Return(run) - return _c -} - -// GetExitRootByIndex provides a mock function with given fields: ctx, index -func (_m *L2BridgeSyncerMock) GetExitRootByIndex(ctx context.Context, index uint32) (treetypes.Root, error) { - ret := _m.Called(ctx, index) - - if len(ret) == 0 { - panic("no return value specified for GetExitRootByIndex") - } - - var r0 treetypes.Root - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, uint32) (treetypes.Root, error)); ok { - return rf(ctx, index) - } - if rf, ok := ret.Get(0).(func(context.Context, uint32) treetypes.Root); ok { - r0 = rf(ctx, index) - } else { - r0 = ret.Get(0).(treetypes.Root) - } - - if rf, ok := ret.Get(1).(func(context.Context, uint32) error); ok { - r1 = rf(ctx, index) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// L2BridgeSyncerMock_GetExitRootByIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetExitRootByIndex' -type L2BridgeSyncerMock_GetExitRootByIndex_Call struct { - *mock.Call -} - -// 
GetExitRootByIndex is a helper method to define mock.On call -// - ctx context.Context -// - index uint32 -func (_e *L2BridgeSyncerMock_Expecter) GetExitRootByIndex(ctx interface{}, index interface{}) *L2BridgeSyncerMock_GetExitRootByIndex_Call { - return &L2BridgeSyncerMock_GetExitRootByIndex_Call{Call: _e.mock.On("GetExitRootByIndex", ctx, index)} -} - -func (_c *L2BridgeSyncerMock_GetExitRootByIndex_Call) Run(run func(ctx context.Context, index uint32)) *L2BridgeSyncerMock_GetExitRootByIndex_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(uint32)) - }) - return _c -} - -func (_c *L2BridgeSyncerMock_GetExitRootByIndex_Call) Return(_a0 treetypes.Root, _a1 error) *L2BridgeSyncerMock_GetExitRootByIndex_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *L2BridgeSyncerMock_GetExitRootByIndex_Call) RunAndReturn(run func(context.Context, uint32) (treetypes.Root, error)) *L2BridgeSyncerMock_GetExitRootByIndex_Call { - _c.Call.Return(run) - return _c -} - -// GetLastProcessedBlock provides a mock function with given fields: ctx -func (_m *L2BridgeSyncerMock) GetLastProcessedBlock(ctx context.Context) (uint64, error) { - ret := _m.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for GetLastProcessedBlock") - } - - var r0 uint64 - var r1 error - if rf, ok := ret.Get(0).(func(context.Context) (uint64, error)); ok { - return rf(ctx) - } - if rf, ok := ret.Get(0).(func(context.Context) uint64); ok { - r0 = rf(ctx) - } else { - r0 = ret.Get(0).(uint64) - } - - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(ctx) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// L2BridgeSyncerMock_GetLastProcessedBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLastProcessedBlock' -type L2BridgeSyncerMock_GetLastProcessedBlock_Call struct { - *mock.Call -} - -// GetLastProcessedBlock is a helper method to define mock.On call -// - ctx 
context.Context -func (_e *L2BridgeSyncerMock_Expecter) GetLastProcessedBlock(ctx interface{}) *L2BridgeSyncerMock_GetLastProcessedBlock_Call { - return &L2BridgeSyncerMock_GetLastProcessedBlock_Call{Call: _e.mock.On("GetLastProcessedBlock", ctx)} -} - -func (_c *L2BridgeSyncerMock_GetLastProcessedBlock_Call) Run(run func(ctx context.Context)) *L2BridgeSyncerMock_GetLastProcessedBlock_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context)) - }) - return _c -} - -func (_c *L2BridgeSyncerMock_GetLastProcessedBlock_Call) Return(_a0 uint64, _a1 error) *L2BridgeSyncerMock_GetLastProcessedBlock_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *L2BridgeSyncerMock_GetLastProcessedBlock_Call) RunAndReturn(run func(context.Context) (uint64, error)) *L2BridgeSyncerMock_GetLastProcessedBlock_Call { - _c.Call.Return(run) - return _c -} - -// OriginNetwork provides a mock function with given fields: -func (_m *L2BridgeSyncerMock) OriginNetwork() uint32 { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for OriginNetwork") - } - - var r0 uint32 - if rf, ok := ret.Get(0).(func() uint32); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(uint32) - } - - return r0 -} - -// L2BridgeSyncerMock_OriginNetwork_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'OriginNetwork' -type L2BridgeSyncerMock_OriginNetwork_Call struct { - *mock.Call -} - -// OriginNetwork is a helper method to define mock.On call -func (_e *L2BridgeSyncerMock_Expecter) OriginNetwork() *L2BridgeSyncerMock_OriginNetwork_Call { - return &L2BridgeSyncerMock_OriginNetwork_Call{Call: _e.mock.On("OriginNetwork")} -} - -func (_c *L2BridgeSyncerMock_OriginNetwork_Call) Run(run func()) *L2BridgeSyncerMock_OriginNetwork_Call { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *L2BridgeSyncerMock_OriginNetwork_Call) Return(_a0 uint32) *L2BridgeSyncerMock_OriginNetwork_Call { - 
_c.Call.Return(_a0) - return _c -} - -func (_c *L2BridgeSyncerMock_OriginNetwork_Call) RunAndReturn(run func() uint32) *L2BridgeSyncerMock_OriginNetwork_Call { - _c.Call.Return(run) - return _c -} - -// NewL2BridgeSyncerMock creates a new instance of L2BridgeSyncerMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewL2BridgeSyncerMock(t interface { - mock.TestingT - Cleanup(func()) -}) *L2BridgeSyncerMock { - mock := &L2BridgeSyncerMock{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/aggsender/mocks/mock_logger.go b/aggsender/mocks/mock_logger.go deleted file mode 100644 index 5b0eb4e9..00000000 --- a/aggsender/mocks/mock_logger.go +++ /dev/null @@ -1,290 +0,0 @@ -// Code generated by mockery. DO NOT EDIT. - -package mocks - -import mock "github.com/stretchr/testify/mock" - -// LoggerMock is an autogenerated mock type for the Logger type -type LoggerMock struct { - mock.Mock -} - -type LoggerMock_Expecter struct { - mock *mock.Mock -} - -func (_m *LoggerMock) EXPECT() *LoggerMock_Expecter { - return &LoggerMock_Expecter{mock: &_m.Mock} -} - -// Debug provides a mock function with given fields: args -func (_m *LoggerMock) Debug(args ...interface{}) { - var _ca []interface{} - _ca = append(_ca, args...) - _m.Called(_ca...) 
-} - -// LoggerMock_Debug_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Debug' -type LoggerMock_Debug_Call struct { - *mock.Call -} - -// Debug is a helper method to define mock.On call -// - args ...interface{} -func (_e *LoggerMock_Expecter) Debug(args ...interface{}) *LoggerMock_Debug_Call { - return &LoggerMock_Debug_Call{Call: _e.mock.On("Debug", - append([]interface{}{}, args...)...)} -} - -func (_c *LoggerMock_Debug_Call) Run(run func(args ...interface{})) *LoggerMock_Debug_Call { - _c.Call.Run(func(args mock.Arguments) { - variadicArgs := make([]interface{}, len(args)-0) - for i, a := range args[0:] { - if a != nil { - variadicArgs[i] = a.(interface{}) - } - } - run(variadicArgs...) - }) - return _c -} - -func (_c *LoggerMock_Debug_Call) Return() *LoggerMock_Debug_Call { - _c.Call.Return() - return _c -} - -func (_c *LoggerMock_Debug_Call) RunAndReturn(run func(...interface{})) *LoggerMock_Debug_Call { - _c.Call.Return(run) - return _c -} - -// Debugf provides a mock function with given fields: format, args -func (_m *LoggerMock) Debugf(format string, args ...interface{}) { - var _ca []interface{} - _ca = append(_ca, format) - _ca = append(_ca, args...) - _m.Called(_ca...) 
-} - -// LoggerMock_Debugf_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Debugf' -type LoggerMock_Debugf_Call struct { - *mock.Call -} - -// Debugf is a helper method to define mock.On call -// - format string -// - args ...interface{} -func (_e *LoggerMock_Expecter) Debugf(format interface{}, args ...interface{}) *LoggerMock_Debugf_Call { - return &LoggerMock_Debugf_Call{Call: _e.mock.On("Debugf", - append([]interface{}{format}, args...)...)} -} - -func (_c *LoggerMock_Debugf_Call) Run(run func(format string, args ...interface{})) *LoggerMock_Debugf_Call { - _c.Call.Run(func(args mock.Arguments) { - variadicArgs := make([]interface{}, len(args)-1) - for i, a := range args[1:] { - if a != nil { - variadicArgs[i] = a.(interface{}) - } - } - run(args[0].(string), variadicArgs...) - }) - return _c -} - -func (_c *LoggerMock_Debugf_Call) Return() *LoggerMock_Debugf_Call { - _c.Call.Return() - return _c -} - -func (_c *LoggerMock_Debugf_Call) RunAndReturn(run func(string, ...interface{})) *LoggerMock_Debugf_Call { - _c.Call.Return(run) - return _c -} - -// Error provides a mock function with given fields: args -func (_m *LoggerMock) Error(args ...interface{}) { - var _ca []interface{} - _ca = append(_ca, args...) - _m.Called(_ca...) 
-} - -// LoggerMock_Error_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Error' -type LoggerMock_Error_Call struct { - *mock.Call -} - -// Error is a helper method to define mock.On call -// - args ...interface{} -func (_e *LoggerMock_Expecter) Error(args ...interface{}) *LoggerMock_Error_Call { - return &LoggerMock_Error_Call{Call: _e.mock.On("Error", - append([]interface{}{}, args...)...)} -} - -func (_c *LoggerMock_Error_Call) Run(run func(args ...interface{})) *LoggerMock_Error_Call { - _c.Call.Run(func(args mock.Arguments) { - variadicArgs := make([]interface{}, len(args)-0) - for i, a := range args[0:] { - if a != nil { - variadicArgs[i] = a.(interface{}) - } - } - run(variadicArgs...) - }) - return _c -} - -func (_c *LoggerMock_Error_Call) Return() *LoggerMock_Error_Call { - _c.Call.Return() - return _c -} - -func (_c *LoggerMock_Error_Call) RunAndReturn(run func(...interface{})) *LoggerMock_Error_Call { - _c.Call.Return(run) - return _c -} - -// Errorf provides a mock function with given fields: format, args -func (_m *LoggerMock) Errorf(format string, args ...interface{}) { - var _ca []interface{} - _ca = append(_ca, format) - _ca = append(_ca, args...) - _m.Called(_ca...) 
-} - -// LoggerMock_Errorf_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Errorf' -type LoggerMock_Errorf_Call struct { - *mock.Call -} - -// Errorf is a helper method to define mock.On call -// - format string -// - args ...interface{} -func (_e *LoggerMock_Expecter) Errorf(format interface{}, args ...interface{}) *LoggerMock_Errorf_Call { - return &LoggerMock_Errorf_Call{Call: _e.mock.On("Errorf", - append([]interface{}{format}, args...)...)} -} - -func (_c *LoggerMock_Errorf_Call) Run(run func(format string, args ...interface{})) *LoggerMock_Errorf_Call { - _c.Call.Run(func(args mock.Arguments) { - variadicArgs := make([]interface{}, len(args)-1) - for i, a := range args[1:] { - if a != nil { - variadicArgs[i] = a.(interface{}) - } - } - run(args[0].(string), variadicArgs...) - }) - return _c -} - -func (_c *LoggerMock_Errorf_Call) Return() *LoggerMock_Errorf_Call { - _c.Call.Return() - return _c -} - -func (_c *LoggerMock_Errorf_Call) RunAndReturn(run func(string, ...interface{})) *LoggerMock_Errorf_Call { - _c.Call.Return(run) - return _c -} - -// Info provides a mock function with given fields: args -func (_m *LoggerMock) Info(args ...interface{}) { - var _ca []interface{} - _ca = append(_ca, args...) - _m.Called(_ca...) 
-} - -// LoggerMock_Info_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Info' -type LoggerMock_Info_Call struct { - *mock.Call -} - -// Info is a helper method to define mock.On call -// - args ...interface{} -func (_e *LoggerMock_Expecter) Info(args ...interface{}) *LoggerMock_Info_Call { - return &LoggerMock_Info_Call{Call: _e.mock.On("Info", - append([]interface{}{}, args...)...)} -} - -func (_c *LoggerMock_Info_Call) Run(run func(args ...interface{})) *LoggerMock_Info_Call { - _c.Call.Run(func(args mock.Arguments) { - variadicArgs := make([]interface{}, len(args)-0) - for i, a := range args[0:] { - if a != nil { - variadicArgs[i] = a.(interface{}) - } - } - run(variadicArgs...) - }) - return _c -} - -func (_c *LoggerMock_Info_Call) Return() *LoggerMock_Info_Call { - _c.Call.Return() - return _c -} - -func (_c *LoggerMock_Info_Call) RunAndReturn(run func(...interface{})) *LoggerMock_Info_Call { - _c.Call.Return(run) - return _c -} - -// Infof provides a mock function with given fields: format, args -func (_m *LoggerMock) Infof(format string, args ...interface{}) { - var _ca []interface{} - _ca = append(_ca, format) - _ca = append(_ca, args...) - _m.Called(_ca...) 
-} - -// LoggerMock_Infof_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Infof' -type LoggerMock_Infof_Call struct { - *mock.Call -} - -// Infof is a helper method to define mock.On call -// - format string -// - args ...interface{} -func (_e *LoggerMock_Expecter) Infof(format interface{}, args ...interface{}) *LoggerMock_Infof_Call { - return &LoggerMock_Infof_Call{Call: _e.mock.On("Infof", - append([]interface{}{format}, args...)...)} -} - -func (_c *LoggerMock_Infof_Call) Run(run func(format string, args ...interface{})) *LoggerMock_Infof_Call { - _c.Call.Run(func(args mock.Arguments) { - variadicArgs := make([]interface{}, len(args)-1) - for i, a := range args[1:] { - if a != nil { - variadicArgs[i] = a.(interface{}) - } - } - run(args[0].(string), variadicArgs...) - }) - return _c -} - -func (_c *LoggerMock_Infof_Call) Return() *LoggerMock_Infof_Call { - _c.Call.Return() - return _c -} - -func (_c *LoggerMock_Infof_Call) RunAndReturn(run func(string, ...interface{})) *LoggerMock_Infof_Call { - _c.Call.Return(run) - return _c -} - -// NewLoggerMock creates a new instance of LoggerMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewLoggerMock(t interface { - mock.TestingT - Cleanup(func()) -}) *LoggerMock { - mock := &LoggerMock{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/aggsender/types/block_notifier.go b/aggsender/types/block_notifier.go new file mode 100644 index 00000000..475abc1b --- /dev/null +++ b/aggsender/types/block_notifier.go @@ -0,0 +1,15 @@ +package types + +import "github.com/0xPolygon/cdk/etherman" + +type EventNewBlock struct { + BlockNumber uint64 + BlockFinalityType etherman.BlockNumberFinality +} + +// BlockNotifier is the interface that wraps the basic methods to notify a new block. 
+type BlockNotifier interface { + // NotifyEpochStarted notifies the epoch has started. + Subscribe(id string) <-chan EventNewBlock + String() string +} diff --git a/aggsender/types/epoch_notifier.go b/aggsender/types/epoch_notifier.go new file mode 100644 index 00000000..426ad362 --- /dev/null +++ b/aggsender/types/epoch_notifier.go @@ -0,0 +1,25 @@ +package types + +import ( + "context" + "fmt" +) + +// EpochEvent is the event that notifies the neear end epoch +type EpochEvent struct { + Epoch uint64 + // ExtraInfo if a detailed information about the epoch that depends on implementation + ExtraInfo fmt.Stringer +} + +func (e EpochEvent) String() string { + return fmt.Sprintf("EpochEvent: epoch=%d extra=%s", e.Epoch, e.ExtraInfo) +} + +type EpochNotifier interface { + // NotifyEpochStarted notifies the epoch is close to end. + Subscribe(id string) <-chan EpochEvent + // Start starts the notifier synchronously + Start(ctx context.Context) + String() string +} diff --git a/aggsender/types/generic_subscriber.go b/aggsender/types/generic_subscriber.go new file mode 100644 index 00000000..67038c5c --- /dev/null +++ b/aggsender/types/generic_subscriber.go @@ -0,0 +1,6 @@ +package types + +type GenericSubscriber[T any] interface { + Subscribe(subscriberName string) <-chan T + Publish(data T) +} diff --git a/aggsender/types/types.go b/aggsender/types/types.go index 46d31176..d9e0b2e7 100644 --- a/aggsender/types/types.go +++ b/aggsender/types/types.go @@ -47,6 +47,8 @@ type Logger interface { Infof(format string, args ...interface{}) Error(args ...interface{}) Errorf(format string, args ...interface{}) + Warn(args ...interface{}) + Warnf(format string, args ...interface{}) Debug(args ...interface{}) Debugf(format string, args ...interface{}) } diff --git a/cmd/run.go b/cmd/run.go index c30da739..6042e935 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -125,6 +125,7 @@ func start(cliCtx *cli.Context) error { aggsender, err := createAggSender( cliCtx.Context, c.AggSender, + 
l1Client, l1InfoTreeSync, l2BridgeSync, ) @@ -144,13 +145,35 @@ func start(cliCtx *cli.Context) error { func createAggSender( ctx context.Context, cfg aggsender.Config, + l1EthClient *ethclient.Client, l1InfoTreeSync *l1infotreesync.L1InfoTreeSync, - l2Syncer *bridgesync.BridgeSync, -) (*aggsender.AggSender, error) { + l2Syncer *bridgesync.BridgeSync) (*aggsender.AggSender, error) { logger := log.WithFields("module", cdkcommon.AGGSENDER) agglayerClient := agglayer.NewAggLayerClient(cfg.AggLayerURL) + blockNotifier, err := aggsender.NewBlockNotifierPolling(l1EthClient, aggsender.ConfigBlockNotifierPolling{ + BlockFinalityType: etherman.BlockNumberFinality(cfg.BlockFinality), + CheckNewBlockInterval: aggsender.AutomaticBlockInterval, + }, logger, nil) + if err != nil { + return nil, err + } - return aggsender.New(ctx, logger, cfg, agglayerClient, l1InfoTreeSync, l2Syncer) + notifierCfg, err := aggsender.NewConfigEpochNotifierPerBlock(agglayerClient, cfg.EpochNotificationPercentage) + if err != nil { + return nil, fmt.Errorf("cant generate config for Epoch Notifier because: %w", err) + } + epochNotifier, err := aggsender.NewEpochNotifierPerBlock( + blockNotifier, + logger, + *notifierCfg, nil) + if err != nil { + return nil, err + } + log.Infof("Starting blockNotifier: %s", blockNotifier.String()) + go blockNotifier.Start(ctx) + log.Infof("Starting epochNotifier: %s", epochNotifier.String()) + go epochNotifier.Start(ctx) + return aggsender.New(ctx, logger, cfg, agglayerClient, l1InfoTreeSync, l2Syncer, epochNotifier) } func createAggregator(ctx context.Context, c config.Config, runMigrations bool) *aggregator.Aggregator { diff --git a/config/default.go b/config/default.go index bbf4d2e0..d7188e43 100644 --- a/config/default.go +++ b/config/default.go @@ -7,6 +7,7 @@ L1URL = "http://localhost:8545" L2URL = "http://localhost:8123" AggLayerURL = "https://agglayer-dev.polygon.technology" + ForkId = 9 ContractVersions = "elderberry" IsValidiumMode = false @@ -215,7 +216,7 
@@ DBPath = "{{PathRWData}}/reorgdetectorl2.sqlite" DBPath = "{{PathRWData}}/L1InfoTreeSync.sqlite" GlobalExitRootAddr="{{NetworkConfig.L1.GlobalExitRootManagerAddr}}" RollupManagerAddr = "{{NetworkConfig.L1.RollupManagerAddr}}" -SyncBlockChunkSize=10 +SyncBlockChunkSize=100 BlockFinality="LatestBlock" URLRPCL1="{{L1URL}}" WaitForNewBlocksPeriod="100ms" @@ -340,5 +341,7 @@ AggsenderPrivateKey = {Path = "{{SequencerPrivateKeyPath}}", Password = "{{Seque BlockGetInterval = "2s" URLRPCL2="{{L2URL}}" CheckSettledInterval = "2s" +BlockFinality = "LatestBlock" +EpochNotificationPercentage = 50 SaveCertificatesToFiles = false ` diff --git a/dataavailability/datacommittee/datacommittee.go b/dataavailability/datacommittee/datacommittee.go index 474c5934..369fc0fe 100644 --- a/dataavailability/datacommittee/datacommittee.go +++ b/dataavailability/datacommittee/datacommittee.go @@ -105,53 +105,40 @@ func (d *Backend) Init() error { return nil } -// GetSequence gets backend data one hash at a time. This should be optimized on the DAC side to get them all at once. +// GetSequence retrieves backend data by querying committee members for each hash concurrently. 
func (d *Backend) GetSequence(_ context.Context, hashes []common.Hash, _ []byte) ([][]byte, error) { - intialMember := d.selectedCommitteeMember + initialMember := d.selectedCommitteeMember - var found bool - for !found && intialMember != -1 { + var batchData [][]byte + for retries := 0; retries < len(d.committeeMembers); retries++ { member := d.committeeMembers[d.selectedCommitteeMember] d.logger.Infof("trying to get data from %s at %s", member.Addr.Hex(), member.URL) c := d.dataCommitteeClientFactory.New(member.URL) - dataMap, err := c.ListOffChainData(d.ctx, hashes) if err != nil { - d.logger.Warnf( - "error getting data from DAC node %s at %s: %s", - member.Addr.Hex(), member.URL, err, - ) + d.logger.Warnf("error getting data from DAC node %s at %s: %s", member.Addr.Hex(), member.URL, err) d.selectedCommitteeMember = (d.selectedCommitteeMember + 1) % len(d.committeeMembers) - if d.selectedCommitteeMember == intialMember { + if d.selectedCommitteeMember == initialMember { break } - continue } - batchData := make([][]byte, 0, len(hashes)) + batchData = make([][]byte, 0, len(hashes)) for _, hash := range hashes { actualTransactionsHash := crypto.Keccak256Hash(dataMap[hash]) if actualTransactionsHash != hash { - unexpectedHash := fmt.Errorf( - unexpectedHashTemplate, hash, actualTransactionsHash, - ) - d.logger.Warnf( - "error getting data from DAC node %s at %s: %s", - member.Addr.Hex(), member.URL, unexpectedHash, - ) + unexpectedHash := fmt.Errorf(unexpectedHashTemplate, hash, actualTransactionsHash) + d.logger.Warnf("error getting data from DAC node %s at %s: %s", member.Addr.Hex(), member.URL, unexpectedHash) d.selectedCommitteeMember = (d.selectedCommitteeMember + 1) % len(d.committeeMembers) - if d.selectedCommitteeMember == intialMember { + if d.selectedCommitteeMember == initialMember { break } - continue } - batchData = append(batchData, dataMap[hash]) } - return batchData, nil } diff --git a/go.mod b/go.mod index 0061c72f..430e8326 100644 --- a/go.mod 
+++ b/go.mod @@ -33,7 +33,7 @@ require ( go.uber.org/zap v1.27.0 golang.org/x/crypto v0.27.0 golang.org/x/net v0.29.0 - golang.org/x/sync v0.8.0 + golang.org/x/sync v0.9.0 google.golang.org/grpc v1.64.0 google.golang.org/protobuf v1.34.2 modernc.org/sqlite v1.32.0 @@ -151,7 +151,7 @@ require ( github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect go.opentelemetry.io/otel/trace v1.24.0 // indirect go.uber.org/multierr v1.10.0 // indirect - golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa // indirect + golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f // indirect golang.org/x/sys v0.25.0 // indirect golang.org/x/text v0.18.0 // indirect golang.org/x/time v0.5.0 // indirect diff --git a/go.sum b/go.sum index ceb905ac..3ad80938 100644 --- a/go.sum +++ b/go.sum @@ -6,6 +6,7 @@ github.com/0xPolygon/cdk-rpc v0.0.0-20241004114257-6c3cb6eebfb6 h1:FXL/rcO7/GtZ3 github.com/0xPolygon/cdk-rpc v0.0.0-20241004114257-6c3cb6eebfb6/go.mod h1:2scWqMMufrQXu7TikDgQ3BsyaKoX8qP26D6E262vSOg= github.com/0xPolygon/zkevm-ethtx-manager v0.2.1 h1:2Yb+KdJFMpVrS9LIkd658XiWuN+MCTs7SgeWaopXScg= github.com/0xPolygon/zkevm-ethtx-manager v0.2.1/go.mod h1:lqQmzSo2OXEZItD0R4Cd+lqKFxphXEWgqHefVcGDZZc= +github.com/0xPolygonHermez/zkevm-data-streamer v0.2.7/go.mod h1:7nM7Ihk+fTG1TQPwdZoGOYd3wprqqyIyjtS514uHzWE= github.com/0xPolygonHermez/zkevm-synchronizer-l1 v1.0.5 h1:YmnhuCl349MoNASN0fMeGKU1o9HqJhiZkfMsA/1cTRA= github.com/0xPolygonHermez/zkevm-synchronizer-l1 v1.0.5/go.mod h1:X4Su/M/+hSISqdl9yomKlRsbTyuZHsRohporyHsP8gg= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= @@ -482,6 +483,8 @@ golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa h1:FRnLl4eNAQl8hwxVVC17teOw8kdjVDVAiFMtgUdTSRQ= golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa/go.mod 
h1:zk2irFbV9DP96SEBUUAy67IdHUaZuSnrz1n472HUCLE= +golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f h1:XdNn9LlyWAhLVp6P/i8QYBW+hlyhrhei9uErw2B5GJo= +golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f/go.mod h1:D5SMRVC3C2/4+F/DB1wZsLRnSNimn2Sp/NPsCrsv8ak= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= @@ -489,6 +492,7 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8= golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -510,6 +514,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= +golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -571,6 +577,7 @@ golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg= golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI= +golang.org/x/tools v0.27.0 h1:qEKojBykQkQ4EynWy4S8Weg69NumxKdn40Fce3uc/8o= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/scripts/local_config b/scripts/local_config index 09e0167a..5830b6e6 100755 --- a/scripts/local_config +++ b/scripts/local_config @@ -447,4 +447,4 @@ EOF echo " -----------------------------------------------------------" echo " " echo " - rembember to clean previous execution data: " -echo " rm -Rf ${path_rw_data}/*" +echo " rm -Rf ${zkevm_path_rw_data}/*" diff --git a/sonar-project.properties b/sonar-project.properties index a6245819..3b6ddc8a 100644 --- a/sonar-project.properties +++ b/sonar-project.properties @@ -7,11 +7,11 @@ sonar.projectName=cdk sonar.organization=0xpolygon sonar.sources=. -sonar.exclusions=**/test/**,**/vendor/**,**/mocks/**,**/build/**,**/target/**,**/proto/include/**,**/*.pb.go,**/docs/**,**/*.sql,**/mocks_*/*,scripts/**,**/mock_*.go,**/agglayer/**,**/cmd/** +sonar.exclusions=**/test/**,**/vendor/**,**/mocks/**,**/build/**,**/target/**,**/proto/include/**,**/*.pb.go,**/docs/**,**/*.sql,**/mocks_*/*,scripts/**,**/mock_*.go,**/cmd/** sonar.tests=. 
sonar.test.inclusions=**/*_test.go -sonar.test.exclusions=test/contracts/**,**/vendor/**,**/docs/**,**/mocks/**,**/*.pb.go,**/*.yml,**/*.yaml,**/*.json,**/*.xml,**/*.toml,**/mocks_*/*,**/mock_*.go,**/agglayer/**,**/cmd/** +sonar.test.exclusions=test/contracts/**,**/vendor/**,**/docs/**,**/mocks/**,**/*.pb.go,**/*.yml,**/*.yaml,**/*.json,**/*.xml,**/*.toml,**/mocks_*/*,**/mock_*.go,**/cmd/** sonar.issue.enforceSemantic=true # ===================================================== diff --git a/test/Makefile b/test/Makefile index 51a475ed..2435730c 100644 --- a/test/Makefile +++ b/test/Makefile @@ -3,6 +3,8 @@ generate-mocks: generate-mocks-bridgesync generate-mocks-reorgdetector generate- generate-mocks-da generate-mocks-l1infotreesync generate-mocks-helpers \ generate-mocks-sync generate-mocks-l1infotreesync generate-mocks-aggregator \ generate-mocks-aggsender generate-mocks-agglayer generate-mocks-bridgesync + generate-mocks-sync generate-mocks-l1infotreesync generate-mocks-aggregator \ + generate-mocks-aggsender generate-mocks-agglayer generate-mocks-bridgesync .PHONY: generate-mocks-bridgesync generate-mocks-bridgesync: ## Generates mocks for bridgesync, using mockery tool @@ -61,11 +63,8 @@ generate-mocks-aggregator: ## Generates mocks for aggregator, using mockery tool .PHONY: generate-mocks-aggsender generate-mocks-aggsender: ## Generates mocks for aggsender, using mockery tool - export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=L1InfoTreeSyncer --dir=../aggsender/types --output=../aggsender/mocks --outpkg=mocks --structname=L1InfoTreeSyncerMock --filename=mock_l1infotree_syncer.go ${COMMON_MOCKERY_PARAMS} - export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=L2BridgeSyncer --dir=../aggsender/types --output=../aggsender/mocks --outpkg=mocks --structname=L2BridgeSyncerMock --filename=mock_l2bridge_syncer.go ${COMMON_MOCKERY_PARAMS} - export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=Logger 
--dir=../aggsender/types --output=../aggsender/mocks --outpkg=mocks --structname=LoggerMock --filename=mock_logger.go ${COMMON_MOCKERY_PARAMS} - export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=AggSenderStorage --dir=../aggsender/db --output=../aggsender/mocks --outpkg=mocks --structname=AggSenderStorageMock --filename=mock_aggsender_storage.go ${COMMON_MOCKERY_PARAMS} - export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=EthClient --dir=../aggsender/types --output=../aggsender/mocks --outpkg=mocks --structname=EthClientMock --filename=mock_eth_client.go ${COMMON_MOCKERY_PARAMS} + rm -Rf ../aggsender/mocks + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --all --case snake --dir ../aggsender --output ../aggsender/mocks --outpkg mocks ${COMMON_MOCKERY_PARAMS} .PHONY: generate-mocks-agglayer generate-mocks-agglayer: ## Generates mocks for agglayer, using mockery tool diff --git a/test/bridge-e2e.bats b/test/bridge-e2e.bats index ed599c7d..e754ef70 100644 --- a/test/bridge-e2e.bats +++ b/test/bridge-e2e.bats @@ -48,23 +48,35 @@ setup() { } @test "Native gas token deposit to WETH" { + destination_addr=$sender_addr local initial_receiver_balance=$(cast call --rpc-url "$l2_rpc_url" "$weth_token_addr" "$balance_of_fn_sig" "$destination_addr" | awk '{print $1}') echo "Initial receiver balance of native token on L2 $initial_receiver_balance" >&3 - echo "Running LxLy deposit" >&3 + echo "=== Running LxLy deposit on L1 to network: $l2_rpc_network_id native_token: $native_token_addr" >&3 + + destination_net=$l2_rpc_network_id run bridgeAsset "$native_token_addr" "$l1_rpc_url" assert_success - echo "Running LxLy claim" >&3 + echo "=== Running LxLy claim on L2" >&3 timeout="120" claim_frequency="10" run wait_for_claim "$timeout" "$claim_frequency" "$l2_rpc_url" assert_success run verify_balance "$l2_rpc_url" "$weth_token_addr" "$destination_addr" "$initial_receiver_balance" "$ether_value" - if [ $status -eq 0 ]; 
then - break - fi + assert_success + + echo "=== bridgeAsset L2 WETH: $weth_token_addr to L1 ETH" >&3 + destination_addr=$sender_addr + destination_net=0 + run bridgeAsset "$weth_token_addr" "$l2_rpc_url" + assert_success + + echo "=== Claim in L1 ETH" >&3 + timeout="400" + claim_frequency="60" + run wait_for_claim "$timeout" "$claim_frequency" "$l1_rpc_url" assert_success } diff --git a/test/config/kurtosis-cdk-node-config.toml.template b/test/config/kurtosis-cdk-node-config.toml.template index 508c1286..4069b350 100644 --- a/test/config/kurtosis-cdk-node-config.toml.template +++ b/test/config/kurtosis-cdk-node-config.toml.template @@ -25,8 +25,6 @@ AggregatorPrivateKeyPassword = "{{.zkevm_l2_keystore_password}}" SenderProofToL1Addr = "{{.zkevm_l2_agglayer_address}}" polygonBridgeAddr = "{{.zkevm_bridge_address}}" - -RPCURL = "http://{{.l2_rpc_name}}{{.deployment_suffix}}:{{.zkevm_rpc_http_port}}" WitnessURL = "http://{{.l2_rpc_name}}{{.deployment_suffix}}:{{.zkevm_rpc_http_port}}" diff --git a/test/helpers/lxly-bridge-test.bash b/test/helpers/lxly-bridge-test.bash index 7b3cb008..ad5ab943 100644 --- a/test/helpers/lxly-bridge-test.bash +++ b/test/helpers/lxly-bridge-test.bash @@ -36,6 +36,7 @@ function claim() { readonly bridge_deposit_file=$(mktemp) readonly claimable_deposit_file=$(mktemp) echo "Getting full list of deposits" >&3 + echo " curl -s \"$bridge_api_url/bridges/$destination_addr?limit=100&offset=0\"" >&3 curl -s "$bridge_api_url/bridges/$destination_addr?limit=100&offset=0" | jq '.' 
| tee $bridge_deposit_file echo "Looking for claimable deposits" >&3 From 7c2b7f30b62b9af1dee8fc1502aef50c0405aac9 Mon Sep 17 00:00:00 2001 From: Goran Rojovic Date: Wed, 13 Nov 2024 14:40:22 +0100 Subject: [PATCH 28/33] fix: add new error --- .../l1_info_root_incorrect_error.json | 6 +++ agglayer/type_conversion_error.go | 51 +++++++++++++++++++ 2 files changed, 57 insertions(+) create mode 100644 agglayer/testdata/type_conversion_errors/l1_info_root_incorrect_error.json diff --git a/agglayer/testdata/type_conversion_errors/l1_info_root_incorrect_error.json b/agglayer/testdata/type_conversion_errors/l1_info_root_incorrect_error.json new file mode 100644 index 00000000..daebff15 --- /dev/null +++ b/agglayer/testdata/type_conversion_errors/l1_info_root_incorrect_error.json @@ -0,0 +1,6 @@ +[ + { + "test_name": "L1InfoRootIncorrect", + "certificate_header": "{\"network_id\":1,\"height\":6,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"TypeConversionError\":{\"L1InfoRootIncorrect\":{\"leaf_count\":11,\"declared\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"retrieved\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\"}}}}}}" + } +] \ No newline at end of file diff --git a/agglayer/type_conversion_error.go b/agglayer/type_conversion_error.go index 89129253..3d75658f 100644 --- a/agglayer/type_conversion_error.go +++ b/agglayer/type_conversion_error.go @@ -3,6 +3,8 @@ package agglayer import ( "errors" "fmt" + + "github.com/ethereum/go-ethereum/common" ) const ( @@ -12,6 +14,7 @@ const ( BalanceUnderflowErrorType = "BalanceUnderflow" BalanceProofGenerationFailedErrorType = "BalanceProofGenerationFailed" 
NullifierPathGenerationFailedErrorType = "NullifierPathGenerationFailed" + L1InfoRootIncorrectErrorType = "L1InfoRootIncorrect" ) // TypeConversionError is an error that is returned when verifying a certficate @@ -57,6 +60,12 @@ func (p *TypeConversionError) Unmarshal(data interface{}) error { return nil, err } return nullifierPathGenerationFailed, nil + case L1InfoRootIncorrectErrorType: + l1InfoRootIncorrect := &L1InfoRootIncorrect{} + if err := l1InfoRootIncorrect.Unmarshal(value); err != nil { + return nil, err + } + return l1InfoRootIncorrect, nil default: return nil, fmt.Errorf("unknown type conversion error type: %v", key) } @@ -253,3 +262,45 @@ func (e *NullifierPathGenerationFailed) UnmarshalFromMap(data interface{}) error e.GlobalIndex = &GlobalIndex{} return e.GlobalIndex.UnmarshalFromMap(globalIndexMap) } + +// L1InfoRootIncorrect is an error that is returned when the L1 Info Root is invalid or unsettled +type L1InfoRootIncorrect struct { + Declared common.Hash `json:"declared"` + Retrieved common.Hash `json:"retrieved"` + LeafCount uint32 `json:"leaf_count"` +} + +// String is the implementation of the Error interface +func (e *L1InfoRootIncorrect) String() string { + return fmt.Sprintf("%s: The L1 Info Root is incorrect. Declared: %s, Retrieved: %s, LeafCount: %d", + L1InfoRootIncorrectErrorType, e.Declared.String(), e.Retrieved.String(), e.LeafCount) +} + +// Unmarshal unmarshals the data from a map into a L1InfoRootIncorrect struct. 
+func (e *L1InfoRootIncorrect) Unmarshal(data interface{}) error { + dataMap, ok := data.(map[string]interface{}) + if !ok { + return errNotMap + } + + declared, err := convertMapValue[string](dataMap, "declared") + if err != nil { + return err + } + + retrieved, err := convertMapValue[string](dataMap, "retrieved") + if err != nil { + return err + } + + leafCount, err := convertMapValue[uint32](dataMap, "leaf_count") + if err != nil { + return err + } + + e.Declared = common.HexToHash(declared) + e.Retrieved = common.HexToHash(retrieved) + e.LeafCount = leafCount + + return nil +} From 141a1d88f502709d501329367d887e07da87696f Mon Sep 17 00:00:00 2001 From: Goran Rojovic Date: Wed, 13 Nov 2024 15:20:25 +0100 Subject: [PATCH 29/33] fix: ut --- .../type_conversion_errors/l1_info_root_incorrect_error.json | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/agglayer/testdata/type_conversion_errors/l1_info_root_incorrect_error.json b/agglayer/testdata/type_conversion_errors/l1_info_root_incorrect_error.json index daebff15..dc74e325 100644 --- a/agglayer/testdata/type_conversion_errors/l1_info_root_incorrect_error.json +++ b/agglayer/testdata/type_conversion_errors/l1_info_root_incorrect_error.json @@ -2,5 +2,10 @@ { "test_name": "L1InfoRootIncorrect", "certificate_header": "{\"network_id\":1,\"height\":6,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"TypeConversionError\":{\"L1InfoRootIncorrect\":{\"leaf_count\":11,\"declared\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"retrieved\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\"}}}}}}" + }, + { + "test_name": "L1InfoRootIncorrect - unmarshal error", + 
"expected_error": "value of key leaf_count is not of type uint32", + "certificate_header": "{\"network_id\":1,\"height\":6,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"TypeConversionError\":{\"L1InfoRootIncorrect\":{\"leaf_count\":\"invalid\",\"declared\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"retrieved\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\"}}}}}}" } ] \ No newline at end of file From a58326360ca34c08fa08097d8a0e3f4223d1423f Mon Sep 17 00:00:00 2001 From: Joan Esteban <129153821+joanestebanr@users.noreply.github.com> Date: Thu, 14 Nov 2024 10:22:54 +0100 Subject: [PATCH 30/33] feat: improve aggsender logs (#186) --- agglayer/mock_agglayer_client.go | 150 +++++++++++++++++++++++++++++- agglayer/types.go | 2 +- aggsender/aggsender.go | 43 +++++---- aggsender/aggsender_test.go | 97 +++++++++++++++---- aggsender/types/epoch_notifier.go | 3 + config/default.go | 2 +- test/Makefile | 2 +- 7 files changed, 256 insertions(+), 43 deletions(-) diff --git a/agglayer/mock_agglayer_client.go b/agglayer/mock_agglayer_client.go index 1b756713..b7f70ee8 100644 --- a/agglayer/mock_agglayer_client.go +++ b/agglayer/mock_agglayer_client.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.39.0. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package agglayer @@ -15,6 +15,14 @@ type AgglayerClientMock struct { mock.Mock } +type AgglayerClientMock_Expecter struct { + mock *mock.Mock +} + +func (_m *AgglayerClientMock) EXPECT() *AgglayerClientMock_Expecter { + return &AgglayerClientMock_Expecter{mock: &_m.Mock} +} + // GetCertificateHeader provides a mock function with given fields: certificateHash func (_m *AgglayerClientMock) GetCertificateHeader(certificateHash common.Hash) (*CertificateHeader, error) { ret := _m.Called(certificateHash) @@ -45,6 +53,34 @@ func (_m *AgglayerClientMock) GetCertificateHeader(certificateHash common.Hash) return r0, r1 } +// AgglayerClientMock_GetCertificateHeader_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetCertificateHeader' +type AgglayerClientMock_GetCertificateHeader_Call struct { + *mock.Call +} + +// GetCertificateHeader is a helper method to define mock.On call +// - certificateHash common.Hash +func (_e *AgglayerClientMock_Expecter) GetCertificateHeader(certificateHash interface{}) *AgglayerClientMock_GetCertificateHeader_Call { + return &AgglayerClientMock_GetCertificateHeader_Call{Call: _e.mock.On("GetCertificateHeader", certificateHash)} +} + +func (_c *AgglayerClientMock_GetCertificateHeader_Call) Run(run func(certificateHash common.Hash)) *AgglayerClientMock_GetCertificateHeader_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(common.Hash)) + }) + return _c +} + +func (_c *AgglayerClientMock_GetCertificateHeader_Call) Return(_a0 *CertificateHeader, _a1 error) *AgglayerClientMock_GetCertificateHeader_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *AgglayerClientMock_GetCertificateHeader_Call) RunAndReturn(run func(common.Hash) (*CertificateHeader, error)) *AgglayerClientMock_GetCertificateHeader_Call { + _c.Call.Return(run) + return _c +} + // GetEpochConfiguration provides a mock function with given fields: func (_m *AgglayerClientMock) GetEpochConfiguration() (*ClockConfiguration, 
error) { ret := _m.Called() @@ -75,6 +111,33 @@ func (_m *AgglayerClientMock) GetEpochConfiguration() (*ClockConfiguration, erro return r0, r1 } +// AgglayerClientMock_GetEpochConfiguration_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetEpochConfiguration' +type AgglayerClientMock_GetEpochConfiguration_Call struct { + *mock.Call +} + +// GetEpochConfiguration is a helper method to define mock.On call +func (_e *AgglayerClientMock_Expecter) GetEpochConfiguration() *AgglayerClientMock_GetEpochConfiguration_Call { + return &AgglayerClientMock_GetEpochConfiguration_Call{Call: _e.mock.On("GetEpochConfiguration")} +} + +func (_c *AgglayerClientMock_GetEpochConfiguration_Call) Run(run func()) *AgglayerClientMock_GetEpochConfiguration_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *AgglayerClientMock_GetEpochConfiguration_Call) Return(_a0 *ClockConfiguration, _a1 error) *AgglayerClientMock_GetEpochConfiguration_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *AgglayerClientMock_GetEpochConfiguration_Call) RunAndReturn(run func() (*ClockConfiguration, error)) *AgglayerClientMock_GetEpochConfiguration_Call { + _c.Call.Return(run) + return _c +} + // SendCertificate provides a mock function with given fields: certificate func (_m *AgglayerClientMock) SendCertificate(certificate *SignedCertificate) (common.Hash, error) { ret := _m.Called(certificate) @@ -105,6 +168,34 @@ func (_m *AgglayerClientMock) SendCertificate(certificate *SignedCertificate) (c return r0, r1 } +// AgglayerClientMock_SendCertificate_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SendCertificate' +type AgglayerClientMock_SendCertificate_Call struct { + *mock.Call +} + +// SendCertificate is a helper method to define mock.On call +// - certificate *SignedCertificate +func (_e *AgglayerClientMock_Expecter) SendCertificate(certificate interface{}) 
*AgglayerClientMock_SendCertificate_Call { + return &AgglayerClientMock_SendCertificate_Call{Call: _e.mock.On("SendCertificate", certificate)} +} + +func (_c *AgglayerClientMock_SendCertificate_Call) Run(run func(certificate *SignedCertificate)) *AgglayerClientMock_SendCertificate_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(*SignedCertificate)) + }) + return _c +} + +func (_c *AgglayerClientMock_SendCertificate_Call) Return(_a0 common.Hash, _a1 error) *AgglayerClientMock_SendCertificate_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *AgglayerClientMock_SendCertificate_Call) RunAndReturn(run func(*SignedCertificate) (common.Hash, error)) *AgglayerClientMock_SendCertificate_Call { + _c.Call.Return(run) + return _c +} + // SendTx provides a mock function with given fields: signedTx func (_m *AgglayerClientMock) SendTx(signedTx SignedTx) (common.Hash, error) { ret := _m.Called(signedTx) @@ -135,6 +226,34 @@ func (_m *AgglayerClientMock) SendTx(signedTx SignedTx) (common.Hash, error) { return r0, r1 } +// AgglayerClientMock_SendTx_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SendTx' +type AgglayerClientMock_SendTx_Call struct { + *mock.Call +} + +// SendTx is a helper method to define mock.On call +// - signedTx SignedTx +func (_e *AgglayerClientMock_Expecter) SendTx(signedTx interface{}) *AgglayerClientMock_SendTx_Call { + return &AgglayerClientMock_SendTx_Call{Call: _e.mock.On("SendTx", signedTx)} +} + +func (_c *AgglayerClientMock_SendTx_Call) Run(run func(signedTx SignedTx)) *AgglayerClientMock_SendTx_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(SignedTx)) + }) + return _c +} + +func (_c *AgglayerClientMock_SendTx_Call) Return(_a0 common.Hash, _a1 error) *AgglayerClientMock_SendTx_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *AgglayerClientMock_SendTx_Call) RunAndReturn(run func(SignedTx) (common.Hash, error)) *AgglayerClientMock_SendTx_Call { + 
_c.Call.Return(run) + return _c +} + // WaitTxToBeMined provides a mock function with given fields: hash, ctx func (_m *AgglayerClientMock) WaitTxToBeMined(hash common.Hash, ctx context.Context) error { ret := _m.Called(hash, ctx) @@ -153,6 +272,35 @@ func (_m *AgglayerClientMock) WaitTxToBeMined(hash common.Hash, ctx context.Cont return r0 } +// AgglayerClientMock_WaitTxToBeMined_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'WaitTxToBeMined' +type AgglayerClientMock_WaitTxToBeMined_Call struct { + *mock.Call +} + +// WaitTxToBeMined is a helper method to define mock.On call +// - hash common.Hash +// - ctx context.Context +func (_e *AgglayerClientMock_Expecter) WaitTxToBeMined(hash interface{}, ctx interface{}) *AgglayerClientMock_WaitTxToBeMined_Call { + return &AgglayerClientMock_WaitTxToBeMined_Call{Call: _e.mock.On("WaitTxToBeMined", hash, ctx)} +} + +func (_c *AgglayerClientMock_WaitTxToBeMined_Call) Run(run func(hash common.Hash, ctx context.Context)) *AgglayerClientMock_WaitTxToBeMined_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(common.Hash), args[1].(context.Context)) + }) + return _c +} + +func (_c *AgglayerClientMock_WaitTxToBeMined_Call) Return(_a0 error) *AgglayerClientMock_WaitTxToBeMined_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *AgglayerClientMock_WaitTxToBeMined_Call) RunAndReturn(run func(common.Hash, context.Context) error) *AgglayerClientMock_WaitTxToBeMined_Call { + _c.Call.Return(run) + return _c +} + // NewAgglayerClientMock creates a new instance of AgglayerClientMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. 
func NewAgglayerClientMock(t interface { diff --git a/agglayer/types.go b/agglayer/types.go index b6a3198e..aece93f0 100644 --- a/agglayer/types.go +++ b/agglayer/types.go @@ -556,7 +556,7 @@ func (c CertificateHeader) String() string { errors = c.Error.String() } - return fmt.Sprintf("Height: %d, CertificateID: %s, NewLocalExitRoot: %s. Status: %s. Errors: %s", + return fmt.Sprintf("Height: %d, CertificateID: %s, NewLocalExitRoot: %s. Status: %s. Errors: [%s]", c.Height, c.CertificateID.String(), c.NewLocalExitRoot.String(), c.Status.String(), errors) } diff --git a/aggsender/aggsender.go b/aggsender/aggsender.go index dcbbc268..08730572 100644 --- a/aggsender/aggsender.go +++ b/aggsender/aggsender.go @@ -55,7 +55,7 @@ func New( cfg Config, aggLayerClient agglayer.AgglayerClientInterface, l1InfoTreeSyncer *l1infotreesync.L1InfoTreeSync, - l2Syncer *bridgesync.BridgeSync, + l2Syncer types.L2BridgeSyncer, epochNotifier types.EpochNotifier) (*AggSender, error) { storage, err := db.NewAggSenderSQLStorage(logger, cfg.StoragePath) if err != nil { @@ -93,14 +93,14 @@ func (a *AggSender) sendCertificates(ctx context.Context) { select { case epoch := <-chEpoch: a.log.Infof("Epoch received: %s", epoch.String()) - thereArePendingCerts, err := a.checkPendingCertificatesStatus(ctx) - if err == nil && !thereArePendingCerts { + thereArePendingCerts := a.checkPendingCertificatesStatus(ctx) + if !thereArePendingCerts { if _, err := a.sendCertificate(ctx); err != nil { log.Error(err) } } else { - log.Warnf("Skipping epoch %s because there are pending certificates %v or error: %w", - epoch.String(), thereArePendingCerts, err) + log.Infof("Skipping epoch %s because there are pending certificates", + epoch.String()) } case <-ctx.Done(): a.log.Info("AggSender stopped") @@ -177,7 +177,7 @@ func (a *AggSender) sendCertificate(ctx context.Context) (*agglayer.SignedCertif } a.saveCertificateToFile(signedCertificate) - a.log.Debugf("certificate ready to be send to AggLayer: %s", 
signedCertificate.String()) + a.log.Infof("certificate ready to be send to AggLayer: %s", signedCertificate.String()) certificateHash, err := a.aggLayerClient.SendCertificate(signedCertificate) if err != nil { @@ -488,15 +488,14 @@ func (a *AggSender) signCertificate(certificate *agglayer.Certificate) (*agglaye // and updates in the storage if it changed on agglayer // It returns: // bool -> if there are pending certificates -// error -> if there was an error -func (a *AggSender) checkPendingCertificatesStatus(ctx context.Context) (bool, error) { +func (a *AggSender) checkPendingCertificatesStatus(ctx context.Context) bool { pendingCertificates, err := a.storage.GetCertificatesByStatus(nonSettledStatuses) if err != nil { err = fmt.Errorf("error getting pending certificates: %w", err) a.log.Error(err) - return true, err + return true } - thereArePendingCertificates := false + thereArePendingCerts := false a.log.Debugf("checkPendingCertificatesStatus num of pendingCertificates: %d", len(pendingCertificates)) for _, certificate := range pendingCertificates { certificateHeader, err := a.aggLayerClient.GetCertificateHeader(certificate.CertificateID) @@ -504,18 +503,17 @@ func (a *AggSender) checkPendingCertificatesStatus(ctx context.Context) (bool, e err = fmt.Errorf("error getting certificate header of %d/%s from agglayer: %w", certificate.Height, certificate.String(), err) a.log.Error(err) - return true, err + return true } - if slices.Contains(nonSettledStatuses, certificateHeader.Status) { - thereArePendingCertificates = true - } - a.log.Debugf("aggLayerClient.GetCertificateHeader status [%s] of certificate %s ", + elapsedTime := time.Now().UTC().Sub(time.UnixMilli(certificate.CreatedAt)) + a.log.Debugf("aggLayerClient.GetCertificateHeader status [%s] of certificate %s elapsed time:%s", certificateHeader.Status, - certificateHeader.String()) + certificateHeader.String(), + elapsedTime) if certificateHeader.Status != certificate.Status { - a.log.Infof("certificate %s 
changed status from [%s] to [%s]", - certificateHeader.String(), certificate.Status, certificateHeader.Status) + a.log.Infof("certificate %s changed status from [%s] to [%s] elapsed time: %s", + certificateHeader.String(), certificate.Status, certificateHeader.Status, elapsedTime) certificate.Status = certificateHeader.Status certificate.UpdatedAt = time.Now().UTC().UnixMilli() @@ -523,11 +521,16 @@ func (a *AggSender) checkPendingCertificatesStatus(ctx context.Context) (bool, e if err := a.storage.UpdateCertificateStatus(ctx, *certificate); err != nil { err = fmt.Errorf("error updating certificate %s status in storage: %w", certificateHeader.String(), err) a.log.Error(err) - return true, err + return true } } + if slices.Contains(nonSettledStatuses, certificateHeader.Status) { + a.log.Infof("certificate %s is still pending, elapsed time:%s ", + certificateHeader.String(), elapsedTime) + thereArePendingCerts = true + } } - return thereArePendingCertificates, nil + return thereArePendingCerts } // shouldSendCertificate checks if a certificate should be sent at given time diff --git a/aggsender/aggsender_test.go b/aggsender/aggsender_test.go index 0d071e76..b9242bdf 100644 --- a/aggsender/aggsender_test.go +++ b/aggsender/aggsender_test.go @@ -280,6 +280,70 @@ func TestGetBridgeExits(t *testing.T) { } } +func TestAggSenderStart(t *testing.T) { + AggLayerMock := agglayer.NewAgglayerClientMock(t) + epochNotifierMock := mocks.NewEpochNotifier(t) + bridgeL2SyncerMock := mocks.NewL2BridgeSyncer(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + aggSender, err := New( + ctx, + log.WithFields("test", "unittest"), + Config{ + StoragePath: "file::memory:?cache=shared", + }, + AggLayerMock, + nil, + bridgeL2SyncerMock, + epochNotifierMock) + require.NoError(t, err) + require.NotNil(t, aggSender) + ch := make(chan aggsendertypes.EpochEvent) + epochNotifierMock.EXPECT().Subscribe("aggsender").Return(ch) + 
bridgeL2SyncerMock.EXPECT().GetLastProcessedBlock(mock.Anything).Return(uint64(0), nil) + + go aggSender.Start(ctx) + ch <- aggsendertypes.EpochEvent{ + Epoch: 1, + } + time.Sleep(200 * time.Millisecond) +} + +func TestAggSenderSendCertificates(t *testing.T) { + AggLayerMock := agglayer.NewAgglayerClientMock(t) + epochNotifierMock := mocks.NewEpochNotifier(t) + bridgeL2SyncerMock := mocks.NewL2BridgeSyncer(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + aggSender, err := New( + ctx, + log.WithFields("test", "unittest"), + Config{ + StoragePath: "file::memory:?cache=shared", + }, + AggLayerMock, + nil, + bridgeL2SyncerMock, + epochNotifierMock) + require.NoError(t, err) + require.NotNil(t, aggSender) + ch := make(chan aggsendertypes.EpochEvent, 2) + epochNotifierMock.EXPECT().Subscribe("aggsender").Return(ch) + err = aggSender.storage.SaveLastSentCertificate(ctx, aggsendertypes.CertificateInfo{ + Height: 1, + Status: agglayer.Pending, + }) + AggLayerMock.EXPECT().GetCertificateHeader(mock.Anything).Return(&agglayer.CertificateHeader{ + Status: agglayer.Pending, + }, nil) + require.NoError(t, err) + ch <- aggsendertypes.EpochEvent{ + Epoch: 1, + } + go aggSender.sendCertificates(ctx) + time.Sleep(200 * time.Millisecond) +} + //nolint:dupl func TestGetImportedBridgeExits(t *testing.T) { t.Parallel() @@ -751,16 +815,15 @@ func generateTestProof(t *testing.T) treeTypes.Proof { func TestCheckIfCertificatesAreSettled(t *testing.T) { tests := []struct { - name string - pendingCertificates []*aggsendertypes.CertificateInfo - certificateHeaders map[common.Hash]*agglayer.CertificateHeader - getFromDBError error - clientError error - updateDBError error - expectedErrorLogMessages []string - expectedInfoMessages []string - expectedThereArePendingCerts bool - expectedError bool + name string + pendingCertificates []*aggsendertypes.CertificateInfo + certificateHeaders map[common.Hash]*agglayer.CertificateHeader + getFromDBError error + clientError 
error + updateDBError error + expectedErrorLogMessages []string + expectedInfoMessages []string + expectedError bool }{ { name: "All certificates settled - update successful", @@ -796,8 +859,7 @@ func TestCheckIfCertificatesAreSettled(t *testing.T) { expectedErrorLogMessages: []string{ "error getting pending certificates: %w", }, - expectedThereArePendingCerts: true, - expectedError: true, + expectedError: true, }, { name: "Error getting certificate header", @@ -811,8 +873,7 @@ func TestCheckIfCertificatesAreSettled(t *testing.T) { expectedErrorLogMessages: []string{ "error getting header of certificate %s with height: %d from agglayer: %w", }, - expectedThereArePendingCerts: true, - expectedError: true, + expectedError: true, }, { name: "Error updating certificate status", @@ -829,8 +890,7 @@ func TestCheckIfCertificatesAreSettled(t *testing.T) { expectedInfoMessages: []string{ "certificate %s changed status to %s", }, - expectedThereArePendingCerts: true, - expectedError: true, + expectedError: true, }, } @@ -864,9 +924,8 @@ func TestCheckIfCertificatesAreSettled(t *testing.T) { } ctx := context.TODO() - thereArePendingCerts, err := aggSender.checkPendingCertificatesStatus(ctx) - require.Equal(t, tt.expectedThereArePendingCerts, thereArePendingCerts) - require.Equal(t, tt.expectedError, err != nil) + thereArePendingCerts := aggSender.checkPendingCertificatesStatus(ctx) + require.Equal(t, tt.expectedError, thereArePendingCerts) mockAggLayerClient.AssertExpectations(t) mockStorage.AssertExpectations(t) }) diff --git a/aggsender/types/epoch_notifier.go b/aggsender/types/epoch_notifier.go index 426ad362..045ba7ff 100644 --- a/aggsender/types/epoch_notifier.go +++ b/aggsender/types/epoch_notifier.go @@ -23,3 +23,6 @@ type EpochNotifier interface { Start(ctx context.Context) String() string } + +type BridgeL2Syncer interface { +} diff --git a/config/default.go b/config/default.go index d7188e43..61b099c8 100644 --- a/config/default.go +++ b/config/default.go @@ -343,5 
+343,5 @@ URLRPCL2="{{L2URL}}" CheckSettledInterval = "2s" BlockFinality = "LatestBlock" EpochNotificationPercentage = 50 -SaveCertificatesToFiles = false +SaveCertificatesToFilesPath = "" ` diff --git a/test/Makefile b/test/Makefile index 2435730c..12f406fd 100644 --- a/test/Makefile +++ b/test/Makefile @@ -68,7 +68,7 @@ generate-mocks-aggsender: ## Generates mocks for aggsender, using mockery tool .PHONY: generate-mocks-agglayer generate-mocks-agglayer: ## Generates mocks for agglayer, using mockery tool - export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=AgglayerClientInterface --dir=../agglayer --output=../agglayer --outpkg=agglayer --inpackage --structname=AgglayerClientMock --filename=mock_agglayer_client.go + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=AgglayerClientInterface --dir=../agglayer --output=../agglayer --outpkg=agglayer --inpackage --structname=AgglayerClientMock --filename=mock_agglayer_client.go ${COMMON_MOCKERY_PARAMS} .PHONY: generate-mocks-bridgesync generate-mocks-bridgesync: ## Generates mocks for bridgesync, using mockery tool From 97c2d58bba814b3df2bc456f0b8cf4d5d5c69aca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toni=20Ram=C3=ADrez?= <58293609+ToniRamirezM@users.noreply.github.com> Date: Fri, 15 Nov 2024 16:03:21 +0100 Subject: [PATCH 31/33] fix: aggregating proofs (#191) * ensure oldAccInputHash is ready * feat: updata sync lib * feat: acc input hash sanity check * feat: check acc input hash -1 * feat: refactor * feat: refactor * fix: batch1 acc input hash * fix: timestamp in input prover * fix: timestamp in input prover * fix: timestamp * feat: remove test * fix: test * fix: test * fix: comments * fix: comments --- aggregator/aggregator.go | 64 +++- aggregator/aggregator_test.go | 539 +------------------------------ aggregator/interfaces.go | 2 +- aggregator/mocks/mock_prover.go | 21 +- aggregator/prover/prover.go | 31 +- aggregator/prover/prover_test.go | 6 +- go.mod | 3 +- go.sum | 
15 +- 8 files changed, 102 insertions(+), 579 deletions(-) diff --git a/aggregator/aggregator.go b/aggregator/aggregator.go index 72c316be..58e97402 100644 --- a/aggregator/aggregator.go +++ b/aggregator/aggregator.go @@ -970,7 +970,7 @@ func (a *Aggregator) tryAggregateProofs(ctx context.Context, prover ProverInterf tmpLogger.Infof("Proof ID for aggregated proof: %v", *proof.ProofID) tmpLogger = tmpLogger.WithFields("proofId", *proof.ProofID) - recursiveProof, _, err := prover.WaitRecursiveProof(ctx, *proof.ProofID) + recursiveProof, _, _, err := prover.WaitRecursiveProof(ctx, *proof.ProofID) if err != nil { err = fmt.Errorf("failed to get aggregated proof from prover, %w", err) tmpLogger.Error(FirstToUpper(err.Error())) @@ -1121,7 +1121,7 @@ func (a *Aggregator) getAndLockBatchToProve( // Not found, so it it not possible to verify the batch yet if sequence == nil || errors.Is(err, entities.ErrNotFound) { tmpLogger.Infof("Sequencing event for batch %d has not been synced yet, "+ - "so it is not possible to verify it yet. Waiting...", batchNumberToVerify) + "so it is not possible to verify it yet. Waiting ...", batchNumberToVerify) return nil, nil, nil, state.ErrNotFound } @@ -1138,7 +1138,7 @@ func (a *Aggregator) getAndLockBatchToProve( return nil, nil, nil, err } else if errors.Is(err, entities.ErrNotFound) { a.logger.Infof("Virtual batch %d has not been synced yet, "+ - "so it is not possible to verify it yet. Waiting...", batchNumberToVerify) + "so it is not possible to verify it yet. Waiting ...", batchNumberToVerify) return nil, nil, nil, state.ErrNotFound } @@ -1163,21 +1163,43 @@ func (a *Aggregator) getAndLockBatchToProve( virtualBatch.L1InfoRoot = &l1InfoRoot } + // Ensure the old acc input hash is in memory + oldAccInputHash := a.getAccInputHash(batchNumberToVerify - 1) + if oldAccInputHash == (common.Hash{}) && batchNumberToVerify > 1 { + tmpLogger.Warnf("AccInputHash for previous batch (%d) is not in memory. 
Waiting ...", batchNumberToVerify-1) + return nil, nil, nil, state.ErrNotFound + } + + forcedBlockHashL1 := rpcBatch.ForcedBlockHashL1() + l1InfoRoot = *virtualBatch.L1InfoRoot + + if batchNumberToVerify == 1 { + l1Block, err := a.l1Syncr.GetL1BlockByNumber(ctx, virtualBatch.BlockNumber) + if err != nil { + a.logger.Errorf("Error getting l1 block: %v", err) + return nil, nil, nil, err + } + + forcedBlockHashL1 = l1Block.ParentHash + l1InfoRoot = rpcBatch.GlobalExitRoot() + } + // Calculate acc input hash as the RPC is not returning the correct one at the moment accInputHash := cdkcommon.CalculateAccInputHash( a.logger, - a.getAccInputHash(batchNumberToVerify-1), + oldAccInputHash, virtualBatch.BatchL2Data, - *virtualBatch.L1InfoRoot, + l1InfoRoot, uint64(sequence.Timestamp.Unix()), rpcBatch.LastCoinbase(), - rpcBatch.ForcedBlockHashL1(), + forcedBlockHashL1, ) // Store the acc input hash a.setAccInputHash(batchNumberToVerify, accInputHash) // Log params to calculate acc input hash a.logger.Debugf("Calculated acc input hash for batch %d: %v", batchNumberToVerify, accInputHash) + a.logger.Debugf("OldAccInputHash: %v", oldAccInputHash) a.logger.Debugf("L1InfoRoot: %v", virtualBatch.L1InfoRoot) // a.logger.Debugf("LastL2BLockTimestamp: %v", rpcBatch.LastL2BLockTimestamp()) a.logger.Debugf("TimestampLimit: %v", uint64(sequence.Timestamp.Unix())) @@ -1196,7 +1218,7 @@ func (a *Aggregator) getAndLockBatchToProve( AccInputHash: accInputHash, L1InfoTreeIndex: rpcBatch.L1InfoTreeIndex(), L1InfoRoot: *virtualBatch.L1InfoRoot, - Timestamp: time.Unix(int64(rpcBatch.LastL2BLockTimestamp()), 0), + Timestamp: sequence.Timestamp, GlobalExitRoot: rpcBatch.GlobalExitRoot(), ChainID: a.cfg.ChainID, ForkID: a.cfg.ForkId, @@ -1325,7 +1347,7 @@ func (a *Aggregator) tryGenerateBatchProof(ctx context.Context, prover ProverInt tmpLogger = tmpLogger.WithFields("proofId", *proof.ProofID) - resGetProof, stateRoot, err := prover.WaitRecursiveProof(ctx, *proof.ProofID) + resGetProof, stateRoot, 
accInputHash, err := prover.WaitRecursiveProof(ctx, *proof.ProofID) if err != nil { err = fmt.Errorf("failed to get proof from prover, %w", err) tmpLogger.Error(FirstToUpper(err.Error())) @@ -1337,7 +1359,8 @@ func (a *Aggregator) tryGenerateBatchProof(ctx context.Context, prover ProverInt // Sanity Check: state root from the proof must match the one from the batch if a.cfg.BatchProofSanityCheckEnabled && (stateRoot != common.Hash{}) && (stateRoot != batchToProve.StateRoot) { for { - tmpLogger.Errorf("State root from the proof does not match the expected for batch %d: Proof = [%s] Expected = [%s]", + tmpLogger.Errorf("HALTING: "+ + "State root from the proof does not match the expected for batch %d: Proof = [%s] Expected = [%s]", batchToProve.BatchNumber, stateRoot.String(), batchToProve.StateRoot.String(), ) time.Sleep(a.cfg.RetryTime.Duration) @@ -1346,6 +1369,20 @@ func (a *Aggregator) tryGenerateBatchProof(ctx context.Context, prover ProverInt tmpLogger.Infof("State root sanity check for batch %d passed", batchToProve.BatchNumber) } + // Sanity Check: acc input hash from the proof must match the one from the batch + if a.cfg.BatchProofSanityCheckEnabled && (accInputHash != common.Hash{}) && + (accInputHash != batchToProve.AccInputHash) { + for { + tmpLogger.Errorf("HALTING: Acc input hash from the proof does not match the expected for "+ + "batch %d: Proof = [%s] Expected = [%s]", + batchToProve.BatchNumber, accInputHash.String(), batchToProve.AccInputHash.String(), + ) + time.Sleep(a.cfg.RetryTime.Duration) + } + } else { + tmpLogger.Infof("Acc input hash sanity check for batch %d passed", batchToProve.BatchNumber) + } + proof.Proof = resGetProof // NOTE(pg): the defer func is useless from now on, use a different variable @@ -1505,10 +1542,17 @@ func (a *Aggregator) buildInputProver( } } + // Ensure the old acc input hash is in memory + oldAccInputHash := a.getAccInputHash(batchToVerify.BatchNumber - 1) + if oldAccInputHash == (common.Hash{}) && 
batchToVerify.BatchNumber > 1 { + a.logger.Warnf("AccInputHash for previous batch (%d) is not in memory. Waiting ...", batchToVerify.BatchNumber-1) + return nil, fmt.Errorf("acc input hash for previous batch (%d) is not in memory", batchToVerify.BatchNumber-1) + } + inputProver := &prover.StatelessInputProver{ PublicInputs: &prover.StatelessPublicInputs{ Witness: witness, - OldAccInputHash: a.getAccInputHash(batchToVerify.BatchNumber - 1).Bytes(), + OldAccInputHash: oldAccInputHash.Bytes(), OldBatchNum: batchToVerify.BatchNumber - 1, ChainId: batchToVerify.ChainID, ForkId: batchToVerify.ForkID, diff --git a/aggregator/aggregator_test.go b/aggregator/aggregator_test.go index 506ce16c..ff788190 100644 --- a/aggregator/aggregator_test.go +++ b/aggregator/aggregator_test.go @@ -6,7 +6,6 @@ import ( "crypto/ecdsa" "crypto/elliptic" "crypto/rand" - "encoding/hex" "encoding/json" "errors" "fmt" @@ -1114,7 +1113,7 @@ func Test_tryAggregateProofs(t *testing.T) { Return(nil). Once() m.proverMock.On("AggregatedProof", proof1.Proof, proof2.Proof).Return(&proofID, nil).Once() - m.proverMock.On("WaitRecursiveProof", mock.MatchedBy(matchProverCtxFn), proofID).Return("", common.Hash{}, errTest).Once() + m.proverMock.On("WaitRecursiveProof", mock.MatchedBy(matchProverCtxFn), proofID).Return("", common.Hash{}, common.Hash{}, errTest).Once() m.stateMock.On("BeginStateTransaction", mock.MatchedBy(matchAggregatorCtxFn)).Return(dbTx, nil).Once().NotBefore(lockProofsTxBegin) m.stateMock. On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), &proof1, dbTx). @@ -1172,7 +1171,7 @@ func Test_tryAggregateProofs(t *testing.T) { Return(nil). 
Once() m.proverMock.On("AggregatedProof", proof1.Proof, proof2.Proof).Return(&proofID, nil).Once() - m.proverMock.On("WaitRecursiveProof", mock.MatchedBy(matchProverCtxFn), proofID).Return("", common.Hash{}, errTest).Once() + m.proverMock.On("WaitRecursiveProof", mock.MatchedBy(matchProverCtxFn), proofID).Return("", common.Hash{}, common.Hash{}, errTest).Once() m.stateMock.On("BeginStateTransaction", mock.MatchedBy(matchAggregatorCtxFn)).Return(dbTx, nil).Once().NotBefore(lockProofsTxBegin) m.stateMock. On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), &proof1, dbTx). @@ -1220,7 +1219,7 @@ func Test_tryAggregateProofs(t *testing.T) { Return(nil). Once() m.proverMock.On("AggregatedProof", proof1.Proof, proof2.Proof).Return(&proofID, nil).Once() - m.proverMock.On("WaitRecursiveProof", mock.MatchedBy(matchProverCtxFn), proofID).Return(recursiveProof, common.Hash{}, nil).Once() + m.proverMock.On("WaitRecursiveProof", mock.MatchedBy(matchProverCtxFn), proofID).Return(recursiveProof, common.Hash{}, common.Hash{}, nil).Once() m.stateMock.On("DeleteGeneratedProofs", mock.MatchedBy(matchProverCtxFn), proof1.BatchNumber, proof2.BatchNumberFinal, dbTx).Return(errTest).Once() dbTx.On("Rollback", mock.MatchedBy(matchProverCtxFn)).Return(nil).Once() m.stateMock.On("BeginStateTransaction", mock.MatchedBy(matchAggregatorCtxFn)).Return(dbTx, nil).Once().NotBefore(lockProofsTxBegin) @@ -1280,7 +1279,7 @@ func Test_tryAggregateProofs(t *testing.T) { Return(nil). 
Once() m.proverMock.On("AggregatedProof", proof1.Proof, proof2.Proof).Return(&proofID, nil).Once() - m.proverMock.On("WaitRecursiveProof", mock.MatchedBy(matchProverCtxFn), proofID).Return(recursiveProof, common.Hash{}, nil).Once() + m.proverMock.On("WaitRecursiveProof", mock.MatchedBy(matchProverCtxFn), proofID).Return(recursiveProof, common.Hash{}, common.Hash{}, nil).Once() m.stateMock.On("DeleteGeneratedProofs", mock.MatchedBy(matchProverCtxFn), proof1.BatchNumber, proof2.BatchNumberFinal, dbTx).Return(nil).Once() m.stateMock.On("AddGeneratedProof", mock.MatchedBy(matchProverCtxFn), mock.Anything, dbTx).Return(errTest).Once() dbTx.On("Rollback", mock.MatchedBy(matchProverCtxFn)).Return(nil).Once() @@ -1343,7 +1342,7 @@ func Test_tryAggregateProofs(t *testing.T) { Once() m.proverMock.On("AggregatedProof", proof1.Proof, proof2.Proof).Return(&proofID, nil).Once() - m.proverMock.On("WaitRecursiveProof", mock.MatchedBy(matchProverCtxFn), proofID).Return(recursiveProof, common.Hash{}, nil).Once() + m.proverMock.On("WaitRecursiveProof", mock.MatchedBy(matchProverCtxFn), proofID).Return(recursiveProof, common.Hash{}, common.Hash{}, nil).Once() m.stateMock.On("DeleteGeneratedProofs", mock.MatchedBy(matchProverCtxFn), proof1.BatchNumber, proof2.BatchNumberFinal, dbTx).Return(nil).Once() expectedInputProver := map[string]interface{}{ "recursive_proof_1": proof1.Proof, @@ -1432,534 +1431,6 @@ func Test_tryAggregateProofs(t *testing.T) { } } -func Test_tryGenerateBatchProof(t *testing.T) { - require := require.New(t) - assert := assert.New(t) - from := common.BytesToAddress([]byte("from")) - cfg := Config{ - VerifyProofInterval: types.Duration{Duration: time.Duration(10000000)}, - TxProfitabilityCheckerType: ProfitabilityAcceptAll, - SenderAddress: from.Hex(), - IntervalAfterWhichBatchConsolidateAnyway: types.Duration{Duration: time.Second * 1}, - ChainID: uint64(1), - ForkId: uint64(12), - } - lastVerifiedBatchNum := uint64(22) - - batchNum := uint64(23) - - batchToProve 
:= state.Batch{ - BatchNumber: batchNum, - } - - proofID := "proofId" - - proverName := "proverName" - proverID := "proverID" - recursiveProof := "recursiveProof" - errTest := errors.New("test error") - proverCtx := context.WithValue(context.Background(), "owner", ownerProver) //nolint:staticcheck - matchProverCtxFn := func(ctx context.Context) bool { return ctx.Value("owner") == ownerProver } - matchAggregatorCtxFn := func(ctx context.Context) bool { return ctx.Value("owner") == ownerAggregator } - fixedTimestamp := time.Date(2023, 10, 13, 15, 0, 0, 0, time.UTC) - - l1InfoTreeLeaf := []synchronizer.L1InfoTreeLeaf{ - { - GlobalExitRoot: common.Hash{}, - PreviousBlockHash: common.Hash{}, - Timestamp: fixedTimestamp, - }, - { - GlobalExitRoot: common.Hash{}, - PreviousBlockHash: common.Hash{}, - Timestamp: fixedTimestamp, - }, - { - GlobalExitRoot: common.Hash{}, - PreviousBlockHash: common.Hash{}, - Timestamp: fixedTimestamp, - }, - { - GlobalExitRoot: common.Hash{}, - PreviousBlockHash: common.Hash{}, - Timestamp: fixedTimestamp, - }, - } - - testCases := []struct { - name string - setup func(mox, *Aggregator) - asserts func(bool, *Aggregator, error) - }{ - { - name: "getAndLockBatchToProve returns generic error", - setup: func(m mox, a *Aggregator) { - m.proverMock.On("Name").Return(proverName).Twice() - m.proverMock.On("ID").Return(proverID).Twice() - m.proverMock.On("Addr").Return("addr") - m.etherman.On("GetLatestVerifiedBatchNum").Return(uint64(0), errTest).Once() - }, - asserts: func(result bool, a *Aggregator, err error) { - assert.False(result) - assert.ErrorIs(err, errTest) - }, - }, - { - name: "getAndLockBatchToProve returns ErrNotFound", - setup: func(m mox, a *Aggregator) { - m.proverMock.On("Name").Return(proverName).Twice() - m.proverMock.On("ID").Return(proverID).Twice() - m.proverMock.On("Addr").Return("addr") - m.etherman.On("GetLatestVerifiedBatchNum").Return(uint64(0), state.ErrNotFound).Once() - }, - asserts: func(result bool, a *Aggregator, 
err error) { - assert.False(result) - assert.NoError(err) - }, - }, - { - name: "BatchProof prover error", - setup: func(m mox, a *Aggregator) { - m.proverMock.On("Name").Return(proverName).Twice() - m.proverMock.On("ID").Return(proverID).Twice() - m.proverMock.On("Addr").Return("addr").Twice() - - batchL2Data, err := hex.DecodeString(codedL2Block1) - require.NoError(err) - l1InfoRoot := common.HexToHash("0x057e9950fbd39b002e323f37c2330d0c096e66919e24cc96fb4b2dfa8f4af782") - - virtualBatch := synchronizer.VirtualBatch{ - BatchNumber: lastVerifiedBatchNum + 1, - BatchL2Data: batchL2Data, - L1InfoRoot: &l1InfoRoot, - } - - m.synchronizerMock.On("GetVirtualBatchByBatchNumber", mock.Anything, lastVerifiedBatchNum).Return(&virtualBatch, nil).Once() - m.etherman.On("GetLatestVerifiedBatchNum").Return(lastVerifiedBatchNum, nil).Once() - m.stateMock.On("CheckProofExistsForBatch", mock.MatchedBy(matchProverCtxFn), lastVerifiedBatchNum+1, nil).Return(true, nil).Once() - m.stateMock.On("CleanupGeneratedProofs", mock.Anything, mock.Anything, mock.Anything).Return(nil).Once() - sequence := synchronizer.SequencedBatches{ - FromBatchNumber: uint64(10), - ToBatchNumber: uint64(20), - } - m.synchronizerMock.On("GetSequenceByBatchNumber", mock.MatchedBy(matchProverCtxFn), lastVerifiedBatchNum).Return(&sequence, nil).Once() - - rpcBatch := rpctypes.NewRPCBatch(lastVerifiedBatchNum+1, common.Hash{}, []string{}, batchL2Data, common.Hash{}, common.BytesToHash([]byte("mock LocalExitRoot")), common.BytesToHash([]byte("mock StateRoot")), common.Address{}, false) - rpcBatch.SetLastL2BLockTimestamp(uint64(time.Now().Unix())) - m.rpcMock.On("GetWitness", lastVerifiedBatchNum, false).Return([]byte("witness"), nil) - m.rpcMock.On("GetBatch", lastVerifiedBatchNum).Return(rpcBatch, nil) - m.stateMock.On("AddSequence", mock.MatchedBy(matchProverCtxFn), mock.Anything, nil).Return(nil).Once() - m.stateMock.On("AddGeneratedProof", mock.MatchedBy(matchProverCtxFn), mock.Anything, nil).Run( - func(args 
mock.Arguments) { - proof, ok := args[1].(*state.Proof) - if !ok { - t.Fatalf("expected args[1] to be of type *state.Proof, got %T", args[1]) - } - assert.Equal(batchToProve.BatchNumber, proof.BatchNumber) - assert.Equal(batchToProve.BatchNumber, proof.BatchNumberFinal) - assert.Equal(&proverName, proof.Prover) - assert.Equal(&proverID, proof.ProverID) - assert.InDelta(time.Now().Unix(), proof.GeneratingSince.Unix(), float64(time.Second)) - }, - ).Return(nil).Once() - m.synchronizerMock.On("GetLeafsByL1InfoRoot", mock.Anything, l1InfoRoot).Return(l1InfoTreeLeaf, nil) - m.synchronizerMock.On("GetL1InfoTreeLeaves", mock.Anything, mock.Anything).Return(map[uint32]synchronizer.L1InfoTreeLeaf{ - 1: { - BlockNumber: uint64(35), - }, - }, nil) - - m.proverMock.On("BatchProof", mock.Anything).Return(nil, errTest).Once() - m.stateMock.On("DeleteGeneratedProofs", mock.MatchedBy(matchAggregatorCtxFn), batchToProve.BatchNumber, batchToProve.BatchNumber, nil).Return(nil).Once() - }, - asserts: func(result bool, a *Aggregator, err error) { - assert.False(result) - assert.ErrorIs(err, errTest) - }, - }, - { - name: "WaitRecursiveProof prover error", - setup: func(m mox, a *Aggregator) { - m.proverMock.On("Name").Return(proverName).Twice() - m.proverMock.On("ID").Return(proverID).Twice() - m.proverMock.On("Addr").Return("addr").Twice() - - batchL2Data, err := hex.DecodeString(codedL2Block1) - require.NoError(err) - l1InfoRoot := common.HexToHash("0x057e9950fbd39b002e323f37c2330d0c096e66919e24cc96fb4b2dfa8f4af782") - batch := state.Batch{ - BatchNumber: lastVerifiedBatchNum + 1, - BatchL2Data: batchL2Data, - L1InfoRoot: l1InfoRoot, - Timestamp: time.Now(), - Coinbase: common.Address{}, - ChainID: uint64(1), - ForkID: uint64(12), - } - - virtualBatch := synchronizer.VirtualBatch{ - BatchNumber: lastVerifiedBatchNum + 1, - BatchL2Data: batchL2Data, - L1InfoRoot: &l1InfoRoot, - } - - m.synchronizerMock.On("GetVirtualBatchByBatchNumber", mock.Anything, 
lastVerifiedBatchNum+1).Return(&virtualBatch, nil).Once() - - m.etherman.On("GetLatestVerifiedBatchNum").Return(lastVerifiedBatchNum, nil).Once() - m.stateMock.On("CheckProofExistsForBatch", mock.MatchedBy(matchProverCtxFn), mock.AnythingOfType("uint64"), nil).Return(false, nil).Once() - sequence := synchronizer.SequencedBatches{ - FromBatchNumber: uint64(10), - ToBatchNumber: uint64(20), - } - m.synchronizerMock.On("GetSequenceByBatchNumber", mock.MatchedBy(matchProverCtxFn), lastVerifiedBatchNum+1).Return(&sequence, nil).Once() - rpcBatch := rpctypes.NewRPCBatch(lastVerifiedBatchNum+1, common.Hash{}, []string{}, batchL2Data, common.Hash{}, common.BytesToHash([]byte("mock LocalExitRoot")), common.BytesToHash([]byte("mock StateRoot")), common.Address{}, false) - rpcBatch.SetLastL2BLockTimestamp(uint64(time.Now().Unix())) - m.rpcMock.On("GetWitness", lastVerifiedBatchNum+1, false).Return([]byte("witness"), nil) - m.stateMock.On("AddSequence", mock.MatchedBy(matchProverCtxFn), mock.Anything, nil).Return(nil).Once() - m.stateMock.On("AddGeneratedProof", mock.MatchedBy(matchProverCtxFn), mock.Anything, nil).Run( - func(args mock.Arguments) { - proof, ok := args[1].(*state.Proof) - if !ok { - t.Fatalf("expected args[1] to be of type *state.Proof, got %T", args[1]) - } - assert.Equal(batchToProve.BatchNumber, proof.BatchNumber) - assert.Equal(batchToProve.BatchNumber, proof.BatchNumberFinal) - assert.Equal(&proverName, proof.Prover) - assert.Equal(&proverID, proof.ProverID) - assert.InDelta(time.Now().Unix(), proof.GeneratingSince.Unix(), float64(time.Second)) - }, - ).Return(nil).Once() - - m.synchronizerMock.On("GetLeafsByL1InfoRoot", mock.Anything, l1InfoRoot).Return(l1InfoTreeLeaf, nil).Twice() - m.synchronizerMock.On("GetL1InfoTreeLeaves", mock.Anything, mock.Anything).Return(map[uint32]synchronizer.L1InfoTreeLeaf{ - 1: { - BlockNumber: uint64(35), - }, - }, nil).Twice() - - m.rpcMock.On("GetBatch", lastVerifiedBatchNum+1).Return(rpcBatch, nil) - 
expectedInputProver, err := a.buildInputProver(context.Background(), &batch, []byte("witness")) - require.NoError(err) - - m.proverMock.On("BatchProof", expectedInputProver).Return(&proofID, nil).Once() - m.proverMock.On("WaitRecursiveProof", mock.MatchedBy(matchProverCtxFn), proofID).Return("", common.Hash{}, errTest).Once() - m.stateMock.On("DeleteGeneratedProofs", mock.MatchedBy(matchAggregatorCtxFn), batchToProve.BatchNumber, batchToProve.BatchNumber, nil).Return(nil).Once() - }, - asserts: func(result bool, a *Aggregator, err error) { - assert.False(result) - assert.ErrorIs(err, errTest) - }, - }, - { - name: "DeleteBatchProofs error after WaitRecursiveProof prover error", - setup: func(m mox, a *Aggregator) { - m.proverMock.On("Name").Return(proverName).Twice() - m.proverMock.On("ID").Return(proverID).Twice() - m.proverMock.On("Addr").Return("addr").Twice() - - batchL2Data, err := hex.DecodeString(codedL2Block1) - require.NoError(err) - l1InfoRoot := common.HexToHash("0x057e9950fbd39b002e323f37c2330d0c096e66919e24cc96fb4b2dfa8f4af782") - batch := state.Batch{ - BatchNumber: lastVerifiedBatchNum + 1, - BatchL2Data: batchL2Data, - L1InfoRoot: l1InfoRoot, - Timestamp: time.Now(), - Coinbase: common.Address{}, - ChainID: uint64(1), - ForkID: uint64(12), - } - - m.etherman.On("GetLatestVerifiedBatchNum").Return(lastVerifiedBatchNum, nil).Once() - m.stateMock.On("CheckProofExistsForBatch", mock.MatchedBy(matchProverCtxFn), mock.AnythingOfType("uint64"), nil).Return(false, nil).Once() - sequence := synchronizer.SequencedBatches{ - FromBatchNumber: uint64(10), - ToBatchNumber: uint64(20), - } - m.synchronizerMock.On("GetSequenceByBatchNumber", mock.MatchedBy(matchProverCtxFn), lastVerifiedBatchNum+1).Return(&sequence, nil).Once() - rpcBatch := rpctypes.NewRPCBatch(lastVerifiedBatchNum+1, common.Hash{}, []string{}, batchL2Data, common.Hash{}, common.BytesToHash([]byte("mock LocalExitRoot")), common.BytesToHash([]byte("mock StateRoot")), common.Address{}, false) - 
rpcBatch.SetLastL2BLockTimestamp(uint64(time.Now().Unix())) - m.rpcMock.On("GetBatch", lastVerifiedBatchNum+1).Return(rpcBatch, nil) - m.stateMock.On("AddSequence", mock.MatchedBy(matchProverCtxFn), mock.Anything, nil).Return(nil).Once() - m.stateMock.On("AddGeneratedProof", mock.MatchedBy(matchProverCtxFn), mock.Anything, nil).Run( - func(args mock.Arguments) { - proof, ok := args[1].(*state.Proof) - if !ok { - t.Fatalf("expected args[1] to be of type *state.Proof, got %T", args[1]) - } - assert.Equal(batchToProve.BatchNumber, proof.BatchNumber) - assert.Equal(batchToProve.BatchNumber, proof.BatchNumberFinal) - assert.Equal(&proverName, proof.Prover) - assert.Equal(&proverID, proof.ProverID) - assert.InDelta(time.Now().Unix(), proof.GeneratingSince.Unix(), float64(time.Second)) - }, - ).Return(nil).Once() - - m.synchronizerMock.On("GetLeafsByL1InfoRoot", mock.Anything, l1InfoRoot).Return(l1InfoTreeLeaf, nil).Twice() - m.synchronizerMock.On("GetL1InfoTreeLeaves", mock.Anything, mock.Anything).Return(map[uint32]synchronizer.L1InfoTreeLeaf{ - 1: { - BlockNumber: uint64(35), - }, - }, nil).Twice() - - expectedInputProver, err := a.buildInputProver(context.Background(), &batch, []byte("witness")) - require.NoError(err) - - m.rpcMock.On("GetWitness", lastVerifiedBatchNum+1, false).Return([]byte("witness"), nil) - - virtualBatch := synchronizer.VirtualBatch{ - BatchNumber: lastVerifiedBatchNum + 1, - BatchL2Data: batchL2Data, - L1InfoRoot: &l1InfoRoot, - } - - m.synchronizerMock.On("GetVirtualBatchByBatchNumber", mock.Anything, lastVerifiedBatchNum+1).Return(&virtualBatch, nil).Once() - - m.proverMock.On("BatchProof", expectedInputProver).Return(&proofID, nil).Once() - m.proverMock.On("WaitRecursiveProof", mock.MatchedBy(matchProverCtxFn), proofID).Return("", common.Hash{}, errTest).Once() - m.stateMock.On("DeleteGeneratedProofs", mock.MatchedBy(matchAggregatorCtxFn), batchToProve.BatchNumber, batchToProve.BatchNumber, nil).Return(errTest).Once() - }, - asserts: 
func(result bool, a *Aggregator, err error) { - assert.False(result) - assert.ErrorIs(err, errTest) - }, - }, - { - name: "not time to send final ok", - setup: func(m mox, a *Aggregator) { - a.cfg.BatchProofSanityCheckEnabled = false - m.proverMock.On("Name").Return(proverName).Times(3) - m.proverMock.On("ID").Return(proverID).Times(3) - m.proverMock.On("Addr").Return("addr").Times(3) - - batchL2Data, err := hex.DecodeString(codedL2Block1) - require.NoError(err) - l1InfoRoot := common.HexToHash("0x057e9950fbd39b002e323f37c2330d0c096e66919e24cc96fb4b2dfa8f4af782") - batch := state.Batch{ - BatchNumber: lastVerifiedBatchNum + 1, - BatchL2Data: batchL2Data, - L1InfoRoot: l1InfoRoot, - Timestamp: time.Now(), - Coinbase: common.Address{}, - ChainID: uint64(1), - ForkID: uint64(12), - } - - m.etherman.On("GetLatestVerifiedBatchNum").Return(lastVerifiedBatchNum, nil).Once() - m.stateMock.On("CheckProofExistsForBatch", mock.MatchedBy(matchProverCtxFn), mock.AnythingOfType("uint64"), nil).Return(false, nil).Once() - sequence := synchronizer.SequencedBatches{ - FromBatchNumber: uint64(10), - ToBatchNumber: uint64(20), - } - m.synchronizerMock.On("GetSequenceByBatchNumber", mock.MatchedBy(matchProverCtxFn), lastVerifiedBatchNum+1).Return(&sequence, nil).Once() - - m.stateMock.On("AddSequence", mock.MatchedBy(matchProverCtxFn), mock.Anything, nil).Return(nil).Once() - m.stateMock.On("AddGeneratedProof", mock.MatchedBy(matchProverCtxFn), mock.Anything, nil).Run( - func(args mock.Arguments) { - proof, ok := args[1].(*state.Proof) - if !ok { - t.Fatalf("expected args[1] to be of type *state.Proof, got %T", args[1]) - } - assert.Equal(batchToProve.BatchNumber, proof.BatchNumber) - assert.Equal(batchToProve.BatchNumber, proof.BatchNumberFinal) - assert.Equal(&proverName, proof.Prover) - assert.Equal(&proverID, proof.ProverID) - assert.InDelta(time.Now().Unix(), proof.GeneratingSince.Unix(), float64(time.Second)) - }, - ).Return(nil).Once() - - 
m.synchronizerMock.On("GetLeafsByL1InfoRoot", mock.Anything, l1InfoRoot).Return(l1InfoTreeLeaf, nil).Twice() - m.synchronizerMock.On("GetL1InfoTreeLeaves", mock.Anything, mock.Anything).Return(map[uint32]synchronizer.L1InfoTreeLeaf{ - 1: { - BlockNumber: uint64(35), - }, - }, nil).Twice() - - rpcBatch := rpctypes.NewRPCBatch(lastVerifiedBatchNum+1, common.Hash{}, []string{}, batchL2Data, common.Hash{}, common.BytesToHash([]byte("mock LocalExitRoot")), common.BytesToHash([]byte("mock StateRoot")), common.Address{}, false) - rpcBatch.SetLastL2BLockTimestamp(uint64(time.Now().Unix())) - m.rpcMock.On("GetBatch", lastVerifiedBatchNum+1).Return(rpcBatch, nil) - m.rpcMock.On("GetWitness", lastVerifiedBatchNum+1, false).Return([]byte("witness"), nil) - - virtualBatch := synchronizer.VirtualBatch{ - BatchNumber: lastVerifiedBatchNum + 1, - BatchL2Data: batchL2Data, - L1InfoRoot: &l1InfoRoot, - } - - m.synchronizerMock.On("GetVirtualBatchByBatchNumber", mock.Anything, lastVerifiedBatchNum+1).Return(&virtualBatch, nil).Once() - - expectedInputProver, err := a.buildInputProver(context.Background(), &batch, []byte("witness")) - require.NoError(err) - - m.proverMock.On("BatchProof", expectedInputProver).Return(&proofID, nil).Once() - m.proverMock.On("WaitRecursiveProof", mock.MatchedBy(matchProverCtxFn), proofID).Return(recursiveProof, common.Hash{}, nil).Once() - m.stateMock.On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), mock.Anything, nil).Run( - func(args mock.Arguments) { - proof, ok := args[1].(*state.Proof) - if !ok { - t.Fatalf("expected args[1] to be of type *state.Proof, got %T", args[1]) - } - assert.Equal(batchToProve.BatchNumber, proof.BatchNumber) - assert.Equal(batchToProve.BatchNumber, proof.BatchNumberFinal) - assert.Equal(&proverName, proof.Prover) - assert.Equal(&proverID, proof.ProverID) - assert.Equal("", proof.InputProver) - assert.Equal(recursiveProof, proof.Proof) - assert.Nil(proof.GeneratingSince) - }, - ).Return(nil).Once() - }, - 
asserts: func(result bool, a *Aggregator, err error) { - assert.True(result) - assert.NoError(err) - }, - }, - { - name: "time to send final, state error ok", - setup: func(m mox, a *Aggregator) { - a.cfg.VerifyProofInterval = types.NewDuration(0) - a.cfg.BatchProofSanityCheckEnabled = false - m.proverMock.On("Name").Return(proverName).Times(3) - m.proverMock.On("ID").Return(proverID).Times(3) - m.proverMock.On("Addr").Return("addr").Times(3) - - batchL2Data, err := hex.DecodeString(codedL2Block1) - require.NoError(err) - l1InfoRoot := common.HexToHash("0x057e9950fbd39b002e323f37c2330d0c096e66919e24cc96fb4b2dfa8f4af782") - batch := state.Batch{ - BatchNumber: lastVerifiedBatchNum + 1, - BatchL2Data: batchL2Data, - L1InfoRoot: l1InfoRoot, - Timestamp: time.Now(), - Coinbase: common.Address{}, - ChainID: uint64(1), - ForkID: uint64(12), - } - - m.etherman.On("GetLatestVerifiedBatchNum").Return(lastVerifiedBatchNum, nil).Once() - m.stateMock.On("CheckProofExistsForBatch", mock.MatchedBy(matchProverCtxFn), mock.AnythingOfType("uint64"), nil).Return(false, nil).Once() - sequence := synchronizer.SequencedBatches{ - FromBatchNumber: uint64(10), - ToBatchNumber: uint64(20), - } - m.synchronizerMock.On("GetSequenceByBatchNumber", mock.MatchedBy(matchProverCtxFn), lastVerifiedBatchNum+1).Return(&sequence, nil).Once() - - m.rpcMock.On("GetWitness", lastVerifiedBatchNum+1, false).Return([]byte("witness"), nil) - - virtualBatch := synchronizer.VirtualBatch{ - BatchNumber: lastVerifiedBatchNum + 1, - BatchL2Data: batchL2Data, - L1InfoRoot: &l1InfoRoot, - } - - m.synchronizerMock.On("GetVirtualBatchByBatchNumber", mock.Anything, lastVerifiedBatchNum+1).Return(&virtualBatch, nil).Once() - - m.rpcMock.On("GetWitness", lastVerifiedBatchNum+1, false).Return([]byte("witness"), nil) - rpcBatch := rpctypes.NewRPCBatch(lastVerifiedBatchNum+1, common.Hash{}, []string{}, batchL2Data, common.Hash{}, common.BytesToHash([]byte("mock LocalExitRoot")), common.BytesToHash([]byte("mock 
StateRoot")), common.Address{}, false) - rpcBatch.SetLastL2BLockTimestamp(uint64(time.Now().Unix())) - m.rpcMock.On("GetBatch", lastVerifiedBatchNum+1).Return(rpcBatch, nil) - - m.stateMock.On("AddSequence", mock.MatchedBy(matchProverCtxFn), mock.Anything, nil).Return(nil).Once() - m.stateMock.On("AddGeneratedProof", mock.MatchedBy(matchProverCtxFn), mock.Anything, nil).Run( - func(args mock.Arguments) { - proof, ok := args[1].(*state.Proof) - if !ok { - t.Fatalf("expected args[1] to be of type *state.Proof, got %T", args[1]) - } - assert.Equal(batchToProve.BatchNumber, proof.BatchNumber) - assert.Equal(batchToProve.BatchNumber, proof.BatchNumberFinal) - assert.Equal(&proverName, proof.Prover) - assert.Equal(&proverID, proof.ProverID) - assert.InDelta(time.Now().Unix(), proof.GeneratingSince.Unix(), float64(time.Second)) - }, - ).Return(nil).Once() - - m.synchronizerMock.On("GetLeafsByL1InfoRoot", mock.Anything, l1InfoRoot).Return(l1InfoTreeLeaf, nil).Twice() - m.synchronizerMock.On("GetL1InfoTreeLeaves", mock.Anything, mock.Anything).Return(map[uint32]synchronizer.L1InfoTreeLeaf{ - 1: { - BlockNumber: uint64(35), - }, - }, nil).Twice() - - expectedInputProver, err := a.buildInputProver(context.Background(), &batch, []byte("witness")) - require.NoError(err) - - m.proverMock.On("BatchProof", expectedInputProver).Return(&proofID, nil).Once() - m.proverMock.On("WaitRecursiveProof", mock.MatchedBy(matchProverCtxFn), proofID).Return(recursiveProof, common.Hash{}, nil).Once() - m.etherman.On("GetLatestVerifiedBatchNum").Return(uint64(42), errTest).Once() - m.stateMock.On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), mock.Anything, nil).Run( - func(args mock.Arguments) { - proof, ok := args[1].(*state.Proof) - if !ok { - t.Fatalf("expected args[1] to be of type *state.Proof, got %T", args[1]) - } - assert.Equal(batchToProve.BatchNumber, proof.BatchNumber) - assert.Equal(batchToProve.BatchNumber, proof.BatchNumberFinal) - assert.Equal(&proverName, 
proof.Prover) - assert.Equal(&proverID, proof.ProverID) - assert.Equal("", proof.InputProver) - assert.Equal(recursiveProof, proof.Proof) - assert.Nil(proof.GeneratingSince) - }, - ).Return(nil).Once() - }, - asserts: func(result bool, a *Aggregator, err error) { - assert.True(result) - assert.NoError(err) - }, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - stateMock := mocks.NewStateInterfaceMock(t) - ethTxManager := mocks.NewEthTxManagerClientMock(t) - etherman := mocks.NewEthermanMock(t) - proverMock := mocks.NewProverInterfaceMock(t) - synchronizerMock := mocks.NewSynchronizerInterfaceMock(t) - mockRPC := mocks.NewRPCInterfaceMock(t) - - a := Aggregator{ - cfg: cfg, - state: stateMock, - etherman: etherman, - ethTxManager: ethTxManager, - logger: log.GetDefaultLogger(), - stateDBMutex: &sync.Mutex{}, - timeSendFinalProofMutex: &sync.RWMutex{}, - timeCleanupLockedProofs: cfg.CleanupLockedProofsInterval, - finalProof: make(chan finalProofMsg), - profitabilityChecker: NewTxProfitabilityCheckerAcceptAll(stateMock, cfg.IntervalAfterWhichBatchConsolidateAnyway.Duration), - l1Syncr: synchronizerMock, - rpcClient: mockRPC, - accInputHashes: make(map[uint64]common.Hash), - accInputHashesMutex: &sync.Mutex{}, - } - aggregatorCtx := context.WithValue(context.Background(), "owner", ownerAggregator) //nolint:staticcheck - a.ctx, a.exit = context.WithCancel(aggregatorCtx) - - m := mox{ - stateMock: stateMock, - ethTxManager: ethTxManager, - etherman: etherman, - proverMock: proverMock, - synchronizerMock: synchronizerMock, - rpcMock: mockRPC, - } - if tc.setup != nil { - tc.setup(m, &a) - } - a.resetVerifyProofTime() - - result, err := a.tryGenerateBatchProof(proverCtx, proverMock) - - if tc.asserts != nil { - tc.asserts(result, &a, err) - } - }) - } -} - func Test_accInputHashFunctions(t *testing.T) { aggregator := Aggregator{ accInputHashes: make(map[uint64]common.Hash), diff --git a/aggregator/interfaces.go b/aggregator/interfaces.go 
index 81f63d94..f1673c46 100644 --- a/aggregator/interfaces.go +++ b/aggregator/interfaces.go @@ -30,7 +30,7 @@ type ProverInterface interface { BatchProof(input *prover.StatelessInputProver) (*string, error) AggregatedProof(inputProof1, inputProof2 string) (*string, error) FinalProof(inputProof string, aggregatorAddr string) (*string, error) - WaitRecursiveProof(ctx context.Context, proofID string) (string, common.Hash, error) + WaitRecursiveProof(ctx context.Context, proofID string) (string, common.Hash, common.Hash, error) WaitFinalProof(ctx context.Context, proofID string) (*prover.FinalProof, error) } diff --git a/aggregator/mocks/mock_prover.go b/aggregator/mocks/mock_prover.go index 72bd66dc..b6ce1011 100644 --- a/aggregator/mocks/mock_prover.go +++ b/aggregator/mocks/mock_prover.go @@ -220,7 +220,7 @@ func (_m *ProverInterfaceMock) WaitFinalProof(ctx context.Context, proofID strin } // WaitRecursiveProof provides a mock function with given fields: ctx, proofID -func (_m *ProverInterfaceMock) WaitRecursiveProof(ctx context.Context, proofID string) (string, common.Hash, error) { +func (_m *ProverInterfaceMock) WaitRecursiveProof(ctx context.Context, proofID string) (string, common.Hash, common.Hash, error) { ret := _m.Called(ctx, proofID) if len(ret) == 0 { @@ -229,8 +229,9 @@ func (_m *ProverInterfaceMock) WaitRecursiveProof(ctx context.Context, proofID s var r0 string var r1 common.Hash - var r2 error - if rf, ok := ret.Get(0).(func(context.Context, string) (string, common.Hash, error)); ok { + var r2 common.Hash + var r3 error + if rf, ok := ret.Get(0).(func(context.Context, string) (string, common.Hash, common.Hash, error)); ok { return rf(ctx, proofID) } if rf, ok := ret.Get(0).(func(context.Context, string) string); ok { @@ -247,13 +248,21 @@ func (_m *ProverInterfaceMock) WaitRecursiveProof(ctx context.Context, proofID s } } - if rf, ok := ret.Get(2).(func(context.Context, string) error); ok { + if rf, ok := ret.Get(2).(func(context.Context, string) 
common.Hash); ok { r2 = rf(ctx, proofID) } else { - r2 = ret.Error(2) + if ret.Get(2) != nil { + r2 = ret.Get(2).(common.Hash) + } + } + + if rf, ok := ret.Get(3).(func(context.Context, string) error); ok { + r3 = rf(ctx, proofID) + } else { + r3 = ret.Error(3) } - return r0, r1, r2 + return r0, r1, r2, r3 } // NewProverInterfaceMock creates a new instance of ProverInterfaceMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. diff --git a/aggregator/prover/prover.go b/aggregator/prover/prover.go index 8cb13b1d..a5f7e9eb 100644 --- a/aggregator/prover/prover.go +++ b/aggregator/prover/prover.go @@ -18,8 +18,10 @@ import ( ) const ( - stateRootStartIndex = 19 - stateRootFinalIndex = stateRootStartIndex + 8 + stateRootStartIndex = 19 + stateRootFinalIndex = stateRootStartIndex + 8 + accInputHashStartIndex = 27 + accInputHashFinalIndex = accInputHashStartIndex + 8 ) var ( @@ -282,30 +284,36 @@ func (p *Prover) CancelProofRequest(proofID string) error { // WaitRecursiveProof waits for a recursive proof to be generated by the prover // and returns it. 
-func (p *Prover) WaitRecursiveProof(ctx context.Context, proofID string) (string, common.Hash, error) { +func (p *Prover) WaitRecursiveProof(ctx context.Context, proofID string) (string, common.Hash, common.Hash, error) { res, err := p.waitProof(ctx, proofID) if err != nil { - return "", common.Hash{}, err + return "", common.Hash{}, common.Hash{}, err } resProof, ok := res.Proof.(*GetProofResponse_RecursiveProof) if !ok { - return "", common.Hash{}, fmt.Errorf( + return "", common.Hash{}, common.Hash{}, fmt.Errorf( "%w, wanted %T, got %T", ErrBadProverResponse, &GetProofResponse_RecursiveProof{}, res.Proof, ) } - sr, err := GetStateRootFromProof(p.logger, resProof.RecursiveProof) + sr, err := GetSanityCheckHashFromProof(p.logger, resProof.RecursiveProof, stateRootStartIndex, stateRootFinalIndex) if err != nil && sr != (common.Hash{}) { p.logger.Errorf("Error getting state root from proof: %v", err) } + accInputHash, err := GetSanityCheckHashFromProof(p.logger, resProof.RecursiveProof, + accInputHashStartIndex, accInputHashFinalIndex) + if err != nil && accInputHash != (common.Hash{}) { + p.logger.Errorf("Error getting acc input hash from proof: %v", err) + } + if sr == (common.Hash{}) { p.logger.Info("Recursive proof does not contain state root. Possibly mock prover is in use.") } - return resProof.RecursiveProof, sr, nil + return resProof.RecursiveProof, sr, accInputHash, nil } // WaitFinalProof waits for the final proof to be generated by the prover and @@ -395,11 +403,8 @@ func (p *Prover) call(req *AggregatorMessage) (*ProverMessage, error) { return res, nil } -// GetStateRootFromProof returns the state root from the proof. 
-func GetStateRootFromProof(logger *log.Logger, proof string) (common.Hash, error) { - // Log received proof - logger.Debugf("Received proof to get SR from: %s", proof) - +// GetSanityCheckHashFromProof returns info from the proof +func GetSanityCheckHashFromProof(logger *log.Logger, proof string, startIndex, endIndex int) (common.Hash, error) { type Publics struct { Publics []string `mapstructure:"publics"` } @@ -420,7 +425,7 @@ func GetStateRootFromProof(logger *log.Logger, proof string) (common.Hash, error v [8]uint64 j = 0 ) - for i := stateRootStartIndex; i < stateRootFinalIndex; i++ { + for i := startIndex; i < endIndex; i++ { u64, err := strconv.ParseInt(publics.Publics[i], 10, 64) if err != nil { logger.Fatal(err) diff --git a/aggregator/prover/prover_test.go b/aggregator/prover/prover_test.go index 737d5592..ee12c3ac 100644 --- a/aggregator/prover/prover_test.go +++ b/aggregator/prover/prover_test.go @@ -11,7 +11,9 @@ import ( ) const ( - dir = "../../test/vectors/proofs" + dir = "../../test/vectors/proofs" + stateRootStartIndex = 19 + stateRootFinalIndex = stateRootStartIndex + 8 ) type TestStateRoot struct { @@ -40,7 +42,7 @@ func TestCalculateStateRoots(t *testing.T) { require.NoError(t, err) // Get the state root from the batch proof - fileStateRoot, err := prover.GetStateRootFromProof(log.GetDefaultLogger(), string(data)) + fileStateRoot, err := prover.GetSanityCheckHashFromProof(log.GetDefaultLogger(), string(data), stateRootStartIndex, stateRootFinalIndex) require.NoError(t, err) // Get the expected state root diff --git a/go.mod b/go.mod index 430e8326..70ec5e69 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ require ( github.com/0xPolygon/cdk-data-availability v0.0.10 github.com/0xPolygon/cdk-rpc v0.0.0-20241004114257-6c3cb6eebfb6 github.com/0xPolygon/zkevm-ethtx-manager v0.2.1 - github.com/0xPolygonHermez/zkevm-synchronizer-l1 v1.0.5 + github.com/0xPolygonHermez/zkevm-synchronizer-l1 v1.0.6 github.com/ethereum/go-ethereum v1.14.8 
github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3 github.com/hermeznetwork/tracerr v0.3.2 @@ -40,7 +40,6 @@ require ( ) require ( - github.com/0xPolygonHermez/zkevm-data-streamer v0.2.7 // indirect github.com/DataDog/zstd v1.5.6 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/StackExchange/wmi v1.2.1 // indirect diff --git a/go.sum b/go.sum index 3ad80938..ccf812c4 100644 --- a/go.sum +++ b/go.sum @@ -6,9 +6,8 @@ github.com/0xPolygon/cdk-rpc v0.0.0-20241004114257-6c3cb6eebfb6 h1:FXL/rcO7/GtZ3 github.com/0xPolygon/cdk-rpc v0.0.0-20241004114257-6c3cb6eebfb6/go.mod h1:2scWqMMufrQXu7TikDgQ3BsyaKoX8qP26D6E262vSOg= github.com/0xPolygon/zkevm-ethtx-manager v0.2.1 h1:2Yb+KdJFMpVrS9LIkd658XiWuN+MCTs7SgeWaopXScg= github.com/0xPolygon/zkevm-ethtx-manager v0.2.1/go.mod h1:lqQmzSo2OXEZItD0R4Cd+lqKFxphXEWgqHefVcGDZZc= -github.com/0xPolygonHermez/zkevm-data-streamer v0.2.7/go.mod h1:7nM7Ihk+fTG1TQPwdZoGOYd3wprqqyIyjtS514uHzWE= -github.com/0xPolygonHermez/zkevm-synchronizer-l1 v1.0.5 h1:YmnhuCl349MoNASN0fMeGKU1o9HqJhiZkfMsA/1cTRA= -github.com/0xPolygonHermez/zkevm-synchronizer-l1 v1.0.5/go.mod h1:X4Su/M/+hSISqdl9yomKlRsbTyuZHsRohporyHsP8gg= +github.com/0xPolygonHermez/zkevm-synchronizer-l1 v1.0.6 h1:+XsCHXvQezRdMnkI37Wa/nV4sOZshJavxNzRpH/R6dw= +github.com/0xPolygonHermez/zkevm-synchronizer-l1 v1.0.6/go.mod h1:X4Su/M/+hSISqdl9yomKlRsbTyuZHsRohporyHsP8gg= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/DataDog/zstd v1.5.6 h1:LbEglqepa/ipmmQJUDnSsfvA8e8IStVcGaFWDuxvGOY= github.com/DataDog/zstd v1.5.6/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= @@ -481,8 +480,6 @@ golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= golang.org/x/crypto v0.27.0/go.mod 
h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= -golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa h1:FRnLl4eNAQl8hwxVVC17teOw8kdjVDVAiFMtgUdTSRQ= -golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa/go.mod h1:zk2irFbV9DP96SEBUUAy67IdHUaZuSnrz1n472HUCLE= golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f h1:XdNn9LlyWAhLVp6P/i8QYBW+hlyhrhei9uErw2B5GJo= golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f/go.mod h1:D5SMRVC3C2/4+F/DB1wZsLRnSNimn2Sp/NPsCrsv8ak= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -490,9 +487,8 @@ golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKG golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8= -golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= +golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -512,8 +508,6 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
-golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -575,9 +569,8 @@ golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg= -golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI= golang.org/x/tools v0.27.0 h1:qEKojBykQkQ4EynWy4S8Weg69NumxKdn40Fce3uc/8o= +golang.org/x/tools v0.27.0/go.mod h1:sUi0ZgbwW9ZPAq26Ekut+weQPR5eIM6GQLQ1Yjm1H0Q= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= From a5c6aa17848a10b601a43ae88b93261c3062b659 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toni=20Ram=C3=ADrez?= <58293609+ToniRamirezM@users.noreply.github.com> Date: Wed, 20 Nov 2024 09:10:19 +0100 Subject: [PATCH 32/33] feat: l1infotreesync can be run as individual component (#188) (#195) Co-authored-by: Joan Esteban <129153821+joanestebanr@users.noreply.github.com> --- cmd/run.go | 6 ++++-- common/components.go | 2 ++ 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/cmd/run.go 
b/cmd/run.go index 6042e935..727533e8 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -529,7 +529,7 @@ func runL1InfoTreeSyncerIfNeeded( reorgDetector *reorgdetector.ReorgDetector, ) *l1infotreesync.L1InfoTreeSync { if !isNeeded([]string{cdkcommon.AGGORACLE, cdkcommon.RPC, - cdkcommon.SEQUENCE_SENDER, cdkcommon.AGGSENDER}, components) { + cdkcommon.SEQUENCE_SENDER, cdkcommon.AGGSENDER, cdkcommon.L1INFOTREESYNC}, components) { return nil } l1InfoTreeSync, err := l1infotreesync.New( @@ -560,6 +560,7 @@ func runL1ClientIfNeeded(components []string, urlRPCL1 string) *ethclient.Client cdkcommon.SEQUENCE_SENDER, cdkcommon.AGGREGATOR, cdkcommon.AGGORACLE, cdkcommon.RPC, cdkcommon.AGGSENDER, + cdkcommon.L1INFOTREESYNC, }, components) { return nil } @@ -594,7 +595,8 @@ func runReorgDetectorL1IfNeeded( ) (*reorgdetector.ReorgDetector, chan error) { if !isNeeded([]string{ cdkcommon.SEQUENCE_SENDER, cdkcommon.AGGREGATOR, - cdkcommon.AGGORACLE, cdkcommon.RPC, cdkcommon.AGGSENDER}, + cdkcommon.AGGORACLE, cdkcommon.RPC, cdkcommon.AGGSENDER, + cdkcommon.L1INFOTREESYNC}, components) { return nil, nil } diff --git a/common/components.go b/common/components.go index 7ef9d285..2c8ab188 100644 --- a/common/components.go +++ b/common/components.go @@ -15,4 +15,6 @@ const ( PROVER = "prover" // AGGSENDER name to identify the aggsender component AGGSENDER = "aggsender" + // L1INFOTREESYNC name to identify the l1infotreesync component + L1INFOTREESYNC = "l1infotreesync" ) From a5b898498bebcb2b2b2b4a69a8457623d6806e0a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toni=20Ram=C3=ADrez?= <58293609+ToniRamirezM@users.noreply.github.com> Date: Thu, 28 Nov 2024 15:03:08 +0100 Subject: [PATCH 33/33] fix: clean proof table on start (#207) * fix: clean proof table on start --- aggregator/aggregator.go | 12 ++++++------ aggregator/aggregator_test.go | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/aggregator/aggregator.go b/aggregator/aggregator.go index 58e97402..76fcd4ab 100644 --- 
a/aggregator/aggregator.go +++ b/aggregator/aggregator.go @@ -335,12 +335,6 @@ func (a *Aggregator) Start() error { healthService := newHealthChecker() grpchealth.RegisterHealthServer(a.srv, healthService) - // Delete ungenerated recursive proofs - err = a.state.DeleteUngeneratedProofs(a.ctx, nil) - if err != nil { - return fmt.Errorf("failed to initialize proofs cache %w", err) - } - // Get last verified batch number to set the starting point for verifications lastVerifiedBatchNumber, err := a.etherman.GetLatestVerifiedBatchNum() if err != nil { @@ -357,6 +351,12 @@ func (a *Aggregator) Start() error { a.logger.Infof("Starting AccInputHash:%v", accInputHash.String()) a.setAccInputHash(lastVerifiedBatchNumber, *accInputHash) + // Delete existing proofs + err = a.state.DeleteGeneratedProofs(a.ctx, lastVerifiedBatchNumber, maxDBBigIntValue, nil) + if err != nil { + return fmt.Errorf("failed to delete proofs table %w", err) + } + a.resetVerifyProofTime() go a.cleanupLockedProofs() diff --git a/aggregator/aggregator_test.go b/aggregator/aggregator_test.go index ff788190..eee00584 100644 --- a/aggregator/aggregator_test.go +++ b/aggregator/aggregator_test.go @@ -83,7 +83,7 @@ func Test_Start(t *testing.T) { mockL1Syncr.On("Sync", mock.Anything).Return(nil) mockEtherman.On("GetLatestVerifiedBatchNum").Return(uint64(90), nil).Once() mockEtherman.On("GetBatchAccInputHash", mock.Anything, uint64(90)).Return(common.Hash{}, nil).Once() - mockState.On("DeleteUngeneratedProofs", mock.Anything, nil).Return(nil).Once() + mockState.On("DeleteGeneratedProofs", mock.Anything, uint64(90), mock.Anything, nil).Return(nil).Once() mockState.On("CleanupLockedProofs", mock.Anything, "", nil).Return(int64(0), nil) mockEthTxManager.On("Start").Return(nil)