From 2a5f3115d15bc7454b7f2e6cd0d20438d35fbdec Mon Sep 17 00:00:00 2001
From: Victor Castell <victor@polygon.technology>
Date: Thu, 5 Sep 2024 22:41:43 +0200
Subject: [PATCH 1/6] chore: setup codeowners (#65)

---
 .github/CODEOWNERS | 31 +------------------------------
 1 file changed, 1 insertion(+), 30 deletions(-)

diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 847bd845c..2ac98e7ce 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -1,30 +1 @@
-# This is a comment.
-
-# Each line is a file pattern followed by one or more owners.
-
-# These owners will be the default owners for everything in the repo.
-*       @default-owner
-
-# Order is important. The last matching pattern has the most precedence.
-
-# To set ownership of a particular folder, add a line like this:
-/docs/          @documentation-team
-
-# To set ownership of all .js files in the src/js/ directory, add a line like this:
-/src/js/*.js    @js-developers
-
-# You can also specify individual users. This will make @octocat the owner of any .md files at the root of the repository:
-/*.md           @octocat
-
-# In this example, @doctocat owns any files in the /apps/ directory and any of its subdirectories.
-/apps/          @doctocat
-
-# The `@backend-team` team owns any files in the `/api/` directory in the root of your repository and any of its subdirectories.
-/api/           @backend-team
-
-# The `@frontend-team` team owns any files in the `/web/` directory in the root of your repository and any of its subdirectories.
-/web/           @frontend-team
-
-# Add this line at the end of the file to always require pull request reviews
-# when someone on the team is not an owner and the pull request modifies code owned by the team.
-*       @global-owner
+@0xPolygon/core-cdk

From 10fee9326bf0087f1070edb8ca4c1840dbf82ac4 Mon Sep 17 00:00:00 2001
From: Toni Ramírez <58293609+ToniRamirezM@users.noreply.github.com>
Date: Mon, 9 Sep 2024 16:19:11 +0200
Subject: [PATCH 2/6] feat: protect ssender nonce (#67)

* feat: protect ssender nonce
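
This guards currentNonce with a mutex so that concurrent sendTx calls
each reserve a unique nonce before handing it to the eth tx manager,
instead of reading the shared counter and incrementing it only after the
sequence is queued. A minimal, standalone sketch of the pattern;
nonceSource and reserve are hypothetical stand-ins for the guarded
SequenceSender fields in the diff below:

    package main

    import (
        "fmt"
        "sync"
    )

    // nonceSource mirrors the mutex-guarded fields added to SequenceSender.
    type nonceSource struct {
        mu      sync.Mutex
        current uint64
    }

    // reserve hands out the next nonce atomically, so concurrent callers
    // each observe a unique value.
    func (n *nonceSource) reserve() uint64 {
        n.mu.Lock()
        defer n.mu.Unlock()
        nonce := n.current
        n.current++
        return nonce
    }

    func main() {
        src := &nonceSource{current: 7}
        var wg sync.WaitGroup
        for i := 0; i < 4; i++ {
            wg.Add(1)
            go func() {
                defer wg.Done()
                fmt.Println(src.reserve()) // 7..10 in some order, no duplicates
            }()
        }
        wg.Wait()
    }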
---
 sequencesender/sequencesender.go | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/sequencesender/sequencesender.go b/sequencesender/sequencesender.go
index 82e600345..8c96789a9 100644
--- a/sequencesender/sequencesender.go
+++ b/sequencesender/sequencesender.go
@@ -32,6 +32,7 @@ type SequenceSender struct {
 	ethTxManager           *ethtxmanager.Client
 	etherman               *etherman.Client
 	currentNonce           uint64
+	nonceMutex             sync.Mutex
 	latestVirtualBatch     uint64                     // Latest virtualized batch obtained from L1
 	latestVirtualTime      time.Time                  // Latest virtual batch timestamp
 	latestSentToL1Batch    uint64                     // Latest batch sent to L1
@@ -136,12 +137,14 @@ func (s *SequenceSender) Start(ctx context.Context) {
 
 	// Get current nonce
 	var err error
+	s.nonceMutex.Lock()
 	s.currentNonce, err = s.etherman.CurrentNonce(ctx, s.cfg.L2Coinbase)
 	if err != nil {
 		log.Fatalf("failed to get current nonce from %v, error: %v", s.cfg.L2Coinbase, err)
 	} else {
 		log.Infof("current nonce for %v is %d", s.cfg.L2Coinbase, s.currentNonce)
 	}
+	s.nonceMutex.Unlock()
 
 	// Get latest virtual state batch from L1
 	err = s.updateLatestVirtualBatch()
@@ -572,8 +575,12 @@ func (s *SequenceSender) sendTx(ctx context.Context, resend bool, txOldHash *com
 	var valueToAddress common.Address
 
 	if !resend {
+		s.nonceMutex.Lock()
+		nonce := s.currentNonce
+		s.currentNonce++
+		s.nonceMutex.Unlock()
+		paramNonce = &nonce
 		paramTo = to
-		paramNonce = &s.currentNonce
 		paramData = data
 		valueFromBatch = fromBatch
 		valueToBatch = toBatch
@@ -598,9 +605,6 @@ func (s *SequenceSender) sendTx(ctx context.Context, resend bool, txOldHash *com
 		log.Errorf("error adding sequence to ethtxmanager: %v", err)
 		return err
 	}
-	if !resend {
-		s.currentNonce++
-	}
 
 	// Add new eth tx
 	txData := ethTxData{

From 29d3a8b6183b425935271a002de9584313e340df Mon Sep 17 00:00:00 2001
From: Rachit Sonthalia <54906134+rachit77@users.noreply.github.com>
Date: Wed, 11 Sep 2024 13:56:54 +0530
Subject: [PATCH 3/6] feat: Linters Fix (#62)

* wip linters

* add SonarQube config

* linter fix bridgesync

* linter fix aggOracle

* linter fix claimsponsor

* linter fix cmd

* linter fix common config

* linter fix dataavailability

* etherman lint

* hex lint

* l1bridge2infoindexsync lint

* l1infotree lint fix

* l1infotreesync lastgersync lint fix

* log lint fix

* merkletree lint fix

* merkletree lint fix

* reorgdetector rpc lint fix

* sequencesender lint fix

* state lint fix

* sync tree lint fix

* wip

* wip

* lint fix

* addressed feedback

* wip

* wip

* sonar cloud fix (#66)

* sonar cloud fix

* wip

* addressed feedback

* upgrade actions

* reorg detector errors
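
Most of the diff is a mechanical application of the newly enabled
linters. Two recurring shapes, shown as a minimal sketch with
hypothetical names (add, config):

    package example

    // Long signatures are wrapped one parameter per line to satisfy the
    // lll (long lines) linter.
    func add(
        a int,
        b int,
    ) int {
        return a + b
    }

    // Lines that cannot be shortened, such as long struct tags, carry an
    // explicit exemption instead, as aggoracle/config.go now does.
    type config struct {
        BlockFinality string `jsonschema:"enum=LatestBlock, enum=SafeBlock" mapstructure:"BlockFinality"` //nolint:lll
    }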
---
 .github/workflows/codeql.yml                  |   2 +-
 .github/workflows/lint.yml                    |  23 ++
 .github/workflows/security-build.yml          |  22 --
 .github/workflows/test-e2e.yml                |   2 +-
 .github/workflows/test-resequence.yml         |   2 +-
 .github/workflows/test-unit.yml               |  40 ++++
 .github/workflows/test-unittest.yml           |  20 --
 .golangci.yml                                 |  89 +++----
 Makefile                                      |   6 +-
 aggoracle/chaingersender/evm.go               |  15 +-
 aggoracle/config.go                           |   2 +-
 aggoracle/e2e_test.go                         |   2 +
 aggoracle/oracle.go                           |  11 +-
 aggregator/agglayer_tx.go                     |   2 +
 aggregator/aggregator.go                      | 222 +++++++++++++++---
 aggregator/config.go                          |  10 +-
 aggregator/db/db.go                           |   7 +-
 aggregator/db/migrations.go                   |  23 +-
 aggregator/interfaces.go                      |   4 +-
 aggregator/profitabilitychecker.go            |   9 +-
 aggregator/prover/prover.go                   |  87 +++++--
 bridgesync/bridgesync.go                      |   9 +-
 bridgesync/config.go                          |   2 +-
 bridgesync/downloader.go                      |  77 ++++--
 bridgesync/e2e_test.go                        |  16 +-
 bridgesync/processor.go                       |  46 +++-
 bridgesync/processor_test.go                  |  17 +-
 claimsponsor/claimsponsor.go                  |  31 ++-
 claimsponsor/e2e_test.go                      |   1 +
 claimsponsor/evmclaimsponsor.go               |  12 +-
 cmd/main.go                                   |   6 +-
 cmd/run.go                                    | 114 +++++++--
 cmd/version.go                                |   1 +
 common/common.go                              |  16 +-
 common/config.go                              |   1 +
 config/config.go                              |  21 +-
 config/network.go                             |  10 +-
 config/types/duration.go                      |   1 +
 dataavailability/dataavailability.go          |   6 +-
 .../datacommittee/datacommittee.go            |  24 +-
 .../datacommittee/datacommittee_test.go       |   8 +-
 dataavailability/interfaces.go                |   2 +
 etherman/aggregator.go                        |  17 +-
 etherman/contracts/base.go                    |  10 +-
 etherman/contracts/contracts_banana.go        |  18 +-
 etherman/contracts/contracts_elderberry.go    |  28 ++-
 etherman/errors.go                            |  12 +-
 etherman/errors_test.go                       |   2 +-
 etherman/etherman.go                          |  60 ++++-
 hex/hex.go                                    |   5 +-
 l1bridge2infoindexsync/downloader.go          |   3 +
 l1bridge2infoindexsync/driver.go              |  26 +-
 l1bridge2infoindexsync/e2e_test.go            |  32 +--
 .../l1bridge2infoindexsync.go                 |   7 +-
 l1bridge2infoindexsync/processor.go           |  28 ++-
 l1infotree/hash.go                            |  10 +-
 l1infotree/tree.go                            |  17 +-
 l1infotreesync/config.go                      |   2 +-
 l1infotreesync/downloader.go                  |  31 ++-
 l1infotreesync/e2e_test.go                    |  11 +-
 l1infotreesync/l1infotreesync.go              |  15 +-
 l1infotreesync/processor.go                   |  43 +++-
 lastgersync/config.go                         |   5 +-
 lastgersync/e2e_test.go                       |   1 +
 lastgersync/evmdownloader.go                  |  26 +-
 lastgersync/lastgersync.go                    |   6 +-
 lastgersync/processor.go                      |  55 ++++-
 log/config.go                                 |   6 +-
 log/log.go                                    |   4 +-
 merkletree/key.go                             |  26 +-
 merkletree/split.go                           |   6 +-
 reorgdetector/reorgdetector_test.go           |   4 +-
 rpc/bridge.go                                 |  67 ++++--
 rpc/bridge_client.go                          |   6 +-
 sequencesender/config.go                      |   7 +-
 sequencesender/sequencesender.go              | 170 ++++++++++----
 sequencesender/txbuilder/banana_base.go       |  37 ++-
 sequencesender/txbuilder/banana_base_test.go  |   2 +
 sequencesender/txbuilder/banana_types.go      |  16 +-
 sequencesender/txbuilder/banana_validium.go   |  27 ++-
 .../txbuilder/banana_validium_test.go         |   8 +-
 sequencesender/txbuilder/banana_zkevm.go      |  25 +-
 sequencesender/txbuilder/banana_zkevm_test.go |   7 +-
 sequencesender/txbuilder/elderberry_base.go   |   4 +-
 .../txbuilder/elderberry_base_test.go         |   3 +-
 .../txbuilder/elderberry_validium.go          |  26 +-
 .../txbuilder/elderberry_validium_test.go     |   2 +
 sequencesender/txbuilder/elderberry_zkevm.go  |  34 ++-
 .../txbuilder/elderberry_zkevm_test.go        |   2 +
 sequencesender/txbuilder/interface.go         |  12 +-
 sequencesender/txbuilder/interface_test.go    |   6 +
 .../txbuilder/validium_cond_num_batches.go    |   4 +-
 .../txbuilder/zkevm_cond_max_size.go          |  27 ++-
 sonar-project.properties                      |  28 +++
 state/encoding_batch_v2.go                    |  37 ++-
 state/errors.go                               |   8 +-
 state/forkid.go                               |   8 +-
 state/helper.go                               |  20 +-
 state/pgstatestorage/batch.go                 |  20 +-
 state/pgstatestorage/proof.go                 |  65 ++++-
 state/pgstatestorage/sequence.go              |   6 +-
 state/types.go                                |   2 +-
 sync/evmdownloader.go                         |  10 +-
 sync/evmdownloader_test.go                    |   2 +
 sync/evmdriver.go                             |   2 +-
 sync/evmdriver_test.go                        |  15 +-
 test/helpers/aggoracle_e2e.go                 | 140 +++++++----
 test/helpers/ethtxmanmock_e2e.go              |  37 ++-
 translator/translator_impl.go                 |   7 +-
 tree/appendonlytree.go                        |   8 +-
 tree/testvectors/types.go                     |   2 +-
 tree/tree.go                                  |  14 +-
 tree/updatabletree.go                         |   2 +-
 113 files changed, 1853 insertions(+), 609 deletions(-)
 create mode 100644 .github/workflows/lint.yml
 delete mode 100644 .github/workflows/security-build.yml
 create mode 100644 .github/workflows/test-unit.yml
 delete mode 100644 .github/workflows/test-unittest.yml

diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml
index 8a5d2d904..75c0ab874 100644
--- a/.github/workflows/codeql.yml
+++ b/.github/workflows/codeql.yml
@@ -46,7 +46,7 @@ jobs:
 
     steps:
     - name: Checkout repository
-      uses: actions/checkout@v3
+      uses: actions/checkout@v4
 
     # Initializes the CodeQL tools for scanning.
     - name: Initialize CodeQL
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
new file mode 100644
index 000000000..e644f97b4
--- /dev/null
+++ b/.github/workflows/lint.yml
@@ -0,0 +1,23 @@
+name: Lint
+on:
+  push:
+    branches:
+      - main
+      - develop
+      - update-external-dependencies
+      - 'release/**'
+  pull_request:
+jobs:
+  lint:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Install Go
+        uses: actions/setup-go@v3
+        with:
+          go-version: 1.21.x
+      - name: Checkout code
+        uses: actions/checkout@v3
+      - name: Lint
+        run: |
+          make install-linter
+          make lint
diff --git a/.github/workflows/security-build.yml b/.github/workflows/security-build.yml
deleted file mode 100644
index a4def6772..000000000
--- a/.github/workflows/security-build.yml
+++ /dev/null
@@ -1,22 +0,0 @@
-name: Security Build
-on:
-  push:
-    branches:
-      - main # or the name of your main branch
-  workflow_dispatch: {}
-  pull_request:
-    types: [opened, synchronize, reopened]
-    
-jobs:
-  sonarcloud:
-    name: SonarCloud
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v3
-        with:
-          fetch-depth: 0  # Shallow clones should be disabled for a better relevancy of analysis
-      - name: SonarCloud Scan
-        uses: SonarSource/sonarcloud-github-action@master
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}  # Needed to get PR information, if any
-          SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
diff --git a/.github/workflows/test-e2e.yml b/.github/workflows/test-e2e.yml
index fad10c796..670ce8b37 100644
--- a/.github/workflows/test-e2e.yml
+++ b/.github/workflows/test-e2e.yml
@@ -20,7 +20,7 @@ jobs:
       uses: actions/checkout@v4
 
     - name: Install Go
-      uses: actions/setup-go@v3
+      uses: actions/setup-go@v5
       with:
         go-version: ${{ matrix.go-version }}
       env:
diff --git a/.github/workflows/test-resequence.yml b/.github/workflows/test-resequence.yml
index 3e34cd63d..9ac51af0c 100644
--- a/.github/workflows/test-resequence.yml
+++ b/.github/workflows/test-resequence.yml
@@ -94,7 +94,7 @@ jobs:
 
       - name: Upload logs
         if: always()
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: logs_${{ github.run_id }}
           path: ./kurtosis-cdk/ci_logs
diff --git a/.github/workflows/test-unit.yml b/.github/workflows/test-unit.yml
new file mode 100644
index 000000000..66cfc0107
--- /dev/null
+++ b/.github/workflows/test-unit.yml
@@ -0,0 +1,40 @@
+name: Test Unit and SonarCloud analysis
+
+on:
+  push:
+    branches:
+      - main
+      - develop
+      - 'release/**'
+  pull_request:
+  workflow_dispatch: {}
+
+jobs:
+  test-unit:
+    strategy:
+      fail-fast: false
+      matrix:
+        go-version: [1.22.4]
+        goarch: ["amd64"]
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0  # Shallow clones should be disabled for a better relevancy of analysis
+
+      - name: Install Go
+        uses: actions/setup-go@v5
+        with:
+          go-version: ${{ matrix.go-version }}
+        env:
+          GOARCH: ${{ matrix.goarch }}
+
+      - name: Test
+        run: make test-unit
+          
+      - name: Analyze with SonarCloud
+        uses: sonarsource/sonarcloud-github-action@master
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
\ No newline at end of file
diff --git a/.github/workflows/test-unittest.yml b/.github/workflows/test-unittest.yml
deleted file mode 100644
index 156a01441..000000000
--- a/.github/workflows/test-unittest.yml
+++ /dev/null
@@ -1,20 +0,0 @@
-name: Test Unittest
-on: 
-  push:
-     branches:
-      - '**'
-  workflow_dispatch: {}
-  
- 
-jobs:
-  test-unittest:
-    runs-on: ubuntu-latest
-    steps:
-    - uses: actions/checkout@v4
-      with:
-        fetch-depth: 0
-    - name: Install Go
-      uses: actions/setup-go@v3
-
-    - name: Launch unittest
-      run: make test
\ No newline at end of file
diff --git a/.golangci.yml b/.golangci.yml
index a0a1caef1..5dd6e0ecd 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -8,70 +8,79 @@ run:
   skip-dirs-use-default: true
   skip-dirs:
     - tests
+    - aggregator/db/migrations
 
 service:
   golangci-lint-version: 1.59.1
-
+    
 linters:
   disable-all: true
   enable:
-    - whitespace # Tool for detection of leading and trailing whitespace
-    # - wsl # Forces you to use empty lines
-    - wastedassign # Finds wasted assignment statements
-    - unconvert # Unnecessary type conversions
-    - tparallel # Detects inappropriate usage of t.Parallel() method in your Go test codes
-    - thelper # Detects golang test helpers without t.Helper() call and checks the consistency of test helpers
-    # - stylecheck # Stylecheck is a replacement for golint
-    # - prealloc # Finds slice declarations that could potentially be pre-allocated
-    - predeclared # Finds code that shadows one of Go's predeclared identifiers
-    # - nolintlint # Ill-formed or insufficient nolint directives
-    # - nlreturn # Checks for a new line before return and branch statements to increase code clarity
-    - misspell # Misspelled English words in comments
-    - makezero # Finds slice declarations with non-zero initial length
-    # - lll # Long lines
-    - importas # Enforces consistent import aliases
-    - gosec # Security problems
-    - gofmt # Whether the code was gofmt-ed
-    - goimports # Unused imports
-    - goconst # Repeated strings that could be replaced by a constant
-    # - forcetypeassert # Finds forced type assertions
-    - dogsled # Checks assignments with too many blank identifiers (e.g. x, , , _, := f())
-    # - dupl # Code clone detection
-    - errname # Checks that sentinel errors are prefixed with the Err and error types are suffixed with the Error
-    # - errorlint # errorlint is a linter for that can be used to find code that will cause problems with the error wrapping scheme introduced in Go 1.13
-    # - gocritic
-    - errcheck # Errcheck is a go lint rule for checking for unchecked errors in go programs. These unchecked errors can be critical bugs in some cases
-    # - godox # Godox is a linter for TODOs and FIXMEs left in the code
+  - whitespace # Tool for detection of leading and trailing whitespace
+  # - wsl # Forces you to use empty lines
+  - wastedassign # Finds wasted assignment statements
+  - unconvert  # Unnecessary type conversions
+  - tparallel # Detects inappropriate usage of t.Parallel() method in your Go test codes
+  - thelper # Detects golang test helpers without t.Helper() call and checks the consistency of test helpers
+  - stylecheck # Stylecheck is a replacement for golint
+  - prealloc # Finds slice declarations that could potentially be pre-allocated
+  - predeclared # Finds code that shadows one of Go's predeclared identifiers
+  - nolintlint # Ill-formed or insufficient nolint directives
+  # - nlreturn # Checks for a new line before return and branch statements to increase code clarity
+  - misspell # Misspelled English words in comments
+  - makezero # Finds slice declarations with non-zero initial length
+  - lll # Long lines
+  - importas  # Enforces consistent import aliases
+  - gosec # Security problems
+  - gofmt # Whether the code was gofmt-ed
+  - goimports # Unused imports
+  - goconst # Repeated strings that could be replaced by a constant
+  - forcetypeassert # Finds forced type assertions
+  - dogsled # Checks assignments with too many blank identifiers (e.g. x, , , _, := f())
+  - dupl # Code clone detection
+  - errname # Checks that sentinel errors are prefixed with the Err and error types are suffixed with the Error
+  - errorlint # errorlint is a linter for that can be used to find code that will cause problems with the error wrapping scheme introduced in Go 1.13
+  - gocritic # gocritic is a Go source code linter that maintains checks that are not in other linters
+  - errcheck # Errcheck is a go lint rule for checking for unchecked errors in go programs. These unchecked errors can be critical bugs in some cases
+  # - godox # Godox is a linter for TODOs and FIXMEs left in the code
+  - gci # Gci is a linter for checking the consistency of the code with the go code style guide
+  - gomnd # Gomnd is a linter for magic numbers
+  # - revive 
+  - unparam # Unparam is a linter for unused function parameters
 
 linters-settings:
   gofmt:
     simplify: true
-  goconst:
-    min-len: 3
-    min-occurrences: 3
   gocritic:
     enabled-checks:
       - ruleguard
-    settings:
-      ruleguard:
-        rules: "./gorules/rules.go"
+    # settings:
+    #   ruleguard:
+    #     rules: "./gorules/rules.go"
+  revive:
+    rules:
+    - name: exported
+      arguments:
+      - disableStutteringCheck
+  goconst:
+    min-len: 3
+    min-occurrences: 3
 
 issues:
-  new-from-rev: origin/develop # report only new issues with reference to develop branch
+  # new-from-rev: origin/develop # report only new issues with reference to develop branch
   whole-files: true
   exclude-rules:
-    - path: _test\.go
+    - path: '(_test\.go|^test/.*)'
       linters:
         - gosec
         - unparam
         - lll
-    - path: gen_sc_data\.go
+    - path: 'etherman/contracts/contracts_(banana|elderberry)\.go'
       linters:
-        - wsl
-        - lll
-        - stylecheck
+        - dupl
   include:
     - EXC0012 # Exported (.+) should have comment( \(or a comment on this block\))? or be unexported
     - EXC0013 # Package comment should be of the form "(.+)...
     - EXC0014 # Comment on exported (.+) should be of the form "(.+)..."
     - EXC0015 # Should have a package comment
+  
\ No newline at end of file
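
With gomnd and goconst enabled, magic numbers and repeated literals now
need either a named constant or an explicit exemption (the halt loops in
aggregator.go below keep their literals and add //nolint:gomnd). A
minimal, standalone sketch with hypothetical names:

    package example

    import "time"

    // haltRetryInterval names the former magic number, satisfying gomnd.
    const haltRetryInterval = 10 * time.Second

    func waitForRestart() {
        for {
            time.Sleep(haltRetryInterval)
        }
    }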
diff --git a/Makefile b/Makefile
index 256509bc0..fa71f98ad 100644
--- a/Makefile
+++ b/Makefile
@@ -80,9 +80,9 @@ build-docker-nc: ## Builds a docker image with the cdk binary - but without buil
 stop: ## Stops all services
 	docker-compose down
 
-.PHONY: test
-test:
-	trap '$(STOP)' EXIT; MallocNanoZone=0 go test -count=1 -short -race -p 1 -covermode=atomic -coverprofile=../coverage.out  -coverpkg ./... -timeout 200s ./...
+.PHONY: test-unit
+test-unit:
+	trap '$(STOP)' EXIT; MallocNanoZone=0 go test -count=1 -short -race -p 1 -covermode=atomic -coverprofile=coverage.out  -coverpkg ./... -timeout 200s ./...
 
 .PHONY: test-seq_sender
 test-seq_sender:
diff --git a/aggoracle/chaingersender/evm.go b/aggoracle/chaingersender/evm.go
index 859f4b8b4..6bd2ea76c 100644
--- a/aggoracle/chaingersender/evm.go
+++ b/aggoracle/chaingersender/evm.go
@@ -25,9 +25,18 @@ type EthClienter interface {
 
 type EthTxManager interface {
 	Remove(ctx context.Context, id common.Hash) error
-	ResultsByStatus(ctx context.Context, statuses []ethtxmanager.MonitoredTxStatus) ([]ethtxmanager.MonitoredTxResult, error)
+	ResultsByStatus(ctx context.Context,
+		statuses []ethtxmanager.MonitoredTxStatus,
+	) ([]ethtxmanager.MonitoredTxResult, error)
 	Result(ctx context.Context, id common.Hash) (ethtxmanager.MonitoredTxResult, error)
-	Add(ctx context.Context, to *common.Address, forcedNonce *uint64, value *big.Int, data []byte, gasOffset uint64, sidecar *types.BlobTxSidecar) (common.Hash, error)
+	Add(ctx context.Context,
+		to *common.Address,
+		forcedNonce *uint64,
+		value *big.Int,
+		data []byte,
+		gasOffset uint64,
+		sidecar *types.BlobTxSidecar,
+	) (common.Hash, error)
 }
 
 type EVMChainGERSender struct {
@@ -61,6 +70,7 @@ func NewEVMChainGERSender(
 	if err != nil {
 		return nil, err
 	}
+
 	return &EVMChainGERSender{
 		gerContract:         gerContract,
 		gerAddr:             l2GlobalExitRoot,
@@ -77,6 +87,7 @@ func (c *EVMChainGERSender) IsGERAlreadyInjected(ger common.Hash) (bool, error)
 	if err != nil {
 		return false, fmt.Errorf("error calling gerContract.GlobalExitRootMap: %w", err)
 	}
+
 	return timestamp.Cmp(big.NewInt(0)) != 0, nil
 }
 
diff --git a/aggoracle/config.go b/aggoracle/config.go
index e60977070..8559ddb61 100644
--- a/aggoracle/config.go
+++ b/aggoracle/config.go
@@ -19,7 +19,7 @@ type Config struct {
 	TargetChainType TargetChainType `mapstructure:"TargetChainType"`
 	URLRPCL1        string          `mapstructure:"URLRPCL1"`
 	// BlockFinality indicates the status of the blocks that will be queried in order to sync
-	BlockFinality     string                   `jsonschema:"enum=LatestBlock, enum=SafeBlock, enum=PendingBlock, enum=FinalizedBlock, enum=EarliestBlock" mapstructure:"BlockFinality"`
+	BlockFinality     string                   `jsonschema:"enum=LatestBlock, enum=SafeBlock, enum=PendingBlock, enum=FinalizedBlock, enum=EarliestBlock" mapstructure:"BlockFinality"` //nolint:lll
 	WaitPeriodNextGER types.Duration           `mapstructure:"WaitPeriodNextGER"`
 	EVMSender         chaingersender.EVMConfig `mapstructure:"EVMSender"`
 }
diff --git a/aggoracle/e2e_test.go b/aggoracle/e2e_test.go
index 39150c004..25a8a96dc 100644
--- a/aggoracle/e2e_test.go
+++ b/aggoracle/e2e_test.go
@@ -27,6 +27,8 @@ func runTest(
 	l1Client *simulated.Backend,
 	authL1 *bind.TransactOpts,
 ) {
+	t.Helper()
+
 	for i := 0; i < 10; i++ {
 		_, err := gerL1Contract.UpdateExitRoot(authL1, common.HexToHash(strconv.Itoa(i)))
 		require.NoError(t, err)
diff --git a/aggoracle/oracle.go b/aggoracle/oracle.go
index f22ee1f0a..27383f436 100644
--- a/aggoracle/oracle.go
+++ b/aggoracle/oracle.go
@@ -2,6 +2,7 @@ package aggoracle
 
 import (
 	"context"
+	"errors"
 	"math/big"
 	"time"
 
@@ -41,6 +42,7 @@ func New(
 	if err != nil {
 		return nil, err
 	}
+
 	return &AggOracle{
 		ticker:        ticker,
 		l1Client:      l1Client,
@@ -61,26 +63,30 @@ func (a *AggOracle) Start(ctx context.Context) {
 		case <-a.ticker.C:
 			blockNumToFetch, gerToInject, err = a.getLastFinalisedGER(ctx, blockNumToFetch)
 			if err != nil {
-				if err == l1infotreesync.ErrBlockNotProcessed {
+				if errors.Is(err, l1infotreesync.ErrBlockNotProcessed) {
 					log.Debugf("syncer is not ready for the block %d", blockNumToFetch)
-				} else if err == l1infotreesync.ErrNotFound {
+				} else if errors.Is(err, l1infotreesync.ErrNotFound) {
 					blockNumToFetch = 0
 					log.Debugf("syncer has not found any GER until block %d", blockNumToFetch)
 				} else {
 					log.Error("error calling getLastFinalisedGER: ", err)
 				}
+
 				continue
 			}
 			if alreadyInjected, err := a.chainSender.IsGERAlreadyInjected(gerToInject); err != nil {
 				log.Error("error calling isGERAlreadyInjected: ", err)
+
 				continue
 			} else if alreadyInjected {
 				log.Debugf("GER %s already injected", gerToInject.Hex())
+
 				continue
 			}
 			log.Infof("injecting new GER: %s", gerToInject.Hex())
 			if err := a.chainSender.UpdateGERWaitUntilMined(ctx, gerToInject); err != nil {
 				log.Errorf("error calling updateGERWaitUntilMined, when trying to inject GER %s: %v", gerToInject.Hex(), err)
+
 				continue
 			}
 			log.Infof("GER %s injected", gerToInject.Hex())
@@ -106,5 +112,6 @@ func (a *AggOracle) getLastFinalisedGER(ctx context.Context, blockNumToFetch uin
 	if err != nil {
 		return blockNumToFetch, common.Hash{}, err
 	}
+
 	return 0, info.GlobalExitRoot, nil
 }
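
The switch from == to errors.Is above matters once errors are wrapped
with %w: a direct comparison misses a wrapped sentinel, while errors.Is
walks the whole chain. A standalone sketch with a hypothetical sentinel:

    package main

    import (
        "errors"
        "fmt"
    )

    var errNotFound = errors.New("not found")

    func lookup() error {
        // Wrapping with %w keeps the sentinel in the error chain.
        return fmt.Errorf("lookup failed: %w", errNotFound)
    }

    func main() {
        err := lookup()
        fmt.Println(err == errNotFound)          // false: the value is wrapped
        fmt.Println(errors.Is(err, errNotFound)) // true: Is unwraps the chain
    }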
diff --git a/aggregator/agglayer_tx.go b/aggregator/agglayer_tx.go
index b0cd09c9e..30a483aea 100644
--- a/aggregator/agglayer_tx.go
+++ b/aggregator/agglayer_tx.go
@@ -41,6 +41,7 @@ func (t *Tx) Sign(privateKey *ecdsa.PrivateKey) (*SignedTx, error) {
 	if err != nil {
 		return nil, err
 	}
+
 	return &SignedTx{
 		Tx:        *t,
 		Signature: sig,
@@ -59,5 +60,6 @@ func (s *SignedTx) Signer() (common.Address, error) {
 	if err != nil {
 		return common.Address{}, err
 	}
+
 	return crypto.PubkeyToAddress(*pubKey), nil
 }
diff --git a/aggregator/aggregator.go b/aggregator/aggregator.go
index 2b90826ee..f3e039f99 100644
--- a/aggregator/aggregator.go
+++ b/aggregator/aggregator.go
@@ -106,9 +106,13 @@ func New(
 
 	switch cfg.TxProfitabilityCheckerType {
 	case ProfitabilityBase:
-		profitabilityChecker = NewTxProfitabilityCheckerBase(stateInterface, cfg.IntervalAfterWhichBatchConsolidateAnyway.Duration, cfg.TxProfitabilityMinReward.Int)
+		profitabilityChecker = NewTxProfitabilityCheckerBase(
+			stateInterface, cfg.IntervalAfterWhichBatchConsolidateAnyway.Duration, cfg.TxProfitabilityMinReward.Int,
+		)
 	case ProfitabilityAcceptAll:
-		profitabilityChecker = NewTxProfitabilityCheckerAcceptAll(stateInterface, cfg.IntervalAfterWhichBatchConsolidateAnyway.Duration)
+		profitabilityChecker = NewTxProfitabilityCheckerAcceptAll(
+			stateInterface, cfg.IntervalAfterWhichBatchConsolidateAnyway.Duration,
+		)
 	}
 
 	// Create ethtxmanager client
@@ -225,12 +229,16 @@ func (a *Aggregator) retrieveWitness() {
 			// Get Witness
 			dbBatch.Witness, err = getWitness(dbBatch.Batch.BatchNumber, a.cfg.WitnessURL, a.cfg.UseFullWitness)
 			if err != nil {
-				if err == errBusy {
-					log.Debugf("Witness server is busy, retrying get witness for batch %d in %v", dbBatch.Batch.BatchNumber, a.cfg.RetryTime.Duration)
+				if errors.Is(err, errBusy) {
+					log.Debugf(
+						"Witness server is busy, retrying get witness for batch %d in %v",
+						dbBatch.Batch.BatchNumber, a.cfg.RetryTime.Duration,
+					)
 				} else {
 					log.Errorf("Failed to get witness for batch %d, err: %v", dbBatch.Batch.BatchNumber, err)
 				}
 				time.Sleep(a.cfg.RetryTime.Duration)
+
 				continue inner
 			}
 
@@ -238,6 +246,7 @@ func (a *Aggregator) retrieveWitness() {
 			if err != nil {
 				log.Errorf("Error adding batch: %v", err)
 				time.Sleep(a.cfg.RetryTime.Duration)
+
 				continue inner
 			}
 			success = true
@@ -264,8 +273,11 @@ func (a *Aggregator) handleReorg(reorgData synchronizer.ReorgExecutionResult) {
 	// Halt the aggregator
 	a.halted.Store(true)
 	for {
-		log.Errorf("Halting the aggregator due to a L1 reorg. Reorged data has been deleted so it is safe to manually restart the aggregator.")
-		time.Sleep(10 * time.Second) // nolint:gomnd
+		log.Warnf(
+			"Halting the aggregator due to a L1 reorg. " +
+				"Reorged data has been deleted, so it is safe to manually restart the aggregator.",
+		)
+		time.Sleep(10 * time.Second) //nolint:gomnd
 	}
 }
 
@@ -303,7 +315,10 @@ func (a *Aggregator) handleRollbackBatches(rollbackData synchronizer.RollbackBat
 
 	// Check lastVerifiedBatchNumber makes sense
 	if err == nil && lastVerifiedBatchNumber > rollbackData.LastBatchNumber {
-		err = fmt.Errorf("last verified batch number %d is greater than the last batch number %d in the rollback data", lastVerifiedBatchNumber, rollbackData.LastBatchNumber)
+		err = fmt.Errorf(
+			"last verified batch number %d is greater than the last batch number %d in the rollback data",
+			lastVerifiedBatchNumber, rollbackData.LastBatchNumber,
+		)
 	}
 
 	// Delete invalidated batches
@@ -388,12 +403,14 @@ func (a *Aggregator) handleRollbackBatches(rollbackData synchronizer.RollbackBat
 		a.halted.Store(true)
 		for {
 			log.Errorf("Halting the aggregator due to an error handling rollback batches event: %v", err)
-			time.Sleep(10 * time.Second) // nolint:gomnd
+			time.Sleep(10 * time.Second) //nolint:gomnd
 		}
 	}
 }
 
-func (a *Aggregator) handleReceivedDataStream(entry *datastreamer.FileEntry, client *datastreamer.StreamClient, server *datastreamer.StreamServer) error {
+func (a *Aggregator) handleReceivedDataStream(
+	entry *datastreamer.FileEntry, client *datastreamer.StreamClient, server *datastreamer.StreamServer,
+) error {
 	forcedBlockhashL1 := common.Hash{}
 
 	if !a.halted.Load() {
@@ -406,6 +423,7 @@ func (a *Aggregator) handleReceivedDataStream(entry *datastreamer.FileEntry, cli
 				err := proto.Unmarshal(entry.Data, batch)
 				if err != nil {
 					log.Errorf("Error unmarshalling batch: %v", err)
+
 					return err
 				}
 
@@ -418,6 +436,7 @@ func (a *Aggregator) handleReceivedDataStream(entry *datastreamer.FileEntry, cli
 				err := proto.Unmarshal(entry.Data, batch)
 				if err != nil {
 					log.Errorf("Error unmarshalling batch: %v", err)
+
 					return err
 				}
 
@@ -437,6 +456,7 @@ func (a *Aggregator) handleReceivedDataStream(entry *datastreamer.FileEntry, cli
 					virtualBatch, err := a.l1Syncr.GetVirtualBatchByBatchNumber(a.ctx, a.currentStreamBatch.BatchNumber)
 					if err != nil && !errors.Is(err, entities.ErrNotFound) {
 						log.Errorf("Error getting virtual batch: %v", err)
+
 						return err
 					}
 
@@ -447,28 +467,34 @@ func (a *Aggregator) handleReceivedDataStream(entry *datastreamer.FileEntry, cli
 
 						if err != nil && !errors.Is(err, entities.ErrNotFound) {
 							log.Errorf("Error getting virtual batch: %v", err)
+
 							return err
 						}
 					}
 
 					// Encode batch
-					if a.currentStreamBatch.Type != datastream.BatchType_BATCH_TYPE_INVALID && a.currentStreamBatch.Type != datastream.BatchType_BATCH_TYPE_INJECTED {
+					if a.currentStreamBatch.Type != datastream.BatchType_BATCH_TYPE_INVALID &&
+						a.currentStreamBatch.Type != datastream.BatchType_BATCH_TYPE_INJECTED {
 						batchl2Data, err = state.EncodeBatchV2(&a.currentStreamBatchRaw)
 						if err != nil {
 							log.Errorf("Error encoding batch: %v", err)
+
 							return err
 						}
 					}
 
 					// If the batch is marked as Invalid in the DS we enforce retrieve the data from L1
-					if a.cfg.UseL1BatchData || a.currentStreamBatch.Type == datastream.BatchType_BATCH_TYPE_INVALID || a.currentStreamBatch.Type == datastream.BatchType_BATCH_TYPE_INJECTED {
+					if a.cfg.UseL1BatchData ||
+						a.currentStreamBatch.Type == datastream.BatchType_BATCH_TYPE_INVALID ||
+						a.currentStreamBatch.Type == datastream.BatchType_BATCH_TYPE_INJECTED {
 						a.currentStreamBatch.BatchL2Data = virtualBatch.BatchL2Data
 					} else {
 						a.currentStreamBatch.BatchL2Data = batchl2Data
 					}
 
 					// Compare BatchL2Data from L1 and DataStream
-					if common.Bytes2Hex(batchl2Data) != common.Bytes2Hex(virtualBatch.BatchL2Data) && a.currentStreamBatch.Type != datastream.BatchType_BATCH_TYPE_INJECTED {
+					if common.Bytes2Hex(batchl2Data) != common.Bytes2Hex(virtualBatch.BatchL2Data) &&
+						a.currentStreamBatch.Type != datastream.BatchType_BATCH_TYPE_INJECTED {
 						log.Warnf("BatchL2Data from L1 and data stream are different for batch %d", a.currentStreamBatch.BatchNumber)
 
 						if a.currentStreamBatch.Type == datastream.BatchType_BATCH_TYPE_INVALID {
@@ -483,6 +509,7 @@ func (a *Aggregator) handleReceivedDataStream(entry *datastreamer.FileEntry, cli
 					sequence, err := a.l1Syncr.GetSequenceByBatchNumber(a.ctx, a.currentStreamBatch.BatchNumber)
 					if err != nil {
 						log.Errorf("Error getting sequence: %v", err)
+
 						return err
 					}
 
@@ -492,6 +519,7 @@ func (a *Aggregator) handleReceivedDataStream(entry *datastreamer.FileEntry, cli
 						sequence, err = a.l1Syncr.GetSequenceByBatchNumber(a.ctx, a.currentStreamBatch.BatchNumber)
 						if err != nil {
 							log.Errorf("Error getting sequence: %v", err)
+
 							return err
 						}
 					}
@@ -503,6 +531,7 @@ func (a *Aggregator) handleReceivedDataStream(entry *datastreamer.FileEntry, cli
 					oldDBBatch, err := a.state.GetBatch(a.ctx, a.currentStreamBatch.BatchNumber-1, nil)
 					if err != nil {
 						log.Errorf("Error getting batch %d: %v", a.currentStreamBatch.BatchNumber-1, err)
+
 						return err
 					}
 
@@ -511,6 +540,7 @@ func (a *Aggregator) handleReceivedDataStream(entry *datastreamer.FileEntry, cli
 						l1Block, err := a.l1Syncr.GetL1BlockByNumber(a.ctx, virtualBatch.BlockNumber)
 						if err != nil {
 							log.Errorf("Error getting L1 block: %v", err)
+
 							return err
 						}
 
@@ -539,6 +569,7 @@ func (a *Aggregator) handleReceivedDataStream(entry *datastreamer.FileEntry, cli
 					if err != nil {
 						if !errors.Is(err, state.ErrNotFound) {
 							log.Errorf("Error getting batch %d: %v", a.currentStreamBatch.BatchNumber, err)
+
 							return err
 						}
 					}
@@ -551,6 +582,7 @@ func (a *Aggregator) handleReceivedDataStream(entry *datastreamer.FileEntry, cli
 					err = a.state.AddBatch(a.ctx, &dbBatch, nil)
 					if err != nil {
 						log.Errorf("Error adding batch: %v", err)
+
 						return err
 					}
 
@@ -573,6 +605,7 @@ func (a *Aggregator) handleReceivedDataStream(entry *datastreamer.FileEntry, cli
 				err := proto.Unmarshal(entry.Data, l2Block)
 				if err != nil {
 					log.Errorf("Error unmarshalling L2Block: %v", err)
+
 					return err
 				}
 
@@ -593,12 +626,14 @@ func (a *Aggregator) handleReceivedDataStream(entry *datastreamer.FileEntry, cli
 				err := proto.Unmarshal(entry.Data, l2Tx)
 				if err != nil {
 					log.Errorf("Error unmarshalling L2Tx: %v", err)
+
 					return err
 				}
 				// New Tx raw
 				tx, err := state.DecodeTx(common.Bytes2Hex(l2Tx.Encoded))
 				if err != nil {
 					log.Errorf("Error decoding tx: %v", err)
+
 					return err
 				}
 
@@ -611,6 +646,7 @@ func (a *Aggregator) handleReceivedDataStream(entry *datastreamer.FileEntry, cli
 			}
 		}
 	}
+
 	return nil
 }
 
@@ -620,6 +656,7 @@ func (a *Aggregator) Start() error {
 	err := a.l1Syncr.Sync(true)
 	if err != nil {
 		log.Fatalf("Failed to synchronize from L1: %v", err)
+
 		return err
 	}
 
@@ -671,7 +708,14 @@ func (a *Aggregator) Start() error {
 		log.Infof("Starting AccInputHash:%v", accInputHash.String())
 
 		// Store Acc Input Hash of the latest verified batch
-		dummyDBBatch := state.DBBatch{Batch: state.Batch{BatchNumber: lastVerifiedBatchNumber, AccInputHash: *accInputHash}, Datastream: []byte{0}, Witness: []byte{0}}
+		dummyDBBatch := state.DBBatch{
+			Batch: state.Batch{
+				BatchNumber:  lastVerifiedBatchNumber,
+				AccInputHash: *accInputHash,
+			},
+			Datastream: []byte{0},
+			Witness:    []byte{0},
+		}
 		err = a.state.AddBatch(a.ctx, &dummyDBBatch, nil)
 		if err != nil {
 			return err
@@ -723,6 +767,7 @@ func (a *Aggregator) Start() error {
 	}
 
 	<-a.ctx.Done()
+
 	return a.ctx.Err()
 }
 
@@ -757,6 +802,7 @@ func (a *Aggregator) Channel(stream prover.AggregatorService_ChannelServer) erro
 	if !prover.SupportsForkID(a.cfg.ForkId) {
 		err := errors.New("prover does not support required fork ID")
 		log.Warn(FirstToUpper(err.Error()))
+
 		return err
 	}
 
@@ -775,11 +821,13 @@ func (a *Aggregator) Channel(stream prover.AggregatorService_ChannelServer) erro
 				if err != nil {
 					log.Errorf("Failed to check if prover is idle: %v", err)
 					time.Sleep(a.cfg.RetryTime.Duration)
+
 					continue
 				}
 				if !isIdle {
 					log.Debug("Prover is not idle")
 					time.Sleep(a.cfg.RetryTime.Duration)
+
 					continue
 				}
 
@@ -831,6 +879,7 @@ func (a *Aggregator) sendFinalProof() {
 			if err != nil {
 				log.Errorf("Failed to retrieve batch with number [%d]: %v", proof.BatchNumberFinal, err)
 				a.endProofVerification()
+
 				continue
 			}
 
@@ -912,10 +961,13 @@ func (a *Aggregator) settleDirect(
 	inputs ethmanTypes.FinalProofInputs) bool {
 	// add batch verification to be monitored
 	sender := common.HexToAddress(a.cfg.SenderAddress)
-	to, data, err := a.etherman.BuildTrustedVerifyBatchesTxData(proof.BatchNumber-1, proof.BatchNumberFinal, &inputs, sender)
+	to, data, err := a.etherman.BuildTrustedVerifyBatchesTxData(
+		proof.BatchNumber-1, proof.BatchNumberFinal, &inputs, sender,
+	)
 	if err != nil {
 		log.Errorf("Error estimating batch verification to add to eth tx manager: %v", err)
 		a.handleFailureToAddVerifyBatchToBeMonitored(ctx, proof)
+
 		return false
 	}
 
@@ -925,6 +977,7 @@ func (a *Aggregator) settleDirect(
 		mTxLogger := ethtxmanager.CreateLogger(monitoredTxID, sender, to)
 		mTxLogger.Errorf("Error to add batch verification tx to eth tx manager: %v", err)
 		a.handleFailureToAddVerifyBatchToBeMonitored(ctx, proof)
+
 		return false
 	}
 
@@ -937,7 +990,10 @@ func (a *Aggregator) settleDirect(
 }
 
 func (a *Aggregator) handleFailureToAddVerifyBatchToBeMonitored(ctx context.Context, proof *state.Proof) {
-	log := log.WithFields("proofId", proof.ProofID, "batches", fmt.Sprintf("%d-%d", proof.BatchNumber, proof.BatchNumberFinal))
+	log := log.WithFields(
+		"proofId", proof.ProofID,
+		"batches", fmt.Sprintf("%d-%d", proof.BatchNumber, proof.BatchNumberFinal),
+	)
 	proof.GeneratingSince = nil
 	err := a.state.UpdateGeneratedProof(ctx, proof, nil)
 	if err != nil {
@@ -947,7 +1003,9 @@ func (a *Aggregator) handleFailureToAddVerifyBatchToBeMonitored(ctx context.Cont
 }
 
 // buildFinalProof builds and return the final proof for an aggregated/batch proof.
-func (a *Aggregator) buildFinalProof(ctx context.Context, prover proverInterface, proof *state.Proof) (*prover.FinalProof, error) {
+func (a *Aggregator) buildFinalProof(
+	ctx context.Context, prover proverInterface, proof *state.Proof,
+) (*prover.FinalProof, error) {
 	log := log.WithFields(
 		"prover", prover.Name(),
 		"proverId", prover.ID(),
@@ -971,15 +1029,18 @@ func (a *Aggregator) buildFinalProof(ctx context.Context, prover proverInterface
 	}
 
 	// mock prover sanity check
-	if string(finalProof.Public.NewStateRoot) == mockedStateRoot && string(finalProof.Public.NewLocalExitRoot) == mockedLocalExitRoot {
+	if string(finalProof.Public.NewStateRoot) == mockedStateRoot &&
+		string(finalProof.Public.NewLocalExitRoot) == mockedLocalExitRoot {
 		// This local exit root and state root come from the mock
 		// prover, use the one captured by the executor instead
 		finalDBBatch, err := a.state.GetBatch(ctx, proof.BatchNumberFinal, nil)
 		if err != nil {
 			return nil, fmt.Errorf("failed to retrieve batch with number [%d]", proof.BatchNumberFinal)
 		}
-		log.Warnf("NewLocalExitRoot and NewStateRoot look like a mock values, using values from executor instead: LER: %v, SR: %v",
-			finalDBBatch.Batch.LocalExitRoot.TerminalString(), finalDBBatch.Batch.StateRoot.TerminalString())
+		log.Warnf(
+			"NewLocalExitRoot and NewStateRoot look like a mock values, using values from executor instead: LER: %v, SR: %v",
+			finalDBBatch.Batch.LocalExitRoot.TerminalString(), finalDBBatch.Batch.StateRoot.TerminalString(),
+		)
 		finalProof.Public.NewStateRoot = finalDBBatch.Batch.StateRoot.Bytes()
 		finalProof.Public.NewLocalExitRoot = finalDBBatch.Batch.LocalExitRoot.Bytes()
 	}
@@ -993,7 +1054,12 @@ func (a *Aggregator) buildFinalProof(ctx context.Context, prover proverInterface
 
 		if common.BytesToHash(finalProof.Public.NewStateRoot).String() != finalDBBatch.Batch.StateRoot.String() {
 			for {
-				log.Errorf("State root from the final proof does not match the expected for batch %d: Proof = [%s] Expected = [%s]", proof.BatchNumberFinal, common.BytesToHash(finalProof.Public.NewStateRoot).String(), finalDBBatch.Batch.StateRoot.String())
+				log.Errorf(
+					"State root from the final proof does not match the expected for batch %d: Proof = [%s] Expected = [%s]",
+					proof.BatchNumberFinal,
+					common.BytesToHash(finalProof.Public.NewStateRoot).String(),
+					finalDBBatch.Batch.StateRoot.String(),
+				)
 				time.Sleep(a.cfg.RetryTime.Duration)
 			}
 		} else {
@@ -1022,6 +1088,7 @@ func (a *Aggregator) tryBuildFinalProof(ctx context.Context, prover proverInterf
 	var err error
 	if !a.canVerifyProof() {
 		log.Debug("Time to verify proof not reached or proof verification in progress")
+
 		return false, nil
 	}
 	log.Debug("Send final proof time reached")
@@ -1035,10 +1102,11 @@ func (a *Aggregator) tryBuildFinalProof(ctx context.Context, prover proverInterf
 		// we don't have a proof generating at the moment, check if we
 		// have a proof ready to verify
 
-		proof, err = a.getAndLockProofReadyToVerify(ctx, prover, lastVerifiedBatchNumber)
+		proof, err = a.getAndLockProofReadyToVerify(ctx, lastVerifiedBatchNumber)
 		if errors.Is(err, state.ErrNotFound) {
 			// nothing to verify, swallow the error
 			log.Debug("No proof ready to verify")
+
 			return false, nil
 		}
 		if err != nil {
@@ -1077,6 +1145,7 @@ func (a *Aggregator) tryBuildFinalProof(ctx context.Context, prover proverInterf
 	if err != nil {
 		err = fmt.Errorf("failed to build final proof, %w", err)
 		log.Error(FirstToUpper(err.Error()))
+
 		return false, err
 	}
 
@@ -1094,26 +1163,40 @@ func (a *Aggregator) tryBuildFinalProof(ctx context.Context, prover proverInterf
 	}
 
 	log.Debug("tryBuildFinalProof end")
+
 	return true, nil
 }
 
-func (a *Aggregator) validateEligibleFinalProof(ctx context.Context, proof *state.Proof, lastVerifiedBatchNum uint64) (bool, error) {
+func (a *Aggregator) validateEligibleFinalProof(
+	ctx context.Context, proof *state.Proof, lastVerifiedBatchNum uint64,
+) (bool, error) {
 	batchNumberToVerify := lastVerifiedBatchNum + 1
 
 	if proof.BatchNumber != batchNumberToVerify {
 		if proof.BatchNumber < batchNumberToVerify && proof.BatchNumberFinal >= batchNumberToVerify {
 			// We have a proof that contains some batches below the last batch verified, anyway can be eligible as final proof
-			log.Warnf("Proof %d-%d contains some batches lower than last batch verified %d. Check anyway if it is eligible", proof.BatchNumber, proof.BatchNumberFinal, lastVerifiedBatchNum)
+			log.Warnf(
+				"Proof %d-%d contains some batches lower than last batch verified %d. Check anyway if it is eligible",
+				proof.BatchNumber, proof.BatchNumberFinal, lastVerifiedBatchNum,
+			)
 		} else if proof.BatchNumberFinal < batchNumberToVerify {
 			// We have a proof that contains batches below that the last batch verified, we need to delete this proof
-			log.Warnf("Proof %d-%d lower than next batch to verify %d. Deleting it", proof.BatchNumber, proof.BatchNumberFinal, batchNumberToVerify)
+			log.Warnf(
+				"Proof %d-%d lower than next batch to verify %d. Deleting it",
+				proof.BatchNumber, proof.BatchNumberFinal, batchNumberToVerify,
+			)
 			err := a.state.DeleteGeneratedProofs(ctx, proof.BatchNumber, proof.BatchNumberFinal, nil)
 			if err != nil {
 				return false, fmt.Errorf("failed to delete discarded proof, err: %w", err)
 			}
+
 			return false, nil
 		} else {
-			log.Debugf("Proof batch number %d is not the following to last verfied batch number %d", proof.BatchNumber, lastVerifiedBatchNum)
+			log.Debugf(
+				"Proof batch number %d is not the following to last verfied batch number %d",
+				proof.BatchNumber, lastVerifiedBatchNum,
+			)
+
 			return false, nil
 		}
 	}
@@ -1123,13 +1206,20 @@ func (a *Aggregator) validateEligibleFinalProof(ctx context.Context, proof *stat
 		return false, fmt.Errorf("failed to check if proof contains complete sequences, %w", err)
 	}
 	if !bComplete {
-		log.Infof("Recursive proof %d-%d not eligible to be verified: not containing complete sequences", proof.BatchNumber, proof.BatchNumberFinal)
+		log.Infof(
+			"Recursive proof %d-%d not eligible to be verified: not containing complete sequences",
+			proof.BatchNumber, proof.BatchNumberFinal,
+		)
+
 		return false, nil
 	}
+
 	return true, nil
 }
 
-func (a *Aggregator) getAndLockProofReadyToVerify(ctx context.Context, prover proverInterface, lastVerifiedBatchNum uint64) (*state.Proof, error) {
+func (a *Aggregator) getAndLockProofReadyToVerify(
+	ctx context.Context, lastVerifiedBatchNum uint64,
+) (*state.Proof, error) {
 	a.stateDBMutex.Lock()
 	defer a.stateDBMutex.Unlock()
 
@@ -1155,6 +1245,7 @@ func (a *Aggregator) unlockProofsToAggregate(ctx context.Context, proof1 *state.
 	dbTx, err := a.state.BeginStateTransaction(ctx)
 	if err != nil {
 		log.Warnf("Failed to begin transaction to release proof aggregation state, err: %v", err)
+
 		return err
 	}
 
@@ -1169,8 +1260,10 @@ func (a *Aggregator) unlockProofsToAggregate(ctx context.Context, proof1 *state.
 		if err := dbTx.Rollback(ctx); err != nil {
 			err := fmt.Errorf("failed to rollback proof aggregation state: %w", err)
 			log.Error(FirstToUpper(err.Error()))
+
 			return err
 		}
+
 		return fmt.Errorf("failed to release proof aggregation state: %w", err)
 	}
 
@@ -1182,7 +1275,9 @@ func (a *Aggregator) unlockProofsToAggregate(ctx context.Context, proof1 *state.
 	return nil
 }
 
-func (a *Aggregator) getAndLockProofsToAggregate(ctx context.Context, prover proverInterface) (*state.Proof, *state.Proof, error) {
+func (a *Aggregator) getAndLockProofsToAggregate(
+	ctx context.Context, prover proverInterface,
+) (*state.Proof, *state.Proof, error) {
 	log := log.WithFields(
 		"prover", prover.Name(),
 		"proverId", prover.ID(),
@@ -1201,6 +1296,7 @@ func (a *Aggregator) getAndLockProofsToAggregate(ctx context.Context, prover pro
 	dbTx, err := a.state.BeginStateTransaction(ctx)
 	if err != nil {
 		log.Errorf("Failed to begin transaction to set proof aggregation state, err: %v", err)
+
 		return nil, nil, err
 	}
 
@@ -1216,8 +1312,10 @@ func (a *Aggregator) getAndLockProofsToAggregate(ctx context.Context, prover pro
 		if err := dbTx.Rollback(ctx); err != nil {
 			err := fmt.Errorf("failed to rollback proof aggregation state %w", err)
 			log.Error(FirstToUpper(err.Error()))
+
 			return nil, nil, err
 		}
+
 		return nil, nil, fmt.Errorf("failed to set proof aggregation state %w", err)
 	}
 
@@ -1244,6 +1342,7 @@ func (a *Aggregator) tryAggregateProofs(ctx context.Context, prover proverInterf
 	if errors.Is(err0, state.ErrNotFound) {
 		// nothing to aggregate, swallow the error
 		log.Debug("Nothing to aggregate")
+
 		return false, nil
 	}
 	if err0 != nil {
@@ -1265,7 +1364,10 @@ func (a *Aggregator) tryAggregateProofs(ctx context.Context, prover proverInterf
 		log.Debug("tryAggregateProofs end")
 	}()
 
-	log.Infof("Aggregating proofs: %d-%d and %d-%d", proof1.BatchNumber, proof1.BatchNumberFinal, proof2.BatchNumber, proof2.BatchNumberFinal)
+	log.Infof(
+		"Aggregating proofs: %d-%d and %d-%d",
+		proof1.BatchNumber, proof1.BatchNumberFinal, proof2.BatchNumber, proof2.BatchNumberFinal,
+	)
 
 	batches := fmt.Sprintf("%d-%d", proof1.BatchNumber, proof2.BatchNumberFinal)
 	log = log.WithFields("batches", batches)
@@ -1278,6 +1380,7 @@ func (a *Aggregator) tryAggregateProofs(ctx context.Context, prover proverInterf
 	if err != nil {
 		err = fmt.Errorf("failed to serialize input prover, %w", err)
 		log.Error(FirstToUpper(err.Error()))
+
 		return false, err
 	}
 
@@ -1293,6 +1396,7 @@ func (a *Aggregator) tryAggregateProofs(ctx context.Context, prover proverInterf
 	if err != nil {
 		err = fmt.Errorf("failed to get aggregated proof id, %w", err)
 		log.Error(FirstToUpper(err.Error()))
+
 		return false, err
 	}
 
@@ -1305,6 +1409,7 @@ func (a *Aggregator) tryAggregateProofs(ctx context.Context, prover proverInterf
 	if err != nil {
 		err = fmt.Errorf("failed to get aggregated proof from prover, %w", err)
 		log.Error(FirstToUpper(err.Error()))
+
 		return false, err
 	}
 
@@ -1318,6 +1423,7 @@ func (a *Aggregator) tryAggregateProofs(ctx context.Context, prover proverInterf
 	if err != nil {
 		err = fmt.Errorf("failed to begin transaction to update proof aggregation state, %w", err)
 		log.Error(FirstToUpper(err.Error()))
+
 		return false, err
 	}
 
@@ -1326,10 +1432,12 @@ func (a *Aggregator) tryAggregateProofs(ctx context.Context, prover proverInterf
 		if err := dbTx.Rollback(ctx); err != nil {
 			err := fmt.Errorf("failed to rollback proof aggregation state, %w", err)
 			log.Error(FirstToUpper(err.Error()))
+
 			return false, err
 		}
 		err = fmt.Errorf("failed to delete previously aggregated proofs, %w", err)
 		log.Error(FirstToUpper(err.Error()))
+
 		return false, err
 	}
 
@@ -1341,10 +1449,12 @@ func (a *Aggregator) tryAggregateProofs(ctx context.Context, prover proverInterf
 		if err := dbTx.Rollback(ctx); err != nil {
 			err := fmt.Errorf("failed to rollback proof aggregation state, %w", err)
 			log.Error(FirstToUpper(err.Error()))
+
 			return false, err
 		}
 		err = fmt.Errorf("failed to store the recursive proof, %w", err)
 		log.Error(FirstToUpper(err.Error()))
+
 		return false, err
 	}
 
@@ -1352,6 +1462,7 @@ func (a *Aggregator) tryAggregateProofs(ctx context.Context, prover proverInterf
 	if err != nil {
 		err = fmt.Errorf("failed to store the recursive proof, %w", err)
 		log.Error(FirstToUpper(err.Error()))
+
 		return false, err
 	}
 
@@ -1376,6 +1487,7 @@ func (a *Aggregator) tryAggregateProofs(ctx context.Context, prover proverInterf
 		if err != nil {
 			err = fmt.Errorf("failed to store batch proof result, %w", err)
 			log.Error(FirstToUpper(err.Error()))
+
 			return false, err
 		}
 	}
@@ -1392,7 +1504,9 @@ func (a *Aggregator) getVerifiedBatchAccInputHash(ctx context.Context, batchNumb
 	return &accInputHash, nil
 }
 
-func (a *Aggregator) getAndLockBatchToProve(ctx context.Context, prover proverInterface) (*state.Batch, []byte, *state.Proof, error) {
+func (a *Aggregator) getAndLockBatchToProve(
+	ctx context.Context, prover proverInterface,
+) (*state.Batch, []byte, *state.Proof, error) {
 	proverID := prover.ID()
 	proverName := prover.Name()
 
@@ -1420,6 +1534,7 @@ func (a *Aggregator) getAndLockBatchToProve(ctx context.Context, prover proverIn
 		proofExists, err = a.state.CheckProofExistsForBatch(ctx, batchNumberToVerify, nil)
 		if err != nil {
 			log.Infof("Error checking proof exists for batch %d", batchNumberToVerify)
+
 			return nil, nil, nil, err
 		}
 	}
@@ -1433,6 +1548,7 @@ func (a *Aggregator) getAndLockBatchToProve(ctx context.Context, prover proverIn
 	// Not found, so it it not possible to verify the batch yet
 	if sequence == nil || errors.Is(err, entities.ErrNotFound) {
 		log.Infof("No sequence found for batch %d", batchNumberToVerify)
+
 		return nil, nil, nil, state.ErrNotFound
 	}
 
@@ -1447,18 +1563,21 @@ func (a *Aggregator) getAndLockBatchToProve(ctx context.Context, prover proverIn
 		if errors.Is(err, state.ErrNotFound) {
 			log.Infof("Batch (%d) is not yet in DB", batchNumberToVerify)
 		}
+
 		return nil, nil, nil, err
 	}
 
 	// Check if the witness is already in the DB
 	if len(dbBatch.Witness) == 0 {
 		log.Infof("Witness for batch %d is not yet in DB", batchNumberToVerify)
+
 		return nil, nil, nil, state.ErrNotFound
 	}
 
 	err = a.state.AddSequence(ctx, stateSequence, nil)
 	if err != nil {
 		log.Infof("Error storing sequence for batch %d", batchNumberToVerify)
+
 		return nil, nil, nil, err
 	}
 
@@ -1472,11 +1591,13 @@ func (a *Aggregator) getAndLockBatchToProve(ctx context.Context, prover proverIn
 	isProfitable, err := a.profitabilityChecker.IsProfitable(ctx, big.NewInt(0))
 	if err != nil {
 		log.Errorf("Failed to check aggregator profitability, err: %v", err)
+
 		return nil, nil, nil, err
 	}
 
 	if !isProfitable {
 		log.Infof("Batch is not profitable, pol collateral %d", big.NewInt(0))
+
 		return nil, nil, nil, err
 	}
 
@@ -1493,6 +1614,7 @@ func (a *Aggregator) getAndLockBatchToProve(ctx context.Context, prover proverIn
 	err = a.state.AddGeneratedProof(ctx, proof, nil)
 	if err != nil {
 		log.Errorf("Failed to add batch proof, err: %v", err)
+
 		return nil, nil, nil, err
 	}
 
@@ -1511,6 +1633,7 @@ func (a *Aggregator) tryGenerateBatchProof(ctx context.Context, prover proverInt
 	if errors.Is(err0, state.ErrNotFound) || errors.Is(err0, entities.ErrNotFound) {
 		// nothing to proof, swallow the error
 		log.Debug("Nothing to generate proof")
+
 		return false, nil
 	}
 	if err0 != nil {
@@ -1540,6 +1663,7 @@ func (a *Aggregator) tryGenerateBatchProof(ctx context.Context, prover proverInt
 	if err != nil {
 		err = fmt.Errorf("failed to build input prover, %w", err)
 		log.Error(FirstToUpper(err.Error()))
+
 		return false, err
 	}
 
@@ -1550,6 +1674,7 @@ func (a *Aggregator) tryGenerateBatchProof(ctx context.Context, prover proverInt
 	if err != nil {
 		err = fmt.Errorf("failed to get batch proof id, %w", err)
 		log.Error(FirstToUpper(err.Error()))
+
 		return false, err
 	}
 
@@ -1561,6 +1686,7 @@ func (a *Aggregator) tryGenerateBatchProof(ctx context.Context, prover proverInt
 	if err != nil {
 		err = fmt.Errorf("failed to get proof from prover, %w", err)
 		log.Error(FirstToUpper(err.Error()))
+
 		return false, err
 	}
 
@@ -1569,7 +1695,10 @@ func (a *Aggregator) tryGenerateBatchProof(ctx context.Context, prover proverInt
 	// Sanity Check: state root from the proof must match the one from the batch
 	if a.cfg.BatchProofSanityCheckEnabled && (stateRoot != common.Hash{}) && (stateRoot != batchToProve.StateRoot) {
 		for {
-			log.Errorf("State root from the proof does not match the expected for batch %d: Proof = [%s] Expected = [%s]", batchToProve.BatchNumber, stateRoot.String(), batchToProve.StateRoot.String())
+			log.Errorf(
+				"State root from the proof does not match the expected for batch %d: Proof = [%s] Expected = [%s]",
+				batchToProve.BatchNumber, stateRoot.String(), batchToProve.StateRoot.String(),
+			)
 			time.Sleep(a.cfg.RetryTime.Duration)
 		}
 	} else {
@@ -1597,6 +1726,7 @@ func (a *Aggregator) tryGenerateBatchProof(ctx context.Context, prover proverInt
 		if err != nil {
 			err = fmt.Errorf("failed to store batch proof result, %w", err)
 			log.Error(FirstToUpper(err.Error()))
+
 			return false, err
 		}
 	}
@@ -1609,10 +1739,12 @@ func (a *Aggregator) tryGenerateBatchProof(ctx context.Context, prover proverInt
 func (a *Aggregator) canVerifyProof() bool {
 	a.timeSendFinalProofMutex.RLock()
 	defer a.timeSendFinalProofMutex.RUnlock()
+
 	return a.timeSendFinalProof.Before(time.Now()) && !a.verifyingProof
 }
 
-// startProofVerification sets to true the verifyingProof variable to indicate that there is a proof verification in progress
+// startProofVerification sets the verifyingProof variable to true
+// to indicate that there is a proof verification in progress.
 func (a *Aggregator) startProofVerification() {
 	a.timeSendFinalProofMutex.Lock()
 	defer a.timeSendFinalProofMutex.Unlock()
@@ -1633,7 +1765,9 @@ func (a *Aggregator) resetVerifyProofTime() {
 	a.timeSendFinalProof = time.Now().Add(a.cfg.VerifyProofInterval.Duration)
 }
 
-func (a *Aggregator) buildInputProver(ctx context.Context, batchToVerify *state.Batch, witness []byte) (*prover.StatelessInputProver, error) {
+func (a *Aggregator) buildInputProver(
+	ctx context.Context, batchToVerify *state.Batch, witness []byte,
+) (*prover.StatelessInputProver, error) {
 	isForcedBatch := false
 	batchRawData := &state.BatchRawV2{}
 	var err error
@@ -1644,6 +1778,7 @@ func (a *Aggregator) buildInputProver(ctx context.Context, batchToVerify *state.
 		batchRawData, err = state.DecodeBatchV2(batchToVerify.BatchL2Data)
 		if err != nil {
 			log.Errorf("Failed to decode batch data, err: %v", err)
+
 			return nil, err
 		}
 	}
@@ -1652,7 +1787,7 @@ func (a *Aggregator) buildInputProver(ctx context.Context, batchToVerify *state.
 	forcedBlockhashL1 := common.Hash{}
 	l1InfoRoot := batchToVerify.L1InfoRoot.Bytes()
 	if !isForcedBatch {
-		tree, err := l1infotree.NewL1InfoTree(32, [][32]byte{}) // nolint:gomnd
+		tree, err := l1infotree.NewL1InfoTree(32, [][32]byte{}) //nolint:gomnd
 		if err != nil {
 			return nil, err
 		}
@@ -1673,6 +1808,7 @@ func (a *Aggregator) buildInputProver(ctx context.Context, batchToVerify *state.
 				leaves, err := a.l1Syncr.GetL1InfoTreeLeaves(ctx, []uint32{l2blockRaw.IndexL1InfoTree})
 				if err != nil {
 					log.Errorf("Error getting l1InfoTreeLeaf: %v", err)
+
 					return nil, err
 				}
 
@@ -1683,14 +1819,19 @@ func (a *Aggregator) buildInputProver(ctx context.Context, batchToVerify *state.
 				smtProof, calculatedL1InfoRoot, err := tree.ComputeMerkleProof(l2blockRaw.IndexL1InfoTree, aLeaves)
 				if err != nil {
 					log.Errorf("Error computing merkle proof: %v", err)
+
 					return nil, err
 				}
 
 				if batchToVerify.L1InfoRoot != calculatedL1InfoRoot {
-					return nil, fmt.Errorf("error: l1InfoRoot mismatch. L1InfoRoot: %s, calculatedL1InfoRoot: %s. l1InfoTreeIndex: %d", batchToVerify.L1InfoRoot.String(), calculatedL1InfoRoot.String(), l2blockRaw.IndexL1InfoTree)
+					return nil, fmt.Errorf(
+						"error: l1InfoRoot mismatch. L1InfoRoot: %s, calculatedL1InfoRoot: %s. l1InfoTreeIndex: %d",
+						batchToVerify.L1InfoRoot.String(), calculatedL1InfoRoot.String(), l2blockRaw.IndexL1InfoTree,
+					)
 				}
 
 				protoProof := make([][]byte, len(smtProof))
+
 				for i, proof := range smtProof {
 					tmpProof := proof
 					protoProof[i] = tmpProof[:]
@@ -1710,11 +1851,13 @@ func (a *Aggregator) buildInputProver(ctx context.Context, batchToVerify *state.
 			virtualBatch, err := a.l1Syncr.GetVirtualBatchByBatchNumber(ctx, batchToVerify.BatchNumber)
 			if err != nil {
 				log.Errorf("Error getting virtual batch: %v", err)
+
 				return nil, err
 			}
 			l1Block, err := a.l1Syncr.GetL1BlockByNumber(ctx, virtualBatch.BlockNumber)
 			if err != nil {
 				log.Errorf("Error getting l1 block: %v", err)
+
 				return nil, err
 			}
 
@@ -1752,6 +1895,7 @@ func (a *Aggregator) buildInputProver(ctx context.Context, batchToVerify *state.
 	}
 
 	printInputProver(inputProver)
+
 	return inputProver, nil
 }
 
@@ -1777,6 +1921,7 @@ func getWitness(batchNumber uint64, URL string, fullWitness bool) ([]byte, error
 		if response.Error.Message == "busy" {
 			return nil, errBusy
 		}
+
 		return nil, fmt.Errorf("error from witness for batch %d: %v", batchNumber, response.Error)
 	}
 
@@ -1820,8 +1965,11 @@ func newHealthChecker() *healthChecker {
 
 // Check returns the current status of the server for unary gRPC health requests,
 // for now if the server is up and able to respond we will always return SERVING.
-func (hc *healthChecker) Check(ctx context.Context, req *grpchealth.HealthCheckRequest) (*grpchealth.HealthCheckResponse, error) {
+func (hc *healthChecker) Check(
+	ctx context.Context, req *grpchealth.HealthCheckRequest,
+) (*grpchealth.HealthCheckResponse, error) {
 	log.Info("Serving the Check request for health check")
+
 	return &grpchealth.HealthCheckResponse{
 		Status: grpchealth.HealthCheckResponse_SERVING,
 	}, nil
@@ -1831,6 +1979,7 @@ func (hc *healthChecker) Check(ctx context.Context, req *grpchealth.HealthCheckR
 // for now if the server is up and able to respond we will always return SERVING.
 func (hc *healthChecker) Watch(req *grpchealth.HealthCheckRequest, server grpchealth.Health_WatchServer) error {
 	log.Info("Serving the Watch request for health check")
+
 	return server.Send(&grpchealth.HealthCheckResponse{
 		Status: grpchealth.HealthCheckResponse_SERVING,
 	})
@@ -1912,5 +2061,6 @@ func (a *Aggregator) cleanupLockedProofs() {
 func FirstToUpper(s string) string {
 	runes := []rune(s)
 	runes[0] = unicode.ToUpper(runes[0])
+
 	return string(runes)
 }
diff --git a/aggregator/config.go b/aggregator/config.go
index 382801873..e17a24d1f 100644
--- a/aggregator/config.go
+++ b/aggregator/config.go
@@ -74,7 +74,9 @@ type Config struct {
 	// this parameter is used for the base tx profitability checker
 	TxProfitabilityMinReward TokenAmountWithDecimals `mapstructure:"TxProfitabilityMinReward"`
 
-	// IntervalAfterWhichBatchConsolidateAnyway this is interval for the main sequencer, that will check if there is no transactions
+	// IntervalAfterWhichBatchConsolidateAnyway is how long the main sequencer waits without seeing
+	// any transactions before it consolidates the batch anyway.
 	IntervalAfterWhichBatchConsolidateAnyway types.Duration `mapstructure:"IntervalAfterWhichBatchConsolidateAnyway"`
 
 	// BatchProofSanityCheckEnabled is a flag to enable the sanity check of the batch proof
@@ -87,7 +89,7 @@ type Config struct {
 	ChainID uint64
 
 	// ForkID is the L2 ForkID provided by the Network Config
-	ForkId uint64 `mapstructure:"ForkId"`
+	ForkId uint64 `mapstructure:"ForkId"` //nolint:stylecheck
 
 	// SenderAddress defines which private key the eth tx manager needs to use
 	// to sign the L1 txs
@@ -137,7 +139,8 @@ type Config struct {
 	// Synchronizer config
 	Synchronizer syncronizerConfig.Config `mapstructure:"Synchronizer"`
 
-	// SettlementBackend configuration defines how a final ZKP should be settled. Directly to L1 or over the Beethoven service.
+	// SettlementBackend configuration defines how a final ZKP should be settled.
+	// It can be settled directly to L1 or over Agglayer.
 	SettlementBackend SettlementBackend `mapstructure:"SettlementBackend" jsonschema:"enum=agglayer,enum=l1"`
 
 	// SequencerPrivateKey Private key of the trusted sequencer
@@ -178,5 +181,6 @@ func newKeyFromKeystore(cfg types.KeystoreFileConfig) (*ecdsa.PrivateKey, error)
 	if err != nil {
 		return nil, err
 	}
+
 	return key.PrivateKey, nil
 }
diff --git a/aggregator/db/db.go b/aggregator/db/db.go
index b9112f530..8f05e8452 100644
--- a/aggregator/db/db.go
+++ b/aggregator/db/db.go
@@ -10,9 +10,13 @@ import (
 
 // NewSQLDB creates a new SQL DB
 func NewSQLDB(cfg Config) (*pgxpool.Pool, error) {
-	config, err := pgxpool.ParseConfig(fmt.Sprintf("postgres://%s:%s@%s:%s/%s?pool_max_conns=%d", cfg.User, cfg.Password, cfg.Host, cfg.Port, cfg.Name, cfg.MaxConns))
+	config, err := pgxpool.ParseConfig(fmt.Sprintf(
+		"postgres://%s:%s@%s:%s/%s?pool_max_conns=%d",
+		cfg.User, cfg.Password, cfg.Host, cfg.Port, cfg.Name, cfg.MaxConns,
+	))
 	if err != nil {
 		log.Errorf("Unable to parse DB config: %v\n", err)
+
 		return nil, err
 	}
 
@@ -23,6 +27,7 @@ func NewSQLDB(cfg Config) (*pgxpool.Pool, error) {
 	conn, err := pgxpool.ConnectConfig(context.Background(), config)
 	if err != nil {
 		log.Errorf("Unable to connect to database: %v\n", err)
+
 		return nil, err
 	}
 
diff --git a/aggregator/db/migrations.go b/aggregator/db/migrations.go
index 8aeda2e92..20e8c29ae 100644
--- a/aggregator/db/migrations.go
+++ b/aggregator/db/migrations.go
@@ -30,6 +30,7 @@ func init() {
 // RunMigrationsUp runs migrate-up for the given config.
 func RunMigrationsUp(cfg Config, name string) error {
 	log.Info("running migrations up")
+
 	return runMigrations(cfg, name, migrate.Up)
 }
 
@@ -41,6 +42,7 @@ func CheckMigrations(cfg Config, name string) error {
 // RunMigrationsDown runs migrate-down for the given config.
 func RunMigrationsDown(cfg Config, name string) error {
 	log.Info("running migrations down")
+
 	return runMigrations(cfg, name, migrate.Down)
 }
 
@@ -48,7 +50,10 @@ func RunMigrationsDown(cfg Config, name string) error {
 // the database updated with the latest changes in either direction,
 // up or down.
 func runMigrations(cfg Config, name string, direction migrate.MigrationDirection) error {
-	c, err := pgx.ParseConfig(fmt.Sprintf("postgres://%s:%s@%s:%s/%s", cfg.User, cfg.Password, cfg.Host, cfg.Port, cfg.Name))
+	c, err := pgx.ParseConfig(fmt.Sprintf(
+		"postgres://%s:%s@%s:%s/%s",
+		cfg.User, cfg.Password, cfg.Host, cfg.Port, cfg.Name,
+	))
 	if err != nil {
 		return err
 	}
@@ -64,17 +69,22 @@ func runMigrations(cfg Config, name string, direction migrate.MigrationDirection
 		FileSystem: embedMigration,
 		Root:       "migrations",
 	}
+
 	nMigrations, err := migrate.Exec(db, "postgres", migrations, direction)
 	if err != nil {
 		return err
 	}
 
 	log.Info("successfully ran ", nMigrations, " migrations")
+
 	return nil
 }
 
 func checkMigrations(cfg Config, name string) error {
-	c, err := pgx.ParseConfig(fmt.Sprintf("postgres://%s:%s@%s:%s/%s", cfg.User, cfg.Password, cfg.Host, cfg.Port, cfg.Name))
+	c, err := pgx.ParseConfig(fmt.Sprintf(
+		"postgres://%s:%s@%s:%s/%s",
+		cfg.User, cfg.Password, cfg.Host, cfg.Port, cfg.Name,
+	))
 	if err != nil {
 		return err
 	}
@@ -87,9 +97,11 @@ func checkMigrations(cfg Config, name string) error {
 	}
 
 	migrationSource := &migrate.EmbedFileSystemMigrationSource{FileSystem: embedMigration}
+
 	migrations, err := migrationSource.FindMigrations()
 	if err != nil {
 		log.Errorf("error getting migrations from source: %v", err)
+
 		return err
 	}
 
@@ -105,12 +117,17 @@ func checkMigrations(cfg Config, name string) error {
 	err = db.QueryRow(query).Scan(&actual)
 	if err != nil {
 		log.Error("error getting migrations count: ", err)
+
 		return err
 	}
 	if expected == actual {
 		log.Infof("Found %d migrations as expected", actual)
 	} else {
-		return fmt.Errorf("error the component needs to run %d migrations before starting. DB only contains %d migrations", expected, actual)
+		return fmt.Errorf(
+			"error the component needs to run %d migrations before starting. DB only contains %d migrations",
+			expected, actual,
+		)
 	}
+
 	return nil
 }
diff --git a/aggregator/interfaces.go b/aggregator/interfaces.go
index 6b5ba63a0..b231de350 100644
--- a/aggregator/interfaces.go
+++ b/aggregator/interfaces.go
@@ -30,7 +30,9 @@ type proverInterface interface {
 type etherman interface {
 	GetRollupId() uint32
 	GetLatestVerifiedBatchNum() (uint64, error)
-	BuildTrustedVerifyBatchesTxData(lastVerifiedBatch, newVerifiedBatch uint64, inputs *ethmanTypes.FinalProofInputs, beneficiary common.Address) (to *common.Address, data []byte, err error)
+	BuildTrustedVerifyBatchesTxData(
+		lastVerifiedBatch, newVerifiedBatch uint64, inputs *ethmanTypes.FinalProofInputs, beneficiary common.Address,
+	) (to *common.Address, data []byte, err error)
 	GetLatestBlockHeader(ctx context.Context) (*types.Header, error)
 	GetBatchAccInputHash(ctx context.Context, batchNumber uint64) (common.Hash, error)
 }
diff --git a/aggregator/profitabilitychecker.go b/aggregator/profitabilitychecker.go
index 27e3f7056..225cedb9c 100644
--- a/aggregator/profitabilitychecker.go
+++ b/aggregator/profitabilitychecker.go
@@ -24,7 +24,9 @@ type TxProfitabilityCheckerBase struct {
 }
 
 // NewTxProfitabilityCheckerBase init base tx profitability checker
-func NewTxProfitabilityCheckerBase(state stateInterface, interval time.Duration, minReward *big.Int) *TxProfitabilityCheckerBase {
+func NewTxProfitabilityCheckerBase(
+	state stateInterface, interval time.Duration, minReward *big.Int,
+) *TxProfitabilityCheckerBase {
 	return &TxProfitabilityCheckerBase{
 		State:                             state,
 		IntervalAfterWhichBatchSentAnyway: interval,
@@ -43,7 +45,6 @@ func (pc *TxProfitabilityCheckerBase) IsProfitable(ctx context.Context, polColla
 	//		return true, nil
 	//	}
 	//}
-
 	return polCollateral.Cmp(pc.MinReward) >= 0, nil
 }
 
@@ -72,12 +73,12 @@ func (pc *TxProfitabilityCheckerAcceptAll) IsProfitable(ctx context.Context, pol
 	//		return true, nil
 	//	}
 	//}
-
 	return true, nil
 }
 
 // TODO: it is currently impossible to check when the batch was consolidated, because that is not saved
-//func isConsolidatedBatchAppeared(ctx context.Context, state stateInterface, intervalAfterWhichBatchConsolidatedAnyway time.Duration) (bool, error) {
+//func isConsolidatedBatchAppeared(ctx context.Context, state stateInterface,
+//  intervalAfterWhichBatchConsolidatedAnyway time.Duration) (bool, error) {
 //	batch, err := state.GetLastVerifiedBatch(ctx, nil)
 //	if err != nil {
 //		return false, fmt.Errorf("failed to get last verified batch, err: %v", err)
diff --git a/aggregator/prover/prover.go b/aggregator/prover/prover.go
index fd9b1e7d3..285eadb27 100644
--- a/aggregator/prover/prover.go
+++ b/aggregator/prover/prover.go
@@ -23,13 +23,13 @@ const (
 )
 
 var (
-	ErrBadProverResponse    = errors.New("Prover returned wrong type for response")  //nolint:revive
-	ErrProverInternalError  = errors.New("Prover returned INTERNAL_ERROR response")  //nolint:revive
-	ErrProverCompletedError = errors.New("Prover returned COMPLETED_ERROR response") //nolint:revive
-	ErrBadRequest           = errors.New("Prover returned ERROR for a bad request")  //nolint:revive
-	ErrUnspecified          = errors.New("Prover returned an UNSPECIFIED response")  //nolint:revive
-	ErrUnknown              = errors.New("Prover returned an unknown response")      //nolint:revive
-	ErrProofCanceled        = errors.New("Proof has been canceled")                  //nolint:revive
+	ErrBadProverResponse    = errors.New("prover returned wrong type for response")  //nolint:revive
+	ErrProverInternalError  = errors.New("prover returned INTERNAL_ERROR response")  //nolint:revive
+	ErrProverCompletedError = errors.New("prover returned COMPLETED_ERROR response") //nolint:revive
+	ErrBadRequest           = errors.New("prover returned ERROR for a bad request")  //nolint:revive
+	ErrUnspecified          = errors.New("prover returned an UNSPECIFIED response")  //nolint:revive
+	ErrUnknown              = errors.New("prover returned an unknown response")      //nolint:revive
+	ErrProofCanceled        = errors.New("proof has been canceled")                  //nolint:revive
 )
 
 // Prover abstraction of the grpc prover client.
@@ -42,18 +42,22 @@ type Prover struct {
 }
 
 // New returns a new Prover instance.
-func New(stream AggregatorService_ChannelServer, addr net.Addr, proofStatePollingInterval types.Duration) (*Prover, error) {
+func New(
+	stream AggregatorService_ChannelServer, addr net.Addr, proofStatePollingInterval types.Duration,
+) (*Prover, error) {
 	p := &Prover{
 		stream:                    stream,
 		address:                   addr,
 		proofStatePollingInterval: proofStatePollingInterval,
 	}
+
 	status, err := p.Status()
 	if err != nil {
-		return nil, fmt.Errorf("Failed to retrieve prover id %w", err)
+		return nil, fmt.Errorf("failed to retrieve prover id %w", err)
 	}
 	p.name = status.ProverName
 	p.id = status.ProverId
+
 	return p, nil
 }
 
@@ -68,6 +72,7 @@ func (p *Prover) Addr() string {
 	if p.address == nil {
 		return ""
 	}
+
 	return p.address.String()
 }
 
@@ -85,6 +90,7 @@ func (p *Prover) Status() (*GetStatusResponse, error) {
 	if msg, ok := res.Response.(*ProverMessage_GetStatusResponse); ok {
 		return msg.GetStatusResponse, nil
 	}
+
 	return nil, fmt.Errorf("%w, wanted %T, got %T", ErrBadProverResponse, &ProverMessage_GetStatusResponse{}, res.Response)
 }
 
@@ -94,6 +100,7 @@ func (p *Prover) IsIdle() (bool, error) {
 	if err != nil {
 		return false, err
 	}
+
 	return status.Status == GetStatusResponse_STATUS_IDLE, nil
 }
 
@@ -102,6 +109,7 @@ func (p *Prover) SupportsForkID(forkID uint64) bool {
 	status, err := p.Status()
 	if err != nil {
 		log.Warnf("Error asking status for prover ID %s: %v", p.ID(), err)
+
 		return false
 	}
 
@@ -126,19 +134,34 @@ func (p *Prover) BatchProof(input *StatelessInputProver) (*string, error) {
 	if msg, ok := res.Response.(*ProverMessage_GenBatchProofResponse); ok {
 		switch msg.GenBatchProofResponse.Result {
 		case Result_RESULT_UNSPECIFIED:
-			return nil, fmt.Errorf("failed to generate proof %s, %w, input %v", msg.GenBatchProofResponse.String(), ErrUnspecified, input)
+			return nil, fmt.Errorf(
+				"failed to generate proof %s, %w, input %v",
+				msg.GenBatchProofResponse.String(), ErrUnspecified, input,
+			)
 		case Result_RESULT_OK:
 			return &msg.GenBatchProofResponse.Id, nil
 		case Result_RESULT_ERROR:
-			return nil, fmt.Errorf("failed to generate proof %s, %w, input %v", msg.GenBatchProofResponse.String(), ErrBadRequest, input)
+			return nil, fmt.Errorf(
+				"failed to generate proof %s, %w, input %v",
+				msg.GenBatchProofResponse.String(), ErrBadRequest, input,
+			)
 		case Result_RESULT_INTERNAL_ERROR:
-			return nil, fmt.Errorf("failed to generate proof %s, %w, input %v", msg.GenBatchProofResponse.String(), ErrProverInternalError, input)
+			return nil, fmt.Errorf(
+				"failed to generate proof %s, %w, input %v",
+				msg.GenBatchProofResponse.String(), ErrProverInternalError, input,
+			)
 		default:
-			return nil, fmt.Errorf("failed to generate proof %s, %w,input %v", msg.GenBatchProofResponse.String(), ErrUnknown, input)
+			return nil, fmt.Errorf(
+				"failed to generate proof %s, %w,input %v",
+				msg.GenBatchProofResponse.String(), ErrUnknown, input,
+			)
 		}
 	}
 
-	return nil, fmt.Errorf("%w, wanted %T, got %T", ErrBadProverResponse, &ProverMessage_GenBatchProofResponse{}, res.Response)
+	return nil, fmt.Errorf(
+		"%w, wanted %T, got %T",
+		ErrBadProverResponse, &ProverMessage_GenBatchProofResponse{}, res.Response,
+	)
 }
 
 // AggregatedProof instructs the prover to generate an aggregated proof from
@@ -176,7 +199,10 @@ func (p *Prover) AggregatedProof(inputProof1, inputProof2 string) (*string, erro
 		}
 	}
 
-	return nil, fmt.Errorf("%w, wanted %T, got %T", ErrBadProverResponse, &ProverMessage_GenAggregatedProofResponse{}, res.Response)
+	return nil, fmt.Errorf(
+		"%w, wanted %T, got %T",
+		ErrBadProverResponse, &ProverMessage_GenAggregatedProofResponse{}, res.Response,
+	)
 }
 
 // FinalProof instructs the prover to generate a final proof for the given
@@ -213,7 +239,11 @@ func (p *Prover) FinalProof(inputProof string, aggregatorAddr string) (*string,
 				msg.GenFinalProofResponse.String(), ErrUnknown, inputProof)
 		}
 	}
-	return nil, fmt.Errorf("%w, wanted %T, got %T", ErrBadProverResponse, &ProverMessage_GenFinalProofResponse{}, res.Response)
+
+	return nil, fmt.Errorf(
+		"%w, wanted %T, got %T",
+		ErrBadProverResponse, &ProverMessage_GenFinalProofResponse{}, res.Response,
+	)
 }
 
 // CancelProofRequest asks the prover to stop the generation of the proof
@@ -246,6 +276,7 @@ func (p *Prover) CancelProofRequest(proofID string) error {
 				proofID, ErrUnknown, msg.CancelResponse.String())
 		}
 	}
+
 	return fmt.Errorf("%w, wanted %T, got %T", ErrBadProverResponse, &ProverMessage_CancelResponse{}, res.Response)
 }
 
@@ -257,7 +288,13 @@ func (p *Prover) WaitRecursiveProof(ctx context.Context, proofID string) (string
 		return "", common.Hash{}, err
 	}
 
-	resProof := res.Proof.(*GetProofResponse_RecursiveProof)
+	resProof, ok := res.Proof.(*GetProofResponse_RecursiveProof)
+	if !ok {
+		return "", common.Hash{}, fmt.Errorf(
+			"%w, wanted %T, got %T",
+			ErrBadProverResponse, &GetProofResponse_RecursiveProof{}, res.Proof,
+		)
+	}
 
 	sr, err := GetStateRootFromProof(resProof.RecursiveProof)
 	if err != nil && sr != (common.Hash{}) {
@@ -278,7 +315,11 @@ func (p *Prover) WaitFinalProof(ctx context.Context, proofID string) (*FinalProo
 	if err != nil {
 		return nil, err
 	}
-	resProof := res.Proof.(*GetProofResponse_FinalProof)
+	resProof, ok := res.Proof.(*GetProofResponse_FinalProof)
+	if !ok {
+		return nil, fmt.Errorf("%w, wanted %T, got %T", ErrBadProverResponse, &GetProofResponse_FinalProof{}, res.Proof)
+	}
+
 	return resProof.FinalProof, nil
 }
 
@@ -307,6 +348,7 @@ func (p *Prover) waitProof(ctx context.Context, proofID string) (*GetProofRespon
 				switch msg.GetProofResponse.Result {
 				case GetProofResponse_RESULT_PENDING:
 					time.Sleep(p.proofStatePollingInterval.Duration)
+
 					continue
 				case GetProofResponse_RESULT_UNSPECIFIED:
 					return nil, fmt.Errorf("failed to get proof ID: %s, %w, prover response: %s",
@@ -330,7 +372,11 @@ func (p *Prover) waitProof(ctx context.Context, proofID string) (*GetProofRespon
 						proofID, ErrUnknown, msg.GetProofResponse.String())
 				}
 			}
-			return nil, fmt.Errorf("%w, wanted %T, got %T", ErrBadProverResponse, &ProverMessage_GetProofResponse{}, res.Response)
+
+			return nil, fmt.Errorf(
+				"%w, wanted %T, got %T",
+				ErrBadProverResponse, &ProverMessage_GetProofResponse{}, res.Response,
+			)
 		}
 	}
 }
@@ -345,6 +391,7 @@ func (p *Prover) call(req *AggregatorMessage) (*ProverMessage, error) {
 	if err != nil {
 		return nil, err
 	}
+
 	return res, nil
 }
 
@@ -366,6 +413,7 @@ func GetStateRootFromProof(proof string) (common.Hash, error) {
 	err := json.Unmarshal([]byte(proof), &publics)
 	if err != nil {
 		log.Errorf("Error unmarshalling proof: %v", err)
+
 		return common.Hash{}, err
 	}
 
@@ -401,5 +449,6 @@ func fea2scalar(v []uint64) *big.Int {
 	res.Add(res, new(big.Int).Lsh(new(big.Int).SetUint64(v[5]), 160)) //nolint:gomnd
 	res.Add(res, new(big.Int).Lsh(new(big.Int).SetUint64(v[6]), 192)) //nolint:gomnd
 	res.Add(res, new(big.Int).Lsh(new(big.Int).SetUint64(v[7]), 224)) //nolint:gomnd
+
 	return res
 }
diff --git a/bridgesync/bridgesync.go b/bridgesync/bridgesync.go
index 220c33fd2..ef8e45413 100644
--- a/bridgesync/bridgesync.go
+++ b/bridgesync/bridgesync.go
@@ -15,6 +15,7 @@ const (
 	downloadBufferSize = 1000
 )
 
+// BridgeSync manages the state of the exit tree for the bridge contract by processing Ethereum blockchain events.
 type BridgeSync struct {
 	processor *processor
 	driver    *sync.EVMDriver
@@ -101,10 +102,12 @@ func newBridgeSync(
 	if err != nil {
 		return nil, err
 	}
+
 	lastProcessedBlock, err := processor.GetLastProcessedBlock(ctx)
 	if err != nil {
 		return nil, err
 	}
+
 	if lastProcessedBlock < initialBlock {
 		err = processor.ProcessBlock(ctx, sync.Block{
 			Num: initialBlock,
@@ -140,6 +143,7 @@ func newBridgeSync(
 	if err != nil {
 		return nil, err
 	}
+
 	return &BridgeSync{
 		processor: processor,
 		driver:    driver,
@@ -163,6 +167,9 @@ func (s *BridgeSync) GetClaimsAndBridges(ctx context.Context, fromBlock, toBlock
 	return s.processor.GetClaimsAndBridges(ctx, fromBlock, toBlock)
 }
 
-func (s *BridgeSync) GetProof(ctx context.Context, depositCount uint32, localExitRoot common.Hash) ([32]common.Hash, error) {
+// GetProof retrieves the Merkle proof for the given deposit count and exit root.
+func (s *BridgeSync) GetProof(
+	ctx context.Context, depositCount uint32, localExitRoot common.Hash,
+) ([32]common.Hash, error) {
 	return s.processor.exitTree.GetProof(ctx, depositCount, localExitRoot)
 }
diff --git a/bridgesync/config.go b/bridgesync/config.go
index 9aa849e2c..66eb00ed3 100644
--- a/bridgesync/config.go
+++ b/bridgesync/config.go
@@ -9,7 +9,7 @@ type Config struct {
 	// DBPath path of the DB
 	DBPath string `mapstructure:"DBPath"`
 	// BlockFinality indicates the status of the blocks that will be queried in order to sync
-	BlockFinality string `jsonschema:"enum=LatestBlock, enum=SafeBlock, enum=PendingBlock, enum=FinalizedBlock, enum=EarliestBlock" mapstructure:"BlockFinality"`
+	BlockFinality string `jsonschema:"enum=LatestBlock, enum=SafeBlock, enum=PendingBlock, enum=FinalizedBlock, enum=EarliestBlock" mapstructure:"BlockFinality"` //nolint:lll
 	// InitialBlockNum is the first block that will be queried when starting the synchronization from scratch.
 	// It should be a number equal to or below the block at which the bridge contract was created
 	InitialBlockNum uint64 `mapstructure:"InitialBlockNum"`
diff --git a/bridgesync/downloader.go b/bridgesync/downloader.go
index 2763fcfe9..3599d7dea 100644
--- a/bridgesync/downloader.go
+++ b/bridgesync/downloader.go
@@ -22,13 +22,16 @@ import (
 )
 
 var (
-	bridgeEventSignature        = crypto.Keccak256Hash([]byte("BridgeEvent(uint8,uint32,address,uint32,address,uint256,bytes,uint32)"))
+	bridgeEventSignature = crypto.Keccak256Hash([]byte(
+		"BridgeEvent(uint8,uint32,address,uint32,address,uint256,bytes,uint32)",
+	))
 	claimEventSignature         = crypto.Keccak256Hash([]byte("ClaimEvent(uint256,uint32,address,address,uint256)"))
 	claimEventSignaturePreEtrog = crypto.Keccak256Hash([]byte("ClaimEvent(uint32,uint32,address,address,uint256)"))
 	methodIDClaimAsset          = common.Hex2Bytes("ccaa2d11")
 	methodIDClaimMessage        = common.Hex2Bytes("f5efcd79")
 )
 
+// EthClienter defines the methods required to interact with an Ethereum client.
 type EthClienter interface {
 	ethereum.LogFilterer
 	ethereum.BlockNumberReader
@@ -52,7 +55,7 @@ func buildAppender(client EthClienter, bridge common.Address, syncFullClaims boo
 		bridge, err := bridgeContractV2.ParseBridgeEvent(l)
 		if err != nil {
 			return fmt.Errorf(
-				"error parsing log %+v using d.bridgeContractV2.ParseBridgeEvent: %v",
+				"error parsing log %+v using d.bridgeContractV2.ParseBridgeEvent: %w",
 				l, err,
 			)
 		}
@@ -66,6 +69,7 @@ func buildAppender(client EthClienter, bridge common.Address, syncFullClaims boo
 			Metadata:           bridge.Metadata,
 			DepositCount:       bridge.DepositCount,
 		}})
+
 		return nil
 	}
 
@@ -73,7 +77,7 @@ func buildAppender(client EthClienter, bridge common.Address, syncFullClaims boo
 		claimEvent, err := bridgeContractV2.ParseClaimEvent(l)
 		if err != nil {
 			return fmt.Errorf(
-				"error parsing log %+v using d.bridgeContractV2.ParseClaimEvent: %v",
+				"error parsing log %+v using d.bridgeContractV2.ParseClaimEvent: %w",
 				l, err,
 			)
 		}
@@ -85,7 +89,9 @@ func buildAppender(client EthClienter, bridge common.Address, syncFullClaims boo
 			Amount:             claimEvent.Amount,
 		}
 		if syncFullClaims {
-			setClaimCalldata(client, bridge, l.TxHash, claim)
+			if err := setClaimCalldata(client, bridge, l.TxHash, claim); err != nil {
+				return err
+			}
 		}
 		b.Events = append(b.Events, Event{Claim: claim})
 		return nil
@@ -95,7 +101,7 @@ func buildAppender(client EthClienter, bridge common.Address, syncFullClaims boo
 		claimEvent, err := bridgeContractV1.ParseClaimEvent(l)
 		if err != nil {
 			return fmt.Errorf(
-				"error parsing log %+v using d.bridgeContractV1.ParseClaimEvent: %v",
+				"error parsing log %+v using d.bridgeContractV1.ParseClaimEvent: %w",
 				l, err,
 			)
 		}
@@ -107,7 +113,9 @@ func buildAppender(client EthClienter, bridge common.Address, syncFullClaims boo
 			Amount:             claimEvent.Amount,
 		}
 		if syncFullClaims {
-			setClaimCalldata(client, bridge, l.TxHash, claim)
+			if err := setClaimCalldata(client, bridge, l.TxHash, claim); err != nil {
+				return err
+			}
 		}
 		b.Events = append(b.Events, Event{Claim: claim})
 		return nil
@@ -144,7 +152,13 @@ func setClaimCalldata(client EthClienter, bridge common.Address, txHash common.H
 		if callStack.Len() == 0 {
 			break
 		}
-		currentCall := callStack.Pop().(call)
+
+		currentCallInterface := callStack.Pop()
+		currentCall, ok := currentCallInterface.(call)
+		if !ok {
+			return fmt.Errorf("unexpected type for 'currentCall'. Expected 'call', got '%T'", currentCallInterface)
+		}
+
 		if currentCall.To == bridge {
 			found, err := setClaimIfFoundOnInput(
 				currentCall.Input,
@@ -169,9 +183,9 @@ func setClaimIfFoundOnInput(input []byte, claim *Claim) (bool, error) {
 	if err != nil {
 		return false, err
 	}
-	methodId := input[:4]
+	methodID := input[:4]
 	// Recover Method from signature and ABI
-	method, err := smcAbi.MethodById(methodId)
+	method, err := smcAbi.MethodById(methodID)
 	if err != nil {
 		return false, err
 	}
@@ -180,13 +194,13 @@ func setClaimIfFoundOnInput(input []byte, claim *Claim) (bool, error) {
 		return false, err
 	}
 	// Ignore other methods
-	if bytes.Equal(methodId, methodIDClaimAsset) || bytes.Equal(methodId, methodIDClaimMessage) {
+	if bytes.Equal(methodID, methodIDClaimAsset) || bytes.Equal(methodID, methodIDClaimMessage) {
 		found, err := decodeClaimCallDataAndSetIfFound(data, claim)
 		if err != nil {
 			return false, err
 		}
 		if found {
-			if bytes.Equal(methodId, methodIDClaimMessage) {
+			if bytes.Equal(methodID, methodIDClaimMessage) {
 				claim.IsMessage = true
 			}
 			return true, nil
@@ -228,25 +242,52 @@ func decodeClaimCallDataAndSetIfFound(data []interface{}, claim *Claim) (bool, e
 		10: metadata,
 	)
 	*/
-	actualGlobalIndex := data[2].(*big.Int)
+	actualGlobalIndex, ok := data[2].(*big.Int)
+	if !ok {
+		return false, fmt.Errorf("unexpected type for actualGlobalIndex, expected *big.Int got '%T'", data[2])
+	}
 	if actualGlobalIndex.Cmp(claim.GlobalIndex) != 0 {
 		// not the claim we're looking for
 		return false, nil
 	} else {
 		proofLER := [tree.DefaultHeight]common.Hash{}
-		proofLERBytes := data[0].([32][32]byte)
+		proofLERBytes, ok := data[0].([32][32]byte)
+		if !ok {
+			return false, fmt.Errorf("unexpected type for proofLERBytes, expected [32][32]byte got '%T'", data[0])
+		}
+
 		proofRER := [tree.DefaultHeight]common.Hash{}
-		proofRERBytes := data[1].([32][32]byte)
+		proofRERBytes, ok := data[1].([32][32]byte)
+		if !ok {
+			return false, fmt.Errorf("unexpected type for proofRERBytes, expected [32][32]byte got '%T'", data[1])
+		}
+
 		for i := 0; i < int(tree.DefaultHeight); i++ {
 			proofLER[i] = proofLERBytes[i]
 			proofRER[i] = proofRERBytes[i]
 		}
 		claim.ProofLocalExitRoot = proofLER
 		claim.ProofRollupExitRoot = proofRER
-		claim.MainnetExitRoot = data[3].([32]byte)
-		claim.RollupExitRoot = data[4].([32]byte)
-		claim.DestinationNetwork = data[7].(uint32)
-		claim.Metadata = data[10].([]byte)
+
+		claim.MainnetExitRoot, ok = data[3].([32]byte)
+		if !ok {
+			return false, fmt.Errorf("unexpected type for 'MainnetExitRoot'. Expected '[32]byte', got '%T'", data[3])
+		}
+
+		claim.RollupExitRoot, ok = data[4].([32]byte)
+		if !ok {
+			return false, fmt.Errorf("unexpected type for 'RollupExitRoot'. Expected '[32]byte', got '%T'", data[4])
+		}
+
+		claim.DestinationNetwork, ok = data[7].(uint32)
+		if !ok {
+			return false, fmt.Errorf("unexpected type for 'DestinationNetwork'. Expected 'uint32', got '%T'", data[7])
+		}
+		claim.Metadata, ok = data[10].([]byte)
+		if !ok {
+			return false, fmt.Errorf("unexpected type for 'claim Metadata'. Expected '[]byte', got '%T'", data[10])
+		}
+
 		return true, nil
 	}
 }
diff --git a/bridgesync/e2e_test.go b/bridgesync/e2e_test.go
index 6eff5548b..e0901509a 100644
--- a/bridgesync/e2e_test.go
+++ b/bridgesync/e2e_test.go
@@ -26,20 +26,22 @@ func newSimulatedClient(t *testing.T, auth *bind.TransactOpts) (
 	bridgeContract *polygonzkevmbridgev2.Polygonzkevmbridgev2,
 ) {
 	t.Helper()
+
 	var err error
-	balance, _ := big.NewInt(0).SetString("10000000000000000000000000", 10) //nolint:gomnd
+	balance, _ := big.NewInt(0).SetString("10000000000000000000000000", 10)
 	address := auth.From
 	genesisAlloc := map[common.Address]types.Account{
 		address: {
 			Balance: balance,
 		},
 	}
-	blockGasLimit := uint64(999999999999999999) //nolint:gomnd
+	blockGasLimit := uint64(999999999999999999)
 	client = simulated.NewBackend(genesisAlloc, simulated.WithBlockGasLimit(blockGasLimit))
 
 	bridgeAddr, _, bridgeContract, err = polygonzkevmbridgev2.DeployPolygonzkevmbridgev2(auth, client.Client())
 	require.NoError(t, err)
 	client.Commit()
+
 	return
 }
 
@@ -54,15 +56,18 @@ func TestBridgeEventE2E(t *testing.T) {
 	client, bridgeAddr, bridgeSc := newSimulatedClient(t, auth)
 	rd, err := reorgdetector.New(client.Client(), reorgdetector.Config{DBPath: dbPathReorg})
 	require.NoError(t, err)
-	go rd.Start(ctx)
+
+	go rd.Start(ctx) //nolint:errcheck
 
 	testClient := helpers.TestClient{ClientRenamed: client.Client()}
 	syncer, err := bridgesync.NewL1(ctx, dbPathSyncer, bridgeAddr, 10, etherman.LatestBlock, rd, testClient, 0, time.Millisecond*10, 0, 0)
 	require.NoError(t, err)
+
 	go syncer.Start(ctx)
 
 	// Send bridge txs
 	expectedBridges := []bridgesync.Bridge{}
+
 	for i := 0; i < 100; i++ {
 		bridge := bridgesync.Bridge{
 			Amount:             big.NewInt(0),
@@ -89,16 +94,20 @@ func TestBridgeEventE2E(t *testing.T) {
 
 	// Wait for syncer to catch up
 	syncerUpToDate := false
+
 	var errMsg string
 	lb, err := client.Client().BlockNumber(ctx)
 	require.NoError(t, err)
+
 	for i := 0; i < 10; i++ {
 		lpb, err := syncer.GetLastProcessedBlock(ctx)
 		require.NoError(t, err)
 		if lpb == lb {
 			syncerUpToDate = true
+
 			break
 		}
+
 		time.Sleep(time.Millisecond * 100)
 		errMsg = fmt.Sprintf("last block from client: %d, last block from syncer: %d", lb, lpb)
 	}
@@ -110,6 +119,7 @@ func TestBridgeEventE2E(t *testing.T) {
 	events, err := syncer.GetClaimsAndBridges(ctx, 0, lastBlock)
 	require.NoError(t, err)
 	actualBridges := []bridgesync.Bridge{}
+
 	for _, event := range events {
 		if event.Bridge != nil {
 			actualBridges = append(actualBridges, *event.Bridge)
diff --git a/bridgesync/processor.go b/bridgesync/processor.go
index bd96732f8..824d4afd2 100644
--- a/bridgesync/processor.go
+++ b/bridgesync/processor.go
@@ -23,6 +23,7 @@ const (
 )
 
 var (
+	// ErrBlockNotProcessed indicates that the given block(s) have not been processed yet.
 	ErrBlockNotProcessed = errors.New("given block(s) have not been processed yet")
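+	// ErrNotFound indicates that the requested item was not found.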
 	ErrNotFound          = errors.New("not found")
 	lastBlockKey         = []byte("lb")
@@ -56,6 +57,7 @@ func (b *Bridge) Hash() common.Hash {
 	if b.Amount == nil {
 		b.Amount = big.NewInt(0)
 	}
+
 	return common.BytesToHash(keccak256.Hash(
 		[]byte{b.LeafType},
 		origNet,
@@ -110,6 +112,7 @@ func newProcessor(ctx context.Context, dbPath, dbPrefix string) (*processor, err
 			lastBlockTable: {},
 		}
 		tree.AddTables(cfg, dbPrefix)
+
 		return cfg
 	}
 	db, err := mdbx.NewMDBX(nil).
@@ -123,6 +126,7 @@ func newProcessor(ctx context.Context, dbPath, dbPrefix string) (*processor, err
 	if err != nil {
 		return nil, err
 	}
+
 	return &processor{
 		db:             db,
 		eventsTable:    eventsTable,
@@ -143,11 +147,13 @@ func (p *processor) GetClaimsAndBridges(
 	if err != nil {
 		return nil, err
 	}
+
 	defer tx.Rollback()
 	lpb, err := p.getLastProcessedBlockWithTx(tx)
 	if err != nil {
 		return nil, err
 	}
+
 	if lpb < toBlock {
 		return nil, ErrBlockNotProcessed
 	}
@@ -161,6 +167,7 @@ func (p *processor) GetClaimsAndBridges(
 		if err != nil {
 			return nil, err
 		}
+
 		if dbCommon.BytesToUint64(k) > toBlock {
 			break
 		}
@@ -182,7 +189,9 @@ func (p *processor) GetLastProcessedBlock(ctx context.Context) (uint64, error) {
 	if err != nil {
 		return 0, err
 	}
+
 	defer tx.Rollback()
+
 	return p.getLastProcessedBlockWithTx(tx)
 }
 
@@ -214,21 +223,26 @@ func (p *processor) Reorg(ctx context.Context, firstReorgedBlock uint64) error {
 	for k, v, err := c.Seek(firstKey); k != nil; k, _, err = c.Next() {
 		if err != nil {
 			tx.Rollback()
+
 			return err
 		}
 		if err := tx.Delete(p.eventsTable, k); err != nil {
 			tx.Rollback()
+
 			return err
 		}
 		if firstDepositCountReorged == -1 {
 			events := []Event{}
 			if err := json.Unmarshal(v, &events); err != nil {
 				tx.Rollback()
+
 				return err
 			}
+
 			for _, event := range events {
 				if event.Bridge != nil {
 					firstDepositCountReorged = int64(event.Bridge.DepositCount)
+
 					break
 				}
 			}
@@ -236,6 +250,7 @@ func (p *processor) Reorg(ctx context.Context, firstReorgedBlock uint64) error {
 	}
 	if err := p.updateLastProcessedBlock(tx, firstReorgedBlock-1); err != nil {
 		tx.Rollback()
+
 		return err
 	}
 	exitTreeRollback := func() {}
@@ -243,13 +258,16 @@ func (p *processor) Reorg(ctx context.Context, firstReorgedBlock uint64) error {
 		if exitTreeRollback, err = p.exitTree.Reorg(tx, uint32(firstDepositCountReorged)); err != nil {
 			tx.Rollback()
 			exitTreeRollback()
+
 			return err
 		}
 	}
 	if err := tx.Commit(); err != nil {
 		exitTreeRollback()
+
 		return err
 	}
+
 	return nil
 }
 
@@ -263,29 +281,36 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error {
 	leaves := []tree.Leaf{}
 	if len(block.Events) > 0 {
 		events := []Event{}
+
 		for _, e := range block.Events {
-			event := e.(Event)
-			events = append(events, event)
-			if event.Bridge != nil {
-				leaves = append(leaves, tree.Leaf{
-					Index: event.Bridge.DepositCount,
-					Hash:  event.Bridge.Hash(),
-				})
+			if event, ok := e.(Event); ok {
+				events = append(events, event)
+				if event.Bridge != nil {
+					leaves = append(leaves, tree.Leaf{
+						Index: event.Bridge.DepositCount,
+						Hash:  event.Bridge.Hash(),
+					})
+				}
+			} else {
+				p.log.Errorf("unexpected type %T; expected Event", e)
 			}
 		}
 		value, err := json.Marshal(events)
 		if err != nil {
 			tx.Rollback()
+
 			return err
 		}
 		if err := tx.Put(p.eventsTable, dbCommon.Uint64ToBytes(block.Num), value); err != nil {
 			tx.Rollback()
+
 			return err
 		}
 	}
 
 	if err := p.updateLastProcessedBlock(tx, block.Num); err != nil {
 		tx.Rollback()
+
 		return err
 	}
 
@@ -293,21 +318,27 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error {
 	if err != nil {
 		tx.Rollback()
 		exitTreeRollback()
+
 		return err
 	}
 	if err := tx.Commit(); err != nil {
 		exitTreeRollback()
+
 		return err
 	}
+
 	p.log.Debugf("processed %d events until block %d", len(block.Events), block.Num)
+
 	return nil
 }
 
 func (p *processor) updateLastProcessedBlock(tx kv.RwTx, blockNum uint64) error {
 	blockNumBytes := dbCommon.Uint64ToBytes(blockNum)
+
 	return tx.Put(p.lastBlockTable, lastBlockKey, blockNumBytes)
 }
 
+// GenerateGlobalIndex creates a global index based on network type, rollup index, and local exit root index.
 func GenerateGlobalIndex(mainnetFlag bool, rollupIndex uint32, localExitRootIndex uint32) *big.Int {
 	var (
 		globalIndexBytes []byte
@@ -323,5 +354,6 @@ func GenerateGlobalIndex(mainnetFlag bool, rollupIndex uint32, localExitRootInde
 	}
 	leri := big.NewInt(0).SetUint64(uint64(localExitRootIndex)).FillBytes(buf[:])
 	globalIndexBytes = append(globalIndexBytes, leri...)
+
 	return big.NewInt(0).SetBytes(globalIndexBytes)
 }
diff --git a/bridgesync/processor_test.go b/bridgesync/processor_test.go
index 90fe74be8..790955bf6 100644
--- a/bridgesync/processor_test.go
+++ b/bridgesync/processor_test.go
@@ -9,6 +9,7 @@ import (
 	"slices"
 	"testing"
 
+	"github.com/0xPolygon/cdk/log"
 	"github.com/0xPolygon/cdk/sync"
 	"github.com/0xPolygon/cdk/tree/testvectors"
 	"github.com/ethereum/go-ethereum/common"
@@ -345,6 +346,8 @@ func (a *getClaimsAndBridgesAction) desc() string {
 }
 
 func (a *getClaimsAndBridgesAction) execute(t *testing.T) {
+	t.Helper()
+
 	actualEvents, actualErr := a.p.GetClaimsAndBridges(a.ctx, a.fromBlock, a.toBlock)
 	require.Equal(t, a.expectedEvents, actualEvents)
 	require.Equal(t, a.expectedErr, actualErr)
@@ -369,6 +372,8 @@ func (a *getLastProcessedBlockAction) desc() string {
 }
 
 func (a *getLastProcessedBlockAction) execute(t *testing.T) {
+	t.Helper()
+
 	actualLastProcessedBlock, actualErr := a.p.GetLastProcessedBlock(a.ctx)
 	require.Equal(t, a.expectedLastProcessedBlock, actualLastProcessedBlock)
 	require.Equal(t, a.expectedErr, actualErr)
@@ -392,6 +397,8 @@ func (a *reorgAction) desc() string {
 }
 
 func (a *reorgAction) execute(t *testing.T) {
+	t.Helper()
+
 	actualErr := a.p.Reorg(context.Background(), a.firstReorgedBlock)
 	require.Equal(t, a.expectedErr, actualErr)
 }
@@ -414,15 +421,23 @@ func (a *processBlockAction) desc() string {
 }
 
 func (a *processBlockAction) execute(t *testing.T) {
+	t.Helper()
+
 	actualErr := a.p.ProcessBlock(context.Background(), a.block)
 	require.Equal(t, a.expectedErr, actualErr)
 }
 
 func eventsToBridgeEvents(events []interface{}) []Event {
 	bridgeEvents := []Event{}
+
 	for _, event := range events {
-		bridgeEvents = append(bridgeEvents, event.(Event))
+		if evt, ok := event.(Event); ok {
+			bridgeEvents = append(bridgeEvents, evt)
+		} else {
+			log.Errorf("unexpected type %T; expected Event", event)
+		}
 	}
+
 	return bridgeEvents
 }
 
diff --git a/claimsponsor/claimsponsor.go b/claimsponsor/claimsponsor.go
index e0d8e7b83..bad29c867 100644
--- a/claimsponsor/claimsponsor.go
+++ b/claimsponsor/claimsponsor.go
@@ -84,6 +84,7 @@ func newClaimSponsor(
 			claimTable: {},
 			queueTable: {},
 		}
+
 		return cfg
 	}
 	db, err := mdbx.NewMDBX(nil).
@@ -97,6 +98,7 @@ func newClaimSponsor(
 		MaxRetryAttemptsAfterError: maxRetryAttemptsAfterError,
 		RetryAfterErrorPeriod:      retryAfterErrorPeriod,
 	}
+
 	return &ClaimSponsor{
 		db:                    db,
 		sender:                sender,
@@ -120,19 +122,22 @@ func (c *ClaimSponsor) Start(ctx context.Context) {
 		if err2 != nil {
 			err = err2
 			log.Errorf("error calling BeginRw: %v", err)
+
 			continue
 		}
 		queueIndex, globalIndex, err2 := getFirstQueueIndex(tx)
 		if err2 != nil {
 			err = err2
 			tx.Rollback()
-			if err == ErrNotFound {
+			if errors.Is(err, ErrNotFound) {
 				log.Debugf("queue is empty")
 				err = nil
 				time.Sleep(c.waitOnEmptyQueue)
+
 				continue
 			}
 			log.Errorf("error calling getFirstQueueIndex: %v", err)
+
 			continue
 		}
 		claim, err2 := getClaim(tx, globalIndex)
@@ -140,6 +145,7 @@ func (c *ClaimSponsor) Start(ctx context.Context) {
 			err = err2
 			tx.Rollback()
 			log.Errorf("error calling getClaim with globalIndex %s: %v", globalIndex.String(), err)
+
 			continue
 		}
 		if claim.TxID == "" {
@@ -148,6 +154,7 @@ func (c *ClaimSponsor) Start(ctx context.Context) {
 				err = err2
 				tx.Rollback()
 				log.Errorf("error calling sendClaim with globalIndex %s: %v", globalIndex.String(), err)
+
 				continue
 			}
 			claim.TxID = txID
@@ -157,6 +164,7 @@ func (c *ClaimSponsor) Start(ctx context.Context) {
 				err = err2
 				tx.Rollback()
 				log.Errorf("error calling putClaim with globalIndex %s: %v", globalIndex.String(), err)
+
 				continue
 			}
 		}
@@ -164,6 +172,7 @@ func (c *ClaimSponsor) Start(ctx context.Context) {
 		if err2 != nil {
 			err = err2
 			log.Errorf("error calling tx.Commit after putting claim: %v", err)
+
 			continue
 		}
 
@@ -172,6 +181,7 @@ func (c *ClaimSponsor) Start(ctx context.Context) {
 		if err2 != nil {
 			err = err2
 			log.Errorf("error calling waitTxToBeSuccessOrFail for tx %s: %v", claim.TxID, err)
+
 			continue
 		}
 		log.Infof("tx %s with global index %s concluded with status: %s", claim.TxID, globalIndex.String(), status)
@@ -179,6 +189,7 @@ func (c *ClaimSponsor) Start(ctx context.Context) {
 		if err2 != nil {
 			err = err2
 			log.Errorf("error calling BeginRw: %v", err)
+
 			continue
 		}
 		claim.Status = status
@@ -187,6 +198,7 @@ func (c *ClaimSponsor) Start(ctx context.Context) {
 			err = err2
 			tx.Rollback()
 			log.Errorf("error calling putClaim with globalIndex %s: %v", globalIndex.String(), err)
+
 			continue
 		}
 		err2 = tx.Delete(queueTable, dbCommon.Uint64ToBytes(queueIndex))
@@ -194,12 +206,14 @@ func (c *ClaimSponsor) Start(ctx context.Context) {
 			err = err2
 			tx.Rollback()
 			log.Errorf("error calling delete on the queue table with index %d: %v", queueIndex, err)
+
 			continue
 		}
 		err2 = tx.Commit()
 		if err2 != nil {
 			err = err2
 			log.Errorf("error calling tx.Commit after putting claim: %v", err)
+
 			continue
 		}
 
@@ -236,12 +250,14 @@ func (c *ClaimSponsor) AddClaimToQueue(ctx context.Context, claim *Claim) error
 	}
 
 	_, err = getClaim(tx, claim.GlobalIndex)
-	if err != ErrNotFound {
+	if !errors.Is(err, ErrNotFound) {
 		if err != nil {
 			tx.Rollback()
+
 			return err
 		} else {
 			tx.Rollback()
+
 			return errors.New("claim already added")
 		}
 	}
@@ -249,15 +265,17 @@ func (c *ClaimSponsor) AddClaimToQueue(ctx context.Context, claim *Claim) error
 	err = putClaim(tx, claim)
 	if err != nil {
 		tx.Rollback()
+
 		return err
 	}
 
 	var queuePosition uint64
 	lastQueuePosition, _, err := getLastQueueIndex(tx)
-	if err == ErrNotFound {
+	if errors.Is(err, ErrNotFound) {
 		queuePosition = 0
 	} else if err != nil {
 		tx.Rollback()
+
 		return err
 	} else {
 		queuePosition = lastQueuePosition + 1
@@ -265,6 +283,7 @@ func (c *ClaimSponsor) AddClaimToQueue(ctx context.Context, claim *Claim) error
 	err = tx.Put(queueTable, dbCommon.Uint64ToBytes(queuePosition), claim.Key())
 	if err != nil {
 		tx.Rollback()
+
 		return err
 	}
 
@@ -276,6 +295,7 @@ func putClaim(tx kv.RwTx, claim *Claim) error {
 	if err != nil {
 		return err
 	}
+
 	return tx.Put(claimTable, claim.Key(), value)
 }
 
@@ -306,6 +326,7 @@ func getLastQueueIndex(tx kv.Tx) (uint64, *big.Int, error) {
 	if err != nil {
 		return 0, nil, err
 	}
+
 	return getIndex(iter)
 }
 
@@ -318,6 +339,7 @@ func getFirstQueueIndex(tx kv.Tx) (uint64, *big.Int, error) {
 	if err != nil {
 		return 0, nil, err
 	}
+
 	return getIndex(iter)
 }
 
@@ -330,6 +352,7 @@ func getIndex(iter iter.KV) (uint64, *big.Int, error) {
 		return 0, nil, ErrNotFound
 	}
 	globalIndex := new(big.Int).SetBytes(v)
+
 	return dbCommon.BytesToUint64(k), globalIndex, nil
 }
 
@@ -339,6 +362,7 @@ func (c *ClaimSponsor) GetClaim(ctx context.Context, globalIndex *big.Int) (*Cla
 		return nil, err
 	}
 	defer tx.Rollback()
+
 	return getClaim(tx, globalIndex)
 }
 
@@ -352,5 +376,6 @@ func getClaim(tx kv.Tx, globalIndex *big.Int) (*Claim, error) {
 	}
 	claim := &Claim{}
 	err = json.Unmarshal(claimBytes, claim)
+
 	return claim, err
 }
diff --git a/claimsponsor/e2e_test.go b/claimsponsor/e2e_test.go
index 796a09ba2..29a939d75 100644
--- a/claimsponsor/e2e_test.go
+++ b/claimsponsor/e2e_test.go
@@ -92,6 +92,7 @@ func TestE2EL1toEVML2(t *testing.T) {
 				require.NoError(t, errors.New("claim failed"))
 			} else if claim.Status == claimsponsor.SuccessClaimStatus {
 				succeed = true
+
 				break
 			}
 			time.Sleep(100 * time.Millisecond)
diff --git a/claimsponsor/evmclaimsponsor.go b/claimsponsor/evmclaimsponsor.go
index e7b94b200..9f24c2ef2 100644
--- a/claimsponsor/evmclaimsponsor.go
+++ b/claimsponsor/evmclaimsponsor.go
@@ -21,7 +21,8 @@ const (
 	LeafTypeAsset uint8 = 0
 	// LeafTypeMessage represents a bridge message
 	LeafTypeMessage       uint8 = 1
-	gasTooHighErrTemplate       = "Claim tx estimated to consume more gas than the maximum allowed by the service. Estimated %d, maximum allowed: %d"
+	gasTooHighErrTemplate       = "claim tx estimated to consume more gas than the maximum allowed by the service. " +
+		"Estimated %d, maximum allowed: %d"
 )
 
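+// EthClienter defines the methods required to interact with an Ethereum client.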
 type EthClienter interface {
@@ -31,9 +32,11 @@ type EthClienter interface {
 
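+// EthTxManager defines the methods required to add, query, and remove monitored txs.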
 type EthTxManager interface {
 	Remove(ctx context.Context, id common.Hash) error
-	ResultsByStatus(ctx context.Context, statuses []ethtxmanager.MonitoredTxStatus) ([]ethtxmanager.MonitoredTxResult, error)
+	ResultsByStatus(ctx context.Context, statuses []ethtxmanager.MonitoredTxStatus,
+	) ([]ethtxmanager.MonitoredTxResult, error)
 	Result(ctx context.Context, id common.Hash) (ethtxmanager.MonitoredTxResult, error)
-	Add(ctx context.Context, to *common.Address, forcedNonce *uint64, value *big.Int, data []byte, gasOffset uint64, sidecar *types.BlobTxSidecar) (common.Hash, error)
+	Add(ctx context.Context, to *common.Address, forcedNonce *uint64, value *big.Int, data []byte,
+		gasOffset uint64, sidecar *types.BlobTxSidecar) (common.Hash, error)
 }
 
 type EVMClaimSponsor struct {
@@ -117,6 +120,7 @@ func NewEVMClaimSponsor(
 		return nil, err
 	}
 	evmSponsor.ClaimSponsor = baseSponsor
+
 	return baseSponsor, nil
 }
 
@@ -136,6 +140,7 @@ func (c *EVMClaimSponsor) checkClaim(ctx context.Context, claim *Claim) error {
 	if gas > c.maxGas {
 		return fmt.Errorf(gasTooHighErrTemplate, gas, c.maxGas)
 	}
+
 	return nil
 }
 
@@ -148,6 +153,7 @@ func (c *EVMClaimSponsor) sendClaim(ctx context.Context, claim *Claim) (string,
 	if err != nil {
 		return "", err
 	}
+
 	return id.Hex(), nil
 }
 
diff --git a/cmd/main.go b/cmd/main.go
index 4686902f9..050fad2b1 100644
--- a/cmd/main.go
+++ b/cmd/main.go
@@ -13,7 +13,7 @@ const appName = "cdk"
 
 const (
 	// SEQUENCE_SENDER name to identify the sequence-sender component
-	SEQUENCE_SENDER = "sequence-sender"
+	SEQUENCE_SENDER = "sequence-sender" //nolint:stylecheck
 	// AGGREGATOR name to identify the aggregator component
 	AGGREGATOR = "aggregator"
 	// AGGORACLE name to identify the aggoracle component
@@ -23,8 +23,8 @@ const (
 )
 
 const (
-	// NETWORK_CONFIGFILE name to identify the netowk_custom (genesis) config-file
-	NETWORK_CONFIGFILE = "custom_network"
+	// NetworkConfigFile is the name used to identify the network_custom (genesis) config file
+	NetworkConfigFile = "custom_network"
 )
 
 var (
diff --git a/cmd/run.go b/cmd/run.go
index b960b3762..af5ff7a40 100644
--- a/cmd/run.go
+++ b/cmd/run.go
@@ -62,14 +62,31 @@ func start(cliCtx *cli.Context) error {
 	components := cliCtx.StringSlice(config.FlagComponents)
 	l1Client := runL1ClientIfNeeded(components, c.Etherman.URL)
 	l2Client := runL2ClientIfNeeded(components, c.AggOracle.EVMSender.URLRPCL2)
-	reorgDetectorL1 := runReorgDetectorL1IfNeeded(cliCtx.Context, components, l1Client, &c.ReorgDetectorL1)
-	reorgDetectorL2 := runReorgDetectorL2IfNeeded(cliCtx.Context, components, l2Client, &c.ReorgDetectorL2)
+	reorgDetectorL1, errChanL1 := runReorgDetectorL1IfNeeded(cliCtx.Context, components, l1Client, &c.ReorgDetectorL1)
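+	// Stop the process if the L1 reorg detector returns an error.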
+	go func() {
+		if err := <-errChanL1; err != nil {
+			log.Fatal("Error from ReorgDetectorL1: ", err)
+		}
+	}()
+
+	reorgDetectorL2, errChanL2 := runReorgDetectorL2IfNeeded(cliCtx.Context, components, l2Client, &c.ReorgDetectorL2)
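+	// Stop the process if the L2 reorg detector returns an error.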
+	go func() {
+		if err := <-errChanL2; err != nil {
+			log.Fatal("Error from ReorgDetectorL2: ", err)
+		}
+	}()
+
 	l1InfoTreeSync := runL1InfoTreeSyncerIfNeeded(cliCtx.Context, components, *c, l1Client, reorgDetectorL1)
 	claimSponsor := runClaimSponsorIfNeeded(cliCtx.Context, components, l2Client, c.ClaimSponsor)
 	l1BridgeSync := runBridgeSyncL1IfNeeded(cliCtx.Context, components, c.BridgeL1Sync, reorgDetectorL1, l1Client)
 	l2BridgeSync := runBridgeSyncL2IfNeeded(cliCtx.Context, components, c.BridgeL2Sync, reorgDetectorL2, l2Client)
-	l1Bridge2InfoIndexSync := runL1Bridge2InfoIndexSyncIfNeeded(cliCtx.Context, components, c.L1Bridge2InfoIndexSync, l1BridgeSync, l1InfoTreeSync, l1Client)
-	lastGERSync := runLastGERSyncIfNeeded(cliCtx.Context, components, c.LastGERSync, reorgDetectorL2, l2Client, l1InfoTreeSync)
+	l1Bridge2InfoIndexSync := runL1Bridge2InfoIndexSyncIfNeeded(
+		cliCtx.Context, components, c.L1Bridge2InfoIndexSync,
+		l1BridgeSync, l1InfoTreeSync, l1Client,
+	)
+	lastGERSync := runLastGERSyncIfNeeded(
+		cliCtx.Context, components, c.LastGERSync, reorgDetectorL2, l2Client, l1InfoTreeSync,
+	)
 
 	for _, component := range components {
 		switch component {
@@ -117,12 +134,15 @@ func start(cliCtx *cli.Context) error {
 func createAggregator(ctx context.Context, c config.Config, runMigrations bool) *aggregator.Aggregator {
 	// Migrations
 	if runMigrations {
-		log.Infof("Running DB migrations host: %s:%s db:%s user:%s", c.Aggregator.DB.Host, c.Aggregator.DB.Port, c.Aggregator.DB.Name, c.Aggregator.DB.User)
+		log.Infof(
+			"Running DB migrations host: %s:%s db:%s user:%s",
+			c.Aggregator.DB.Host, c.Aggregator.DB.Port, c.Aggregator.DB.Name, c.Aggregator.DB.User,
+		)
 		runAggregatorMigrations(c.Aggregator.DB)
 	}
 
 	// DB
-	stateSqlDB, err := db.NewSQLDB(c.Aggregator.DB)
+	stateSQLDB, err := db.NewSQLDB(c.Aggregator.DB)
 	if err != nil {
 		log.Fatal(err)
 	}
@@ -138,12 +158,13 @@ func createAggregator(ctx context.Context, c config.Config, runMigrations bool)
 		log.Fatal(err)
 	}
 
-	st := newState(&c, l2ChainID, stateSqlDB)
+	st := newState(&c, l2ChainID, stateSQLDB)
 
 	c.Aggregator.ChainID = l2ChainID
 
 	// Populate Network config
-	c.Aggregator.Synchronizer.Etherman.Contracts.GlobalExitRootManagerAddr = c.NetworkConfig.L1Config.GlobalExitRootManagerAddr
+	c.Aggregator.Synchronizer.Etherman.Contracts.GlobalExitRootManagerAddr =
+		c.NetworkConfig.L1Config.GlobalExitRootManagerAddr
 	c.Aggregator.Synchronizer.Etherman.Contracts.RollupManagerAddr = c.NetworkConfig.L1Config.RollupManagerAddr
 	c.Aggregator.Synchronizer.Etherman.Contracts.ZkEVMAddr = c.NetworkConfig.L1Config.ZkEVMAddr
 
@@ -182,6 +203,7 @@ func createSequenceSender(
 	}
 	cfg.SequenceSender.SenderAddress = auth.From
 	blockFialityType := etherman.BlockNumberFinality(cfg.SequenceSender.BlockFinality)
+
 	blockFinality, err := blockFialityType.ToBlockNum()
 	if err != nil {
 		log.Fatalf("Failed to create block finality. Err: %w, ", err)
@@ -241,9 +263,13 @@ func newTxBuilder(
 		}
 	case contracts.VersionElderberry:
 		if cfg.Common.IsValidiumMode {
-			txBuilder = txbuilder.NewTxBuilderElderberryValidium(ethman.Contracts.Elderberry.Rollup, da, *auth, cfg.SequenceSender.MaxBatchesForL1)
+			txBuilder = txbuilder.NewTxBuilderElderberryValidium(
+				ethman.Contracts.Elderberry.Rollup, da, *auth, cfg.SequenceSender.MaxBatchesForL1,
+			)
 		} else {
-			txBuilder = txbuilder.NewTxBuilderElderberryZKEVM(ethman.Contracts.Elderberry.Rollup, *auth, cfg.SequenceSender.MaxTxSizeForL1)
+			txBuilder = txbuilder.NewTxBuilderElderberryZKEVM(
+				ethman.Contracts.Elderberry.Rollup, *auth, cfg.SequenceSender.MaxTxSizeForL1,
+			)
 		}
 	default:
 		err = fmt.Errorf("unknown contract version: %s", cfg.Common.ContractVersions)
@@ -252,7 +278,12 @@ func newTxBuilder(
 	return txBuilder, err
 }
 
-func createAggoracle(cfg config.Config, l1Client, l2Client *ethclient.Client, syncer *l1infotreesync.L1InfoTreeSync) *aggoracle.AggOracle {
+func createAggoracle(
+	cfg config.Config,
+	l1Client,
+	l2Client *ethclient.Client,
+	syncer *l1infotreesync.L1InfoTreeSync,
+) *aggoracle.AggOracle {
 	var sender aggoracle.ChainSender
 	switch cfg.AggOracle.TargetChainType {
 	case aggoracle.EVMChain:
@@ -308,7 +339,7 @@ func newDataAvailability(c config.Config, etherman *etherman.Client) (*dataavail
 	// Backend specific config
 	daProtocolName, err := etherman.GetDAProtocolName()
 	if err != nil {
-		return nil, fmt.Errorf("error getting data availability protocol name: %v", err)
+		return nil, fmt.Errorf("error getting data availability protocol name: %w", err)
 	}
 	var daBackend dataavailability.DABackender
 	switch daProtocolName {
@@ -323,7 +354,7 @@ func newDataAvailability(c config.Config, etherman *etherman.Client) (*dataavail
 		}
 		dacAddr, err := etherman.GetDAProtocolAddr()
 		if err != nil {
-			return nil, fmt.Errorf("error getting trusted sequencer URI. Error: %v", err)
+			return nil, fmt.Errorf("error getting trusted sequencer URI. Error: %w", err)
 		}
 
 		daBackend, err = datacommittee.New(
@@ -401,9 +432,10 @@ func newState(c *config.Config, l2ChainID uint64, sqlDB *pgxpool.Pool) *state.St
 		ChainID: l2ChainID,
 	}
 
-	stateDb := pgstatestorage.NewPostgresStorage(stateCfg, sqlDB)
+	stateDB := pgstatestorage.NewPostgresStorage(stateCfg, sqlDB)
+
+	st := state.NewState(stateCfg, stateDB)
 
-	st := state.NewState(stateCfg, stateDb)
 	return st
 }
 
@@ -415,6 +447,7 @@ func newReorgDetector(
 	if err != nil {
 		log.Fatal(err)
 	}
+
 	return rd
 }
 
@@ -426,6 +459,7 @@ func isNeeded(casesWhereNeeded, actualCases []string) bool {
 			}
 		}
 	}
+
 	return false
 }
 
@@ -457,6 +491,7 @@ func runL1InfoTreeSyncerIfNeeded(
 		log.Fatal(err)
 	}
 	go l1InfoTreeSync.Start(ctx)
+
 	return l1InfoTreeSync
 }
 
@@ -469,6 +504,7 @@ func runL1ClientIfNeeded(components []string, urlRPCL1 string) *ethclient.Client
 	if err != nil {
 		log.Fatal(err)
 	}
+
 	return l1CLient
 }
 
@@ -481,25 +517,52 @@ func runL2ClientIfNeeded(components []string, urlRPCL2 string) *ethclient.Client
 	if err != nil {
 		log.Fatal(err)
 	}
+
 	return l2CLient
 }
 
-func runReorgDetectorL1IfNeeded(ctx context.Context, components []string, l1Client *ethclient.Client, cfg *reorgdetector.Config) *reorgdetector.ReorgDetector {
+func runReorgDetectorL1IfNeeded(
+	ctx context.Context,
+	components []string,
+	l1Client *ethclient.Client,
+	cfg *reorgdetector.Config,
+) (*reorgdetector.ReorgDetector, chan error) {
 	if !isNeeded([]string{SEQUENCE_SENDER, AGGREGATOR, AGGORACLE, RPC}, components) {
-		return nil
+		return nil, nil
 	}
 	rd := newReorgDetector(cfg, l1Client)
-	go rd.Start(ctx)
-	return rd
+
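+	// Start the detector in a goroutine and surface its terminal error through errChan,
+	// so the caller can react instead of the failure dying silently inside the goroutine.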
+	errChan := make(chan error)
+	go func() {
+		if err := rd.Start(ctx); err != nil {
+			errChan <- err
+		}
+		close(errChan)
+	}()
+
+	return rd, errChan
 }
 
-func runReorgDetectorL2IfNeeded(ctx context.Context, components []string, l2Client *ethclient.Client, cfg *reorgdetector.Config) *reorgdetector.ReorgDetector {
+func runReorgDetectorL2IfNeeded(
+	ctx context.Context,
+	components []string,
+	l2Client *ethclient.Client,
+	cfg *reorgdetector.Config,
+) (*reorgdetector.ReorgDetector, chan error) {
 	if !isNeeded([]string{AGGORACLE, RPC}, components) {
-		return nil
+		return nil, nil
 	}
 	rd := newReorgDetector(cfg, l2Client)
-	go rd.Start(ctx)
-	return rd
+
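+	// Same error-surfacing pattern as the L1 reorg detector above.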
+	errChan := make(chan error)
+	go func() {
+		if err := rd.Start(ctx); err != nil {
+			errChan <- err
+		}
+		close(errChan)
+	}()
+
+	return rd, errChan
 }
 
 func runClaimSponsorIfNeeded(
@@ -535,6 +598,7 @@ func runClaimSponsorIfNeeded(
 		log.Fatalf("error creating claim sponsor: %s", err)
 	}
 	go cs.Start(ctx)
+
 	return cs
 }
 
@@ -562,6 +626,7 @@ func runL1Bridge2InfoIndexSyncIfNeeded(
 		log.Fatalf("error creating l1Bridge2InfoIndexSync: %s", err)
 	}
 	go l1Bridge2InfoIndexSync.Start(ctx)
+
 	return l1Bridge2InfoIndexSync
 }
 
@@ -593,6 +658,7 @@ func runLastGERSyncIfNeeded(
 		log.Fatalf("error creating lastGERSync: %s", err)
 	}
 	go lastGERSync.Start(ctx)
+
 	return lastGERSync
 }
 
@@ -623,6 +689,7 @@ func runBridgeSyncL1IfNeeded(
 		log.Fatalf("error creating bridgeSyncL1: %s", err)
 	}
 	go bridgeSyncL1.Start(ctx)
+
 	return bridgeSyncL1
 }
 
@@ -654,6 +721,7 @@ func runBridgeSyncL2IfNeeded(
 		log.Fatalf("error creating bridgeSyncL2: %s", err)
 	}
 	go bridgeSyncL2.Start(ctx)
+
 	return bridgeSyncL2
 }
 
diff --git a/cmd/version.go b/cmd/version.go
index 51766c022..acba8f9fc 100644
--- a/cmd/version.go
+++ b/cmd/version.go
@@ -9,5 +9,6 @@ import (
 
 func versionCmd(*cli.Context) error {
 	zkevm.PrintVersion(os.Stdout)
+
 	return nil
 }
diff --git a/common/common.go b/common/common.go
index 259b2a8dc..c4dbdf18c 100644
--- a/common/common.go
+++ b/common/common.go
@@ -11,7 +11,9 @@ import (
 
 // Uint64ToBytes converts a uint64 to a byte slice
 func Uint64ToBytes(num uint64) []byte {
-	bytes := make([]byte, 8)
+	const uint64ByteSize = 8
+
+	bytes := make([]byte, uint64ByteSize)
 	binary.BigEndian.PutUint64(bytes, num)
 
 	return bytes
@@ -22,10 +24,13 @@ func BytesToUint64(bytes []byte) uint64 {
 	return binary.BigEndian.Uint64(bytes)
 }
 
-// Uint32To2Bytes converts a uint32 to a byte slice
+// Uint32ToBytes converts a uint32 to a byte slice in big-endian order
 func Uint32ToBytes(num uint32) []byte {
-	key := make([]byte, 4)
+	const uint32ByteSize = 4
+
+	key := make([]byte, uint32ByteSize)
 	binary.BigEndian.PutUint32(key, num)
+
 	return key
 }
 
@@ -34,6 +39,7 @@ func BytesToUint32(bytes []byte) uint32 {
 	return binary.BigEndian.Uint32(bytes)
 }
 
+// CalculateAccInputHash computes the hash of accumulated input data for a given batch.
 func CalculateAccInputHash(
 	oldAccInputHash common.Hash,
 	batchData []byte,
@@ -53,15 +59,19 @@ func CalculateAccInputHash(
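+	// Left-pad each component to its fixed width before hashing: v1, v3 and v6 to 32 bytes, v4 to 8, v5 to 20.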
 	for len(v1) < 32 {
 		v1 = append([]byte{0}, v1...)
 	}
+
 	for len(v3) < 32 {
 		v3 = append([]byte{0}, v3...)
 	}
+
 	for len(v4) < 8 {
 		v4 = append([]byte{0}, v4...)
 	}
+
 	for len(v5) < 20 {
 		v5 = append([]byte{0}, v5...)
 	}
+
 	for len(v6) < 32 {
 		v6 = append([]byte{0}, v6...)
 	}
diff --git a/common/config.go b/common/config.go
index 62670c6f5..fab4d0fd8 100644
--- a/common/config.go
+++ b/common/config.go
@@ -2,6 +2,7 @@ package common
 
 import "github.com/0xPolygon/cdk/translator"
 
+// Config holds the configuration for the CDK.
 type Config struct {
 	// IsValidiumMode has the value true if the sequence sender is running in validium mode.
 	IsValidiumMode bool `mapstructure:"IsValidiumMode"`
diff --git a/config/config.go b/config/config.go
index 76abbf203..9d00313dc 100644
--- a/config/config.go
+++ b/config/config.go
@@ -2,6 +2,7 @@ package config
 
 import (
 	"bytes"
+	"errors"
 	"fmt"
 	"path/filepath"
 	"strings"
@@ -40,7 +41,8 @@ const (
 	FlagComponents = "components"
 	// FlagHTTPAPI is the flag for http.api.
 	FlagHTTPAPI = "http.api"
-	// FlagKeyStorePath is the path of the key store file containing the private key of the account going to sing and approve the tokens
+	// FlagKeyStorePath is the path of the key store file containing the private key
+	// of the account that is going to sign and approve the tokens.
 	FlagKeyStorePath = "key-store-path"
 	// FlagPassword is the password needed to decrypt the key store
 	FlagPassword = "password"
@@ -118,10 +120,12 @@ func Default() (*Config, error) {
 	if err != nil {
 		return nil, err
 	}
+
 	err = viper.Unmarshal(&cfg, viper.DecodeHook(mapstructure.TextUnmarshallerHookFunc()))
 	if err != nil {
 		return nil, err
 	}
+
 	return &cfg, nil
 }
 
@@ -131,6 +135,7 @@ func Load(ctx *cli.Context) (*Config, error) {
 	if err != nil {
 		return nil, err
 	}
+
 	configFilePath := ctx.String(FlagCfg)
 	if configFilePath != "" {
 		dirName, fileName := filepath.Split(configFilePath)
@@ -142,24 +147,32 @@ func Load(ctx *cli.Context) (*Config, error) {
 		viper.SetConfigName(fileNameWithoutExtension)
 		viper.SetConfigType(fileExtension)
 	}
+
 	viper.AutomaticEnv()
 	replacer := strings.NewReplacer(".", "_")
 	viper.SetEnvKeyReplacer(replacer)
 	viper.SetEnvPrefix("CDK")
+
 	err = viper.ReadInConfig()
 	if err != nil {
-		_, ok := err.(viper.ConfigFileNotFoundError)
-		if ok {
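+		// A missing config file is tolerated (defaults and env vars still apply); any other read error aborts.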
+		var configNotFoundError viper.ConfigFileNotFoundError
+		if errors.As(err, &configNotFoundError) {
 			log.Infof("config file not found")
 		} else {
 			log.Infof("error reading config file: ", err)
+
 			return nil, err
 		}
 	}
 
 	decodeHooks := []viper.DecoderConfigOption{
 		// this allows arrays to be decoded from env var separated by ",", example: MY_VAR="value1,value2,value3"
-		viper.DecodeHook(mapstructure.ComposeDecodeHookFunc(mapstructure.TextUnmarshallerHookFunc(), mapstructure.StringToSliceHookFunc(","))),
+		viper.DecodeHook(
+			mapstructure.ComposeDecodeHookFunc(
+				mapstructure.TextUnmarshallerHookFunc(),
+				mapstructure.StringToSliceHookFunc(","),
+			),
+		),
 	}
 
 	err = viper.Unmarshal(&cfg, decodeHooks...)
diff --git a/config/network.go b/config/network.go
index 96359233a..fc3f75ce7 100644
--- a/config/network.go
+++ b/config/network.go
@@ -68,6 +68,7 @@ type genesisAccountFromJSON struct {
 
 func (cfg *Config) loadNetworkConfig(ctx *cli.Context) {
 	cfgPath := ctx.String(FlagCustomNetwork)
+
 	networkJSON, err := LoadGenesisFileAsString(cfgPath)
 	if err != nil {
 		panic(err.Error())
@@ -75,7 +76,7 @@ func (cfg *Config) loadNetworkConfig(ctx *cli.Context) {
 
 	config, err := LoadGenesisFromJSONString(networkJSON)
 	if err != nil {
-		panic(fmt.Errorf("failed to load genesis configuration from file. Error: %v", err))
+		panic(fmt.Errorf("failed to load genesis configuration from file. Error: %w", err))
 	}
 	cfg.NetworkConfig = config
 }
@@ -83,10 +84,11 @@ func (cfg *Config) loadNetworkConfig(ctx *cli.Context) {
 // LoadGenesisFileAsString loads the genesis file as a string
 func LoadGenesisFileAsString(cfgPath string) (string, error) {
 	if cfgPath != "" {
-		f, err := os.Open(cfgPath) //nolint:gosec
+		f, err := os.Open(cfgPath)
 		if err != nil {
 			return "", err
 		}
+
 		defer func() {
 			err := f.Close()
 			if err != nil {
@@ -98,6 +100,7 @@ func LoadGenesisFileAsString(cfgPath string) (string, error) {
 		if err != nil {
 			return "", err
 		}
+
 		return string(b), nil
 	} else {
 		return "", errors.New("custom netwrork file not provided. Please use the custom-network-file flag")
@@ -133,6 +136,7 @@ func LoadGenesisFromJSONString(jsonStr string) (NetworkConfig, error) {
 			}
 			cfg.Genesis.Actions = append(cfg.Genesis.Actions, action)
 		}
+
 		if account.Nonce != "" && account.Nonce != "0" {
 			action := &state.GenesisAction{
 				Address: account.Address,
@@ -141,6 +145,7 @@ func LoadGenesisFromJSONString(jsonStr string) (NetworkConfig, error) {
 			}
 			cfg.Genesis.Actions = append(cfg.Genesis.Actions, action)
 		}
+
 		if account.Bytecode != "" {
 			action := &state.GenesisAction{
 				Address:  account.Address,
@@ -149,6 +154,7 @@ func LoadGenesisFromJSONString(jsonStr string) (NetworkConfig, error) {
 			}
 			cfg.Genesis.Actions = append(cfg.Genesis.Actions, action)
 		}
+
 		if len(account.Storage) > 0 {
 			for storageKey, storageValue := range account.Storage {
 				action := &state.GenesisAction{
diff --git a/config/types/duration.go b/config/types/duration.go
index 7612291fa..d6855d105 100644
--- a/config/types/duration.go
+++ b/config/types/duration.go
@@ -18,6 +18,7 @@ func (d *Duration) UnmarshalText(data []byte) error {
 		return err
 	}
 	d.Duration = duration
+
 	return nil
 }
 
diff --git a/dataavailability/dataavailability.go b/dataavailability/dataavailability.go
index 2e8ec1249..fc4a482ec 100644
--- a/dataavailability/dataavailability.go
+++ b/dataavailability/dataavailability.go
@@ -20,10 +20,14 @@ func New(backend DABackender) (*DataAvailability, error) {
 	return da, da.backend.Init()
 }
 
-func (d *DataAvailability) PostSequenceBanana(ctx context.Context, sequenceBanana etherman.SequenceBanana) ([]byte, error) {
+// PostSequenceBanana sends sequence data to the backend and returns a response.
+func (d *DataAvailability) PostSequenceBanana(
+	ctx context.Context, sequenceBanana etherman.SequenceBanana,
+) ([]byte, error) {
 	return d.backend.PostSequenceBanana(ctx, sequenceBanana)
 }
 
+// PostSequenceElderberry sends batch data to the backend and returns a response.
 func (d *DataAvailability) PostSequenceElderberry(ctx context.Context, batchesData [][]byte) ([]byte, error) {
 	return d.backend.PostSequenceElderberry(ctx, batchesData)
 }
diff --git a/dataavailability/datacommittee/datacommittee.go b/dataavailability/datacommittee/datacommittee.go
index 22abd589b..2a3cca8e6 100644
--- a/dataavailability/datacommittee/datacommittee.go
+++ b/dataavailability/datacommittee/datacommittee.go
@@ -63,6 +63,7 @@ func New(
 	ethClient, err := ethclient.Dial(l1RPCURL)
 	if err != nil {
 		log.Errorf("error connecting to %s: %+v", l1RPCURL, err)
+
 		return nil, err
 	}
 
@@ -94,13 +95,14 @@ func (d *Backend) Init() error {
 		}
 	}
 	d.selectedCommitteeMember = selectedCommitteeMember
+
 	return nil
 }
 
 // GetSequence gets backend data one hash at a time. This should be optimized on the DAC side to get them all at once.
 func (d *Backend) GetSequence(_ context.Context, hashes []common.Hash, _ []byte) ([][]byte, error) {
 	// TODO: optimize this on the DAC side by implementing a multi-batch retrieval API
-	var batchData [][]byte
+	batchData := make([][]byte, 0, len(hashes))
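+	// Preallocate one slot per requested hash, since each hash yields exactly one batch.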
 	for _, h := range hashes {
 		data, err := d.GetBatchL2Data(h)
 		if err != nil {
@@ -108,6 +110,7 @@ func (d *Backend) GetSequence(_ context.Context, hashes []common.Hash, _ []byte)
 		}
 		batchData = append(batchData, data)
 	}
+
 	return batchData, nil
 }
 
@@ -129,6 +132,7 @@ func (d *Backend) GetBatchL2Data(hash common.Hash) ([]byte, error) {
 			if d.selectedCommitteeMember == intialMember {
 				break
 			}
+
 			continue
 		}
 		actualTransactionsHash := crypto.Keccak256Hash(data)
@@ -144,13 +148,16 @@ func (d *Backend) GetBatchL2Data(hash common.Hash) ([]byte, error) {
 			if d.selectedCommitteeMember == intialMember {
 				break
 			}
+
 			continue
 		}
+
 		return data, nil
 	}
 	if err := d.Init(); err != nil {
-		return nil, fmt.Errorf("error loading data committee: %s", err)
+		return nil, fmt.Errorf("error loading data committee: %w", err)
 	}
+
 	return nil, fmt.Errorf("couldn't get the data from any committee member")
 }
 
@@ -160,6 +167,7 @@ type signatureMsg struct {
 	err       error
 }
 
+// PostSequenceElderberry submits batches and collects signatures from committee members.
 func (d *Backend) PostSequenceElderberry(ctx context.Context, batchesData [][]byte) ([]byte, error) {
 	// Get current committee
 	committee, err := d.getCurrentDataCommittee()
@@ -188,9 +196,11 @@ func (d *Backend) PostSequenceElderberry(ctx context.Context, batchesData [][]by
 		go requestSignatureFromMember(signatureCtx, &signedSequenceElderberry,
 			func(c client.Client) ([]byte, error) { return c.SignSequence(ctx, signedSequenceElderberry) }, member, ch)
 	}
+
 	return collectSignatures(committee, ch, cancelSignatureCollection)
 }
 
+// PostSequenceBanana submits a sequence to the data committee and collects the signed response from them.
 func (d *Backend) PostSequenceBanana(ctx context.Context, sequence etherman.SequenceBanana) ([]byte, error) {
 	// Get current committee
 	committee, err := d.getCurrentDataCommittee()
@@ -245,7 +255,9 @@ func (d *Backend) PostSequenceBanana(ctx context.Context, sequence etherman.Sequ
 	return collectSignatures(committee, ch, cancelSignatureCollection)
 }
 
-func collectSignatures(committee *DataCommittee, ch chan signatureMsg, cancelSignatureCollection context.CancelFunc) ([]byte, error) {
+func collectSignatures(
+	committee *DataCommittee, ch chan signatureMsg, cancelSignatureCollection context.CancelFunc,
+) ([]byte, error) {
 	// Collect signatures
 	// Stop requesting as soon as we have N valid signatures
 	var (
@@ -260,6 +272,7 @@ func collectSignatures(committee *DataCommittee, ch chan signatureMsg, cancelSig
 			failedToCollect++
 			if len(committee.Members)-int(failedToCollect) < int(committee.RequiredSignatures) {
 				cancelSignatureCollection()
+
 				return nil, errors.New("too many members failed to send their signature")
 			}
 		} else {
@@ -299,6 +312,7 @@ func requestSignatureFromMember(ctx context.Context, signedSequence daTypes.Sign
 			addr: member.Addr,
 			err:  err,
 		}
+
 		return
 	}
 	// verify returned signature
@@ -309,6 +323,7 @@ func requestSignatureFromMember(ctx context.Context, signedSequence daTypes.Sign
 			addr: member.Addr,
 			err:  err,
 		}
+
 		return
 	}
 	if signer != member.Addr {
@@ -316,6 +331,7 @@ func requestSignatureFromMember(ctx context.Context, signedSequence daTypes.Sign
 			addr: member.Addr,
 			err:  fmt.Errorf("invalid signer. Expected %s, actual %s", member.Addr.Hex(), signer.Hex()),
 		}
+
 		return
 	}
 	ch <- signatureMsg{
@@ -339,6 +355,7 @@ func buildSignaturesAndAddrs(sigs signatureMsgs, members []DataCommitteeMember)
 		res = append(res, member.Addr.Bytes()...)
 	}
 	log.Debugf("full res %s", common.Bytes2Hex(res))
+
 	return res
 }
 
@@ -394,5 +411,6 @@ func (d *Backend) getCurrentDataCommitteeMembers() ([]DataCommitteeMember, error
 			URL:  member.Url,
 		})
 	}
+
 	return members, nil
 }
diff --git a/dataavailability/datacommittee/datacommittee_test.go b/dataavailability/datacommittee/datacommittee_test.go
index 17376a13d..ad1283245 100644
--- a/dataavailability/datacommittee/datacommittee_test.go
+++ b/dataavailability/datacommittee/datacommittee_test.go
@@ -84,6 +84,7 @@ func newTestingEnv(t *testing.T) (
 	if err != nil {
 		log.Fatal(err)
 	}
+
 	return dac, ethBackend, auth, da
 }
 
@@ -101,14 +102,14 @@ func newSimulatedDacman(t *testing.T, auth *bind.TransactOpts) (
 		return &Backend{}, nil, nil, nil
 	}
 	// 10000000 ETH in wei
-	balance, _ := new(big.Int).SetString("10000000000000000000000000", 10) //nolint:gomnd
+	balance, _ := new(big.Int).SetString("10000000000000000000000000", 10)
 	address := auth.From
 	genesisAlloc := map[common.Address]types.Account{
 		address: {
 			Balance: balance,
 		},
 	}
-	blockGasLimit := uint64(999999999999999999) //nolint:gomnd
+	blockGasLimit := uint64(999999999999999999)
 	client := simulated.NewBackend(genesisAlloc, simulated.WithBlockGasLimit(blockGasLimit))
 
 	// DAC Setup
@@ -137,6 +138,7 @@ func newSimulatedDacman(t *testing.T, auth *bind.TransactOpts) (
 	c := &Backend{
 		dataCommitteeContract: da,
 	}
+
 	return c, client, da, nil
 }
 
@@ -163,6 +165,7 @@ func deployDACProxy(auth *bind.TransactOpts, client bind.ContractBackend, dacImp
 		return common.Address{}, err
 	}
 	fmt.Println("DAC proxy deployed at", proxyAddr)
+
 	return proxyAddr, nil
 }
 
@@ -176,5 +179,6 @@ func deployProxy(auth *bind.TransactOpts,
 		implementationAddr,
 		initializeParams,
 	)
+
 	return addr, err
 }
diff --git a/dataavailability/interfaces.go b/dataavailability/interfaces.go
index b3630871b..f79b3c769 100644
--- a/dataavailability/interfaces.go
+++ b/dataavailability/interfaces.go
@@ -21,12 +21,14 @@ type SequenceSender interface {
 	SequenceSenderBanana
 }
 
+// SequenceSenderElderberry defines methods for sending Elderberry sequence data to the data availability backend.
 type SequenceSenderElderberry interface {
 	// PostSequence sends the sequence data to the data availability backend, and returns the dataAvailabilityMessage
 	// as expected by the contract
 	PostSequenceElderberry(ctx context.Context, batchesData [][]byte) ([]byte, error)
 }
 
+// SequenceSenderBanana defines methods for sending Banana sequence data to the data availability backend.
 type SequenceSenderBanana interface {
 	// PostSequence sends the sequence data to the data availability backend, and returns the dataAvailabilityMessage
 	// as expected by the contract
diff --git a/etherman/aggregator.go b/etherman/aggregator.go
index 87384a95a..4197c7a27 100644
--- a/etherman/aggregator.go
+++ b/etherman/aggregator.go
@@ -16,7 +16,9 @@ import (
 )
 
 // BuildTrustedVerifyBatchesTxData builds a []byte payload to be sent to the PoE SC method TrustedVerifyBatches.
-func (etherMan *Client) BuildTrustedVerifyBatchesTxData(lastVerifiedBatch, newVerifiedBatch uint64, inputs *ethmanTypes.FinalProofInputs, beneficiary common.Address) (to *common.Address, data []byte, err error) {
+func (etherMan *Client) BuildTrustedVerifyBatchesTxData(
+	lastVerifiedBatch, newVerifiedBatch uint64, inputs *ethmanTypes.FinalProofInputs, beneficiary common.Address,
+) (to *common.Address, data []byte, err error) {
 	opts, err := etherMan.generateRandomAuth()
 	if err != nil {
 		return nil, nil, fmt.Errorf("failed to build trusted verify batches, err: %w", err)
@@ -36,6 +38,7 @@ func (etherMan *Client) BuildTrustedVerifyBatchesTxData(lastVerifiedBatch, newVe
 	proof, err := convertProof(inputs.FinalProof.Proof)
 	if err != nil {
 		log.Errorf("error converting proof. Error: %v, Proof: %s", err, inputs.FinalProof.Proof)
+
 		return nil, nil, err
 	}
 
@@ -56,6 +59,7 @@ func (etherMan *Client) BuildTrustedVerifyBatchesTxData(lastVerifiedBatch, newVe
 		if parsedErr, ok := TryParseError(err); ok {
 			err = parsedErr
 		}
+
 		return nil, nil, err
 	}
 
@@ -63,16 +67,19 @@ func (etherMan *Client) BuildTrustedVerifyBatchesTxData(lastVerifiedBatch, newVe
 }
 
 // GetBatchAccInputHash gets the batch accumulated input hash from the Ethereum network
-func (etherman *Client) GetBatchAccInputHash(ctx context.Context, batchNumber uint64) (common.Hash, error) {
-	rollupData, err := etherman.Contracts.Banana.RollupManager.GetRollupSequencedBatches(&bind.CallOpts{Pending: false}, etherman.RollupID, batchNumber)
+func (etherMan *Client) GetBatchAccInputHash(ctx context.Context, batchNumber uint64) (common.Hash, error) {
+	rollupData, err := etherMan.Contracts.Banana.RollupManager.GetRollupSequencedBatches(
+		&bind.CallOpts{Pending: false}, etherMan.RollupID, batchNumber,
+	)
 	if err != nil {
 		return common.Hash{}, err
 	}
+
 	return rollupData.AccInputHash, nil
 }
 
 // GetRollupId returns the rollup id
-func (etherMan *Client) GetRollupId() uint32 {
+func (etherMan *Client) GetRollupId() uint32 { //nolint:stylecheck
 	return etherMan.RollupID
 }
 
@@ -109,6 +116,7 @@ func convertProof(p string) ([24][32]byte, error) {
 		copy(aux[:], p)
 		proof[i] = aux
 	}
+
 	return proof, nil
 }
 
@@ -117,5 +125,6 @@ func DecodeBytes(val *string) ([]byte, error) {
 	if val == nil {
 		return []byte{}, nil
 	}
+
 	return hex.DecodeString(strings.TrimPrefix(*val, "0x"))
 }
diff --git a/etherman/contracts/base.go b/etherman/contracts/base.go
index c2aabd023..acc19e76b 100644
--- a/etherman/contracts/base.go
+++ b/etherman/contracts/base.go
@@ -41,15 +41,23 @@ func (e *ContractBase) String() string {
 	return e.Version() + "/" + e.Name() + "@" + e.Address().String()
 }
 
-func NewContractMagic[C any, T any](constructor contractConstructorFunc[T], address common.Address, backend bind.ContractBackend, name NameType, version VersionType) (*C, error) {
+func NewContractMagic[C any, T any](
+	constructor contractConstructorFunc[T],
+	address common.Address,
+	backend bind.ContractBackend,
+	name NameType,
+	version VersionType,
+) (*C, error) {
 	contractBind, err := constructor(address, backend)
 	if err != nil {
 		log.Errorf("failed to bind contract %s at address %s. Err:%w", name, address.String(), err)
+
 		return nil, err
 	}
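+	// C is expected to embed the contract binding as its first field and the contract base as its second;
+	// both are populated via reflection below.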
 	tmp := new(C)
 	values := reflect.ValueOf(tmp).Elem()
 	values.FieldByIndex([]int{0}).Set(reflect.ValueOf(contractBind))
 	values.FieldByIndex([]int{1}).Set(reflect.ValueOf(NewContractBase(address, backend, name, version)))
+
 	return tmp, nil
 }
diff --git a/etherman/contracts/contracts_banana.go b/etherman/contracts/contracts_banana.go
index 39e3eb12c..d3d28f909 100644
--- a/etherman/contracts/contracts_banana.go
+++ b/etherman/contracts/contracts_banana.go
@@ -30,17 +30,23 @@ type ContractsBanana struct {
 }
 
 func NewContractsBanana(cfg config.L1Config, backend bind.ContractBackend) (*ContractsBanana, error) {
-
-	ger, err := NewContractMagic[GlobalExitRootBananaType](polygonzkevmglobalexitrootv2.NewPolygonzkevmglobalexitrootv2, cfg.GlobalExitRootManagerAddr, backend, ContractNameGlobalExitRoot, VersionBanana)
+	ger, err := NewContractMagic[GlobalExitRootBananaType](
+		polygonzkevmglobalexitrootv2.NewPolygonzkevmglobalexitrootv2,
+		cfg.GlobalExitRootManagerAddr,
+		backend, ContractNameGlobalExitRoot, VersionBanana)
 	if err != nil {
 		return nil, err
 	}
-	rollup, err := NewContractMagic[RollupBananaType](polygonvalidiumetrog.NewPolygonvalidiumetrog, cfg.ZkEVMAddr, backend, ContractNameRollup, VersionBanana)
+	rollup, err := NewContractMagic[RollupBananaType](
+		polygonvalidiumetrog.NewPolygonvalidiumetrog, cfg.ZkEVMAddr,
+		backend, ContractNameRollup, VersionBanana)
 	if err != nil {
 		return nil, err
 	}
 
-	rollupManager, err := NewContractMagic[RollupManagerBananaType](polygonrollupmanager.NewPolygonrollupmanager, cfg.RollupManagerAddr, backend, ContractNameRollupManager, VersionBanana)
+	rollupManager, err := NewContractMagic[RollupManagerBananaType](
+		polygonrollupmanager.NewPolygonrollupmanager, cfg.RollupManagerAddr,
+		backend, ContractNameRollupManager, VersionBanana)
 	if err != nil {
 		return nil, err
 	}
@@ -53,6 +59,6 @@ func NewContractsBanana(cfg config.L1Config, backend bind.ContractBackend) (*Con
 }
 
 func (c *ContractsBanana) String() string {
-	return "RollupManager: " + c.RollupManager.String() + "\nGlobalExitRoot: " + c.GlobalExitRoot.String() + "\nRollup: " + c.Rollup.String()
-
+	return "RollupManager: " + c.RollupManager.String() + "\nGlobalExitRoot: " +
+		c.GlobalExitRoot.String() + "\nRollup: " + c.Rollup.String()
 }
diff --git a/etherman/contracts/contracts_elderberry.go b/etherman/contracts/contracts_elderberry.go
index 45f53d143..3a3bf574f 100644
--- a/etherman/contracts/contracts_elderberry.go
+++ b/etherman/contracts/contracts_elderberry.go
@@ -30,16 +30,34 @@ type ContractsElderberry struct {
 }
 
 func NewContractsElderberry(cfg config.L1Config, backend bind.ContractBackend) (*ContractsElderberry, error) {
-	ger, err := NewContractMagic[GlobalExitRootElderberryType](polygonzkevmglobalexitrootv2.NewPolygonzkevmglobalexitrootv2, cfg.GlobalExitRootManagerAddr, backend, ContractNameGlobalExitRoot, VersionElderberry)
+	ger, err := NewContractMagic[GlobalExitRootElderberryType](
+		polygonzkevmglobalexitrootv2.NewPolygonzkevmglobalexitrootv2,
+		cfg.GlobalExitRootManagerAddr,
+		backend,
+		ContractNameGlobalExitRoot,
+		VersionElderberry,
+	)
 	if err != nil {
 		return nil, err
 	}
-	rollup, err := NewContractMagic[RollupElderberryType](polygonvalidiumetrog.NewPolygonvalidiumetrog, cfg.ZkEVMAddr, backend, ContractNameRollup, VersionElderberry)
+	rollup, err := NewContractMagic[RollupElderberryType](
+		polygonvalidiumetrog.NewPolygonvalidiumetrog,
+		cfg.ZkEVMAddr,
+		backend,
+		ContractNameRollup,
+		VersionElderberry,
+	)
 	if err != nil {
 		return nil, err
 	}
 
-	rollupManager, err := NewContractMagic[RollupManagerElderberryType](polygonrollupmanager.NewPolygonrollupmanager, cfg.RollupManagerAddr, backend, ContractNameRollupManager, VersionElderberry)
+	rollupManager, err := NewContractMagic[RollupManagerElderberryType](
+		polygonrollupmanager.NewPolygonrollupmanager,
+		cfg.RollupManagerAddr,
+		backend,
+		ContractNameRollupManager,
+		VersionElderberry,
+	)
 	if err != nil {
 		return nil, err
 	}
@@ -52,6 +70,6 @@ func NewContractsElderberry(cfg config.L1Config, backend bind.ContractBackend) (
 }
 
 func (c *ContractsElderberry) String() string {
-	return "RollupManager: " + c.RollupManager.String() + "\nGlobalExitRoot: " + c.GlobalExitRoot.String() + "\nRollup: " + c.Rollup.String()
-
+	return "RollupManager: " + c.RollupManager.String() + "\nGlobalExitRoot: " +
+		c.GlobalExitRoot.String() + "\nRollup: " + c.Rollup.String()
 }
diff --git a/etherman/errors.go b/etherman/errors.go
index bb7123fbb..c4fd93876 100644
--- a/etherman/errors.go
+++ b/etherman/errors.go
@@ -15,9 +15,14 @@ var (
 	//ErrInsufficientAllowance insufficient allowance
 	ErrInsufficientAllowance = errors.New("insufficient allowance")
 	// ErrBothGasPriceAndMaxFeeGasAreSpecified both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified
-	ErrBothGasPriceAndMaxFeeGasAreSpecified = errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified")
-	// ErrMaxFeeGasAreSpecifiedButLondonNotActive maxFeePerGas or maxPriorityFeePerGas specified but london is not active yet
-	ErrMaxFeeGasAreSpecifiedButLondonNotActive = errors.New("maxFeePerGas or maxPriorityFeePerGas specified but london is not active yet")
+	ErrBothGasPriceAndMaxFeeGasAreSpecified = errors.New(
+		"both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified",
+	)
+	// ErrMaxFeeGasAreSpecifiedButLondonNotActive maxFeePerGas or maxPriorityFeePerGas
+	// specified but london fork is not active yet
+	ErrMaxFeeGasAreSpecifiedButLondonNotActive = errors.New(
+		"maxFeePerGas or maxPriorityFeePerGas specified but london is not active yet",
+	)
 	// ErrNoSigner no signer to authorize the transaction with
 	ErrNoSigner = errors.New("no signer to authorize the transaction with")
 	// ErrMissingTrieNode means that a node is missing on the trie
@@ -48,5 +53,6 @@ func TryParseError(err error) (error, bool) {
 			}
 		}
 	}
+
 	return parsedError, exists
 }
diff --git a/etherman/errors_test.go b/etherman/errors_test.go
index cfc02ccc2..91ca6c500 100644
--- a/etherman/errors_test.go
+++ b/etherman/errors_test.go
@@ -19,7 +19,7 @@ func TestTryParseWithExactMatch(t *testing.T) {
 
 func TestTryParseWithContains(t *testing.T) {
 	expected := ErrTimestampMustBeInsideRange
-	smartContractErr := fmt.Errorf(" execution reverted: ProofOfEfficiency::sequenceBatches: %s", expected)
+	smartContractErr := fmt.Errorf(" execution reverted: ProofOfEfficiency::sequenceBatches: %w", expected)
 
 	actualErr, ok := TryParseError(smartContractErr)
 
diff --git a/etherman/etherman.go b/etherman/etherman.go
index 707fac5be..4f9e1c81f 100644
--- a/etherman/etherman.go
+++ b/etherman/etherman.go
@@ -71,7 +71,7 @@ type L1Config struct {
 	// PolAddr Address of the L1 Pol token Contract
 	PolAddr common.Address `json:"polTokenAddress" mapstructure:"PolAddr"`
 	// GlobalExitRootManagerAddr Address of the L1 GlobalExitRootManager contract
-	GlobalExitRootManagerAddr common.Address `json:"polygonZkEVMGlobalExitRootAddress" mapstructure:"GlobalExitRootManagerAddr"`
+	GlobalExitRootManagerAddr common.Address `json:"polygonZkEVMGlobalExitRootAddress" mapstructure:"GlobalExitRootManagerAddr"` //nolint:lll
 }
 
 // Client is a simple implementation of EtherMan.
@@ -93,11 +93,13 @@ func NewClient(cfg config.Config, l1Config config.L1Config, commonConfig cdkcomm
 	ethClient, err := ethclient.Dial(cfg.EthermanConfig.URL)
 	if err != nil {
 		log.Errorf("error connecting to %s: %+v", cfg.EthermanConfig.URL, err)
+
 		return nil, err
 	}
 	L1chainID, err := ethClient.ChainID(context.Background())
 	if err != nil {
 		log.Errorf("error getting L1chainID from %s: %+v", cfg.EthermanConfig.URL, err)
+
 		return nil, err
 	}
 	log.Infof("L1ChainID: %d", L1chainID.Uint64())
@@ -110,10 +112,14 @@ func NewClient(cfg config.Config, l1Config config.L1Config, commonConfig cdkcomm
 	rollupID, err := contracts.Banana.RollupManager.RollupAddressToID(&bind.CallOpts{Pending: false}, l1Config.ZkEVMAddr)
 	if err != nil {
 		log.Errorf("error getting rollupID from %s : %+v", contracts.Banana.RollupManager.String(), err)
+
 		return nil, err
 	}
 	if rollupID == 0 {
-		return nil, errors.New("rollupID is 0, is not a valid value. Check that rollup Address is correct " + l1Config.ZkEVMAddr.String())
+		return nil, errors.New(
+			"rollupID is 0, is not a valid value. Check that rollup Address is correct " +
+				l1Config.ZkEVMAddr.String(),
+		)
 	}
 	log.Infof("rollupID: %d (obtenied from SMC: %s )", rollupID, contracts.Banana.RollupManager.String())
 
@@ -149,7 +155,9 @@ type Order struct {
 }
 
 // WaitTxToBeMined waits for an L1 tx to be mined. It returns an error if the tx is reverted or the timeout is exceeded
-func (etherMan *Client) WaitTxToBeMined(ctx context.Context, tx *types.Transaction, timeout time.Duration) (bool, error) {
+func (etherMan *Client) WaitTxToBeMined(
+	ctx context.Context, tx *types.Transaction, timeout time.Duration,
+) (bool, error) {
 	// err := operations.WaitTxToBeMined(ctx, etherMan.EthClient, tx, timeout)
 	// if errors.Is(err, context.DeadlineExceeded) {
 	// 	return false, nil
@@ -167,6 +175,7 @@ func (etherMan *Client) GetSendSequenceFee(numBatches uint64) (*big.Int, error)
 		return nil, err
 	}
 	fee := new(big.Int).Mul(f, new(big.Int).SetUint64(numBatches))
+
 	return fee, nil
 }
 
@@ -188,17 +197,23 @@ func (etherMan *Client) EthBlockByNumber(ctx context.Context, blockNumber uint64
 		if errors.Is(err, ethereum.NotFound) || err.Error() == "block does not exist in blockchain" {
 			return nil, ErrNotFound
 		}
+
 		return nil, err
 	}
+
 	return block, nil
 }
 
 // GetLatestBatchNumber retrieves the latest proposed batch number from the smart contract
 func (etherMan *Client) GetLatestBatchNumber() (uint64, error) {
-	rollupData, err := etherMan.Contracts.Banana.RollupManager.RollupIDToRollupData(&bind.CallOpts{Pending: false}, etherMan.RollupID)
+	rollupData, err := etherMan.Contracts.Banana.RollupManager.RollupIDToRollupData(
+		&bind.CallOpts{Pending: false},
+		etherMan.RollupID,
+	)
 	if err != nil {
 		return 0, err
 	}
+
 	return rollupData.LastBatchSequenced, nil
 }
 
@@ -223,6 +238,7 @@ func (etherMan *Client) getBlockNumber(ctx context.Context, blockNumber rpc.Bloc
 	if err != nil || header == nil {
 		return 0, err
 	}
+
 	return header.Number.Uint64(), nil
 }
 
@@ -232,15 +248,20 @@ func (etherMan *Client) GetLatestBlockTimestamp(ctx context.Context) (uint64, er
 	if err != nil || header == nil {
 		return 0, err
 	}
+
 	return header.Time, nil
 }
 
 // GetLatestVerifiedBatchNum gets the latest verified batch number from Ethereum
 func (etherMan *Client) GetLatestVerifiedBatchNum() (uint64, error) {
-	rollupData, err := etherMan.Contracts.Banana.RollupManager.RollupIDToRollupData(&bind.CallOpts{Pending: false}, etherMan.RollupID)
+	rollupData, err := etherMan.Contracts.Banana.RollupManager.RollupIDToRollupData(
+		&bind.CallOpts{Pending: false},
+		etherMan.RollupID,
+	)
 	if err != nil {
 		return 0, err
 	}
+
 	return rollupData.LastVerifiedBatch, nil
 }
 
@@ -261,14 +282,19 @@ func (etherMan *Client) GetTrustedSequencerURL() (string, error) {
 
 // GetL2ChainID returns L2 Chain ID
 func (etherMan *Client) GetL2ChainID() (uint64, error) {
-	rollupData, err := etherMan.Contracts.Banana.RollupManager.RollupIDToRollupData(&bind.CallOpts{Pending: false}, etherMan.RollupID)
+	rollupData, err := etherMan.Contracts.Banana.RollupManager.RollupIDToRollupData(
+		&bind.CallOpts{Pending: false},
+		etherMan.RollupID,
+	)
 	log.Debug("chainID read from rollupManager: ", rollupData.ChainID)
 	if err != nil {
 		log.Debug("error from rollupManager: ", err)
+
 		return 0, err
 	} else if rollupData.ChainID == 0 {
 		return rollupData.ChainID, errors.New("chainID received is 0")
 	}
+
 	return rollupData.ChainID, nil
 }
 
@@ -283,7 +309,9 @@ func (etherMan *Client) CurrentNonce(ctx context.Context, account common.Address
 }
 
 // EstimateGas returns the estimated gas for the tx
-func (etherMan *Client) EstimateGas(ctx context.Context, from common.Address, to *common.Address, value *big.Int, data []byte) (uint64, error) {
+func (etherMan *Client) EstimateGas(
+	ctx context.Context, from common.Address, to *common.Address, value *big.Int, data []byte,
+) (uint64, error) {
 	return etherMan.EthClient.EstimateGas(ctx, ethereum.CallMsg{
 		From:  from,
 		To:    to,
@@ -305,15 +333,18 @@ func (etherMan *Client) CheckTxWasMined(ctx context.Context, txHash common.Hash)
 }
 
 // SignTx tries to sign a transaction according to the provided sender
-func (etherMan *Client) SignTx(ctx context.Context, sender common.Address, tx *types.Transaction) (*types.Transaction, error) {
+func (etherMan *Client) SignTx(
+	ctx context.Context, sender common.Address, tx *types.Transaction,
+) (*types.Transaction, error) {
 	auth, err := etherMan.getAuthByAddress(sender)
-	if err == ErrNotFound {
+	if errors.Is(err, ErrNotFound) {
 		return nil, ErrPrivateKeyNotFound
 	}
 	signedTx, err := auth.Signer(auth.From, tx)
 	if err != nil {
 		return nil, err
 	}
+
 	return signedTx, nil
 }
 
@@ -342,6 +373,7 @@ func (etherMan *Client) GetRevertMessage(ctx context.Context, tx *types.Transact
 func (etherMan *Client) AddOrReplaceAuth(auth bind.TransactOpts) error {
 	log.Infof("added or replaced authorization for address: %v", auth.From.String())
 	etherMan.auth[auth.From] = auth
+
 	return nil
 }
 
@@ -354,6 +386,7 @@ func (etherMan *Client) LoadAuthFromKeyStore(path, password string) (*bind.Trans
 
 	log.Infof("loaded authorization for address: %v", auth.From.String())
 	etherMan.auth[auth.From] = auth
+
 	return &auth, pk, nil
 }
 
@@ -371,6 +404,7 @@ func newKeyFromKeystore(path, password string) (*keystore.Key, error) {
 	if err != nil {
 		return nil, err
 	}
+
 	return key, nil
 }
 
@@ -388,6 +422,7 @@ func newAuthFromKeystore(path, password string, chainID uint64) (bind.TransactOp
 	if err != nil {
 		return bind.TransactOpts{}, nil, err
 	}
+
 	return *auth, key.PrivateKey, nil
 }
 
@@ -397,6 +432,7 @@ func (etherMan *Client) getAuthByAddress(addr common.Address) (bind.TransactOpts
 	if !found {
 		return bind.TransactOpts{}, ErrNotFound
 	}
+
 	return auth, nil
 }
 
@@ -406,6 +442,7 @@ func (etherMan *Client) GetLatestBlockHeader(ctx context.Context) (*types.Header
 	if err != nil || header == nil {
 		return nil, err
 	}
+
 	return header, nil
 }
 
@@ -433,7 +470,10 @@ func (etherMan *Client) GetL1InfoRoot(indexL1InfoRoot uint32) (common.Hash, erro
 	)
 
 	if indexL1InfoRoot > 0 {
-		lastL1InfoTreeRoot, err = etherMan.Contracts.Banana.GlobalExitRoot.L1InfoRootMap(&bind.CallOpts{Pending: false}, indexL1InfoRoot)
+		lastL1InfoTreeRoot, err = etherMan.Contracts.Banana.GlobalExitRoot.L1InfoRootMap(
+			&bind.CallOpts{Pending: false},
+			indexL1InfoRoot,
+		)
 		if err != nil {
 			log.Errorf("error calling SC globalexitroot L1InfoLeafMap: %v", err)
 		}
diff --git a/hex/hex.go b/hex/hex.go
index 4699eb208..7e5b1d016 100644
--- a/hex/hex.go
+++ b/hex/hex.go
@@ -53,7 +53,7 @@ func DecodeHex(str string) ([]byte, error) {
 func MustDecodeHex(str string) []byte {
 	buf, err := DecodeHex(str)
 	if err != nil {
-		panic(fmt.Errorf("could not decode hex: %v", err))
+		panic(fmt.Errorf("could not decode hex: %w", err))
 	}
 
 	return buf
@@ -62,6 +62,7 @@ func MustDecodeHex(str string) []byte {
 // DecodeUint64 type-checks and converts a hex string to a uint64
 func DecodeUint64(str string) uint64 {
 	i := DecodeBig(str)
+
 	return i.Uint64()
 }
 
@@ -69,6 +70,7 @@ func DecodeUint64(str string) uint64 {
 func EncodeUint64(i uint64) string {
 	enc := make([]byte, 2, 10) //nolint:gomnd
 	copy(enc, "0x")
+
 	return string(strconv.AppendUint(enc, i, Base))
 }
 
@@ -117,5 +119,6 @@ func IsValid(s string) bool {
 			return false
 		}
 	}
+
 	return true
 }
diff --git a/l1bridge2infoindexsync/downloader.go b/l1bridge2infoindexsync/downloader.go
index f14fcf8e4..0a609cb9f 100644
--- a/l1bridge2infoindexsync/downloader.go
+++ b/l1bridge2infoindexsync/downloader.go
@@ -34,6 +34,7 @@ func (d *downloader) getLastFinalizedL1Block(ctx context.Context) (uint64, error
 	if err != nil {
 		return 0, err
 	}
+
 	return b.NumberU64(), nil
 }
 
@@ -50,6 +51,7 @@ func (d *downloader) getLastL1InfoIndexUntilBlock(ctx context.Context, blockNum
 	if err != nil {
 		return 0, err
 	}
+
 	return info.L1InfoTreeIndex, nil
 }
 
@@ -58,6 +60,7 @@ func (d *downloader) getMainnetExitRootAtL1InfoTreeIndex(ctx context.Context, in
 	if err != nil {
 		return common.Hash{}, err
 	}
+
 	return leaf.MainnetExitRoot, nil
 }
 
diff --git a/l1bridge2infoindexsync/driver.go b/l1bridge2infoindexsync/driver.go
index ce681bf08..d014e2c56 100644
--- a/l1bridge2infoindexsync/driver.go
+++ b/l1bridge2infoindexsync/driver.go
@@ -2,6 +2,7 @@ package l1bridge2infoindexsync
 
 import (
 	"context"
+	"errors"
 	"time"
 
 	"github.com/0xPolygon/cdk/l1infotreesync"
@@ -43,8 +44,10 @@ func (d *driver) sync(ctx context.Context) {
 			attempts++
 			log.Errorf("error getting last processed block and index: %v", err)
 			d.rh.Handle("GetLastProcessedBlockAndL1InfoTreeIndex", attempts)
+
 			continue
 		}
+
 		break
 	}
 	for {
@@ -59,13 +62,16 @@ func (d *driver) sync(ctx context.Context) {
 				attempts++
 				log.Errorf("error getting target sync block: %v", err)
 				d.rh.Handle("getTargetSynchronizationBlock", attempts)
+
 				continue
 			}
+
 			break
 		}
 		if shouldWait {
 			log.Debugf("waiting for syncers to catch up")
 			time.Sleep(d.waitForSyncersPeriod)
+
 			continue
 		}
 
@@ -75,20 +81,24 @@ func (d *driver) sync(ctx context.Context) {
 		for {
 			lastL1InfoTreeIndex, err = d.downloader.getLastL1InfoIndexUntilBlock(ctx, syncUntilBlock)
 			if err != nil {
-				if err == l1infotreesync.ErrNotFound || err == l1infotreesync.ErrBlockNotProcessed {
+				if errors.Is(err, l1infotreesync.ErrNotFound) || errors.Is(err, l1infotreesync.ErrBlockNotProcessed) {
 					log.Debugf("l1 info tree index not ready, querying until block %d: %s", syncUntilBlock, err)
+
 					break
 				}
 				attempts++
 				log.Errorf("error getting last l1 info tree index: %v", err)
 				d.rh.Handle("getLastL1InfoIndexUntilBlock", attempts)
+
 				continue
 			}
 			found = true
+
 			break
 		}
 		if !found {
 			time.Sleep(d.waitForSyncersPeriod)
+
 			continue
 		}
 
@@ -108,9 +118,11 @@ func (d *driver) sync(ctx context.Context) {
 					attempts++
 					log.Errorf("error getting relation: %v", err)
 					d.rh.Handle("getRelation", attempts)
+
 					continue
 				}
 				relations = append(relations, relation)
+
 				break
 			}
 		}
@@ -122,8 +134,10 @@ func (d *driver) sync(ctx context.Context) {
 				attempts++
 				log.Errorf("error processing block: %v", err)
 				d.rh.Handle("processUntilBlock", attempts)
+
 				continue
 			}
+
 			break
 		}
 
@@ -135,8 +149,11 @@ func (d *driver) sync(ctx context.Context) {
 	}
 }
 
-func (d *driver) getTargetSynchronizationBlock(ctx context.Context, lpbProcessor uint64) (syncUntilBlock uint64, shouldWait bool, err error) {
-	lastFinalised, err := d.downloader.getLastFinalizedL1Block(ctx) // NOTE: if this had configurable finality, it would be needed to deal with reorgs
+func (d *driver) getTargetSynchronizationBlock(
+	ctx context.Context, lpbProcessor uint64,
+) (syncUntilBlock uint64, shouldWait bool, err error) {
+	// NOTE: if this had configurable finality, it would be needed to deal with reorgs
+	lastFinalised, err := d.downloader.getLastFinalizedL1Block(ctx)
 	if err != nil {
 		return
 	}
@@ -146,8 +163,10 @@ func (d *driver) getTargetSynchronizationBlock(ctx context.Context, lpbProcessor
 				"should wait because the last processed block (%d) is greater or equal than the %s (%d)",
 				blockToCheck, blockType, lastProcessed)
 			shouldWait = true
+
 			return true
 		}
+
 		return false
 	}
 	if checkProcessedBlockFn(lpbProcessor, lastFinalised, "last finalised") {
@@ -180,6 +199,7 @@ func (d *driver) getTargetSynchronizationBlock(ctx context.Context, lpbProcessor
 		log.Debugf("target sync block is the last processed block from bridge (%d)", lpbBridge)
 		syncUntilBlock = lpbBridge
 	}
+
 	return
 }
 
diff --git a/l1bridge2infoindexsync/e2e_test.go b/l1bridge2infoindexsync/e2e_test.go
index 2aa8e38f5..e757be516 100644
--- a/l1bridge2infoindexsync/e2e_test.go
+++ b/l1bridge2infoindexsync/e2e_test.go
@@ -37,7 +37,7 @@ func newSimulatedClient(authDeployer, authCaller *bind.TransactOpts) (
 	err error,
 ) {
 	ctx := context.Background()
-	balance, _ := new(big.Int).SetString("10000000000000000000000000", 10) //nolint:gomnd
+	balance, _ := new(big.Int).SetString("10000000000000000000000000", 10)
 	genesisAlloc := map[common.Address]types.Account{
 		authDeployer.From: {
 			Balance: balance,
@@ -46,27 +46,26 @@ func newSimulatedClient(authDeployer, authCaller *bind.TransactOpts) (
 			Balance: balance,
 		},
 	}
-	blockGasLimit := uint64(999999999999999999) //nolint:gomnd
+	blockGasLimit := uint64(999999999999999999)
 	client = simulated.NewBackend(genesisAlloc, simulated.WithBlockGasLimit(blockGasLimit))
 
 	bridgeImplementationAddr, _, _, err := polygonzkevmbridgev2.DeployPolygonzkevmbridgev2(authDeployer, client.Client())
 	if err != nil {
-		return
+		return nil, common.Address{}, common.Address{}, nil, nil, err
 	}
 	client.Commit()
 
 	nonce, err := client.Client().PendingNonceAt(ctx, authDeployer.From)
 	if err != nil {
-		return
+		return nil, common.Address{}, common.Address{}, nil, nil, err
 	}
 	precalculatedAddr := crypto.CreateAddress(authDeployer.From, nonce+1)
 	bridgeABI, err := polygonzkevmbridgev2.Polygonzkevmbridgev2MetaData.GetAbi()
 	if err != nil {
-		return
+		return nil, common.Address{}, common.Address{}, nil, nil, err
 	}
 	if bridgeABI == nil {
-		err = errors.New("GetABI returned nil")
-		return
+		return nil, common.Address{}, common.Address{}, nil, nil, errors.New("GetABI returned nil")
 	}
 	dataCallProxy, err := bridgeABI.Pack("initialize",
 		uint32(0),        // networkIDMainnet
@@ -77,7 +76,7 @@ func newSimulatedClient(authDeployer, authCaller *bind.TransactOpts) (
 		[]byte{}, // gasTokenMetadata
 	)
 	if err != nil {
-		return
+		return nil, common.Address{}, common.Address{}, nil, nil, err
 	}
 	bridgeAddr, _, _, err = transparentupgradableproxy.DeployTransparentupgradableproxy(
 		authDeployer,
@@ -87,33 +86,34 @@ func newSimulatedClient(authDeployer, authCaller *bind.TransactOpts) (
 		dataCallProxy,
 	)
 	if err != nil {
-		return
+		return nil, common.Address{}, common.Address{}, nil, nil, err
 	}
 	client.Commit()
 	bridgeContract, err = polygonzkevmbridgev2.NewPolygonzkevmbridgev2(bridgeAddr, client.Client())
 	if err != nil {
-		return
+		return nil, common.Address{}, common.Address{}, nil, nil, err
 	}
 	checkGERAddr, err := bridgeContract.GlobalExitRootManager(&bind.CallOpts{})
 	if err != nil {
-		return
+		return nil, common.Address{}, common.Address{}, nil, nil, err
 	}
 	if precalculatedAddr != checkGERAddr {
-		err = errors.New("error deploying bridge")
+		return nil, common.Address{}, common.Address{}, nil, nil, errors.New("error deploying bridge")
 	}
 
 	gerAddr, _, gerContract, err = polygonzkevmglobalexitrootv2.DeployPolygonzkevmglobalexitrootv2(
 		authDeployer, client.Client(), authCaller.From, bridgeAddr,
 	)
 	if err != nil {
-		return
+		return nil, common.Address{}, common.Address{}, nil, nil, err
 	}
 	client.Commit()
 
 	if precalculatedAddr != gerAddr {
-		err = errors.New("error calculating addr")
+		return nil, common.Address{}, common.Address{}, nil, nil, errors.New("error calculating addr")
 	}
-	return
+
+	return client, gerAddr, bridgeAddr, gerContract, bridgeContract, nil
 }
 
 func TestE2E(t *testing.T) {
@@ -186,6 +186,7 @@ func TestE2E(t *testing.T) {
 
 		// Wait for block to be finalised
 		updateAtBlock, err := client.Client().BlockNumber(ctx)
+		require.NoError(t, err)
 		for {
 			lastFinalisedBlock, err := client.Client().BlockByNumber(ctx, big.NewInt(int64(rpc.FinalizedBlockNumber)))
 			require.NoError(t, err)
@@ -206,6 +207,7 @@ func TestE2E(t *testing.T) {
 			require.NoError(t, err)
 			if lpb == lb.NumberU64() {
 				syncerUpToDate = true
+
 				break
 			}
 			time.Sleep(time.Millisecond * 100)
diff --git a/l1bridge2infoindexsync/l1bridge2infoindexsync.go b/l1bridge2infoindexsync/l1bridge2infoindexsync.go
index b1c8fc551..c24bebbae 100644
--- a/l1bridge2infoindexsync/l1bridge2infoindexsync.go
+++ b/l1bridge2infoindexsync/l1bridge2infoindexsync.go
@@ -47,11 +47,16 @@ func (s *L1Bridge2InfoIndexSync) Start(ctx context.Context) {
 	s.driver.sync(ctx)
 }
 
+// GetLastProcessedBlock retrieves the last processed block number by the processor.
 func (s *L1Bridge2InfoIndexSync) GetLastProcessedBlock(ctx context.Context) (uint64, error) {
 	lpb, _, err := s.processor.GetLastProcessedBlockAndL1InfoTreeIndex(ctx)
+
 	return lpb, err
 }
 
-func (s *L1Bridge2InfoIndexSync) GetL1InfoTreeIndexByDepositCount(ctx context.Context, depositCount uint32) (uint32, error) {
+// GetL1InfoTreeIndexByDepositCount retrieves the L1 Info Tree index for a given deposit count.
+func (s *L1Bridge2InfoIndexSync) GetL1InfoTreeIndexByDepositCount(
+	ctx context.Context, depositCount uint32,
+) (uint32, error) {
 	return s.processor.getL1InfoTreeIndexByBridgeIndex(ctx, depositCount)
 }
diff --git a/l1bridge2infoindexsync/processor.go b/l1bridge2infoindexsync/processor.go
index 9b86ad9b8..bfe9f3a67 100644
--- a/l1bridge2infoindexsync/processor.go
+++ b/l1bridge2infoindexsync/processor.go
@@ -39,11 +39,13 @@ func (lp *lastProcessed) MarshalBinary() ([]byte, error) {
 }
 
 func (lp *lastProcessed) UnmarshalBinary(data []byte) error {
-	if len(data) != 12 {
-		return fmt.Errorf("expected len %d, actual len %d", 12, len(data))
+	const expectedDataLength = 12
+	if len(data) != expectedDataLength {
+		return fmt.Errorf("expected len %d, actual len %d", expectedDataLength, len(data))
 	}
 	lp.block = common.BytesToUint64(data[:8])
 	lp.index = common.BytesToUint32(data[8:])
+
 	return nil
 }
 
@@ -61,6 +63,7 @@ func newProcessor(dbPath string) (*processor, error) {
 	if err != nil {
 		return nil, err
 	}
+
 	return &processor{
 		db: db,
 	}, nil
@@ -74,6 +77,7 @@ func (p *processor) GetLastProcessedBlockAndL1InfoTreeIndex(ctx context.Context)
 		return 0, 0, err
 	}
 	defer tx.Rollback()
+
 	return p.getLastProcessedBlockAndL1InfoTreeIndexWithTx(tx)
 }
 
@@ -87,19 +91,24 @@ func (p *processor) getLastProcessedBlockAndL1InfoTreeIndexWithTx(tx kv.Tx) (uin
 		if err := lp.UnmarshalBinary(lastProcessedBytes); err != nil {
 			return 0, 0, err
 		}
+
 		return lp.block, lp.index, nil
 	}
 }
 
-func (p *processor) updateLastProcessedBlockAndL1InfoTreeIndex(ctx context.Context, blockNum uint64, index uint32) error {
+func (p *processor) updateLastProcessedBlockAndL1InfoTreeIndex(
+	ctx context.Context, blockNum uint64, index uint32,
+) error {
 	tx, err := p.db.BeginRw(ctx)
 	if err != nil {
 		return err
 	}
 	if err := p.updateLastProcessedBlockAndL1InfoTreeIndexWithTx(tx, blockNum, index); err != nil {
 		tx.Rollback()
+
 		return err
 	}
+
 	return tx.Commit()
 }
 
@@ -112,10 +121,13 @@ func (p *processor) updateLastProcessedBlockAndL1InfoTreeIndexWithTx(tx kv.RwTx,
 	if err != nil {
 		return err
 	}
+
 	return tx.Put(lastProcessedTable, lastProcessedKey, value)
 }
 
-func (p *processor) processUntilBlock(ctx context.Context, lastProcessedBlock uint64, relations []bridge2L1InfoRelation) error {
+func (p *processor) processUntilBlock(
+	ctx context.Context, lastProcessedBlock uint64, relations []bridge2L1InfoRelation,
+) error {
 	tx, err := p.db.BeginRw(ctx)
 	if err != nil {
 		return err
@@ -125,6 +137,7 @@ func (p *processor) processUntilBlock(ctx context.Context, lastProcessedBlock ui
 		_, lastIndex, err := p.getLastProcessedBlockAndL1InfoTreeIndexWithTx(tx)
 		if err != nil {
 			tx.Rollback()
+
 			return err
 		}
 		if err := p.updateLastProcessedBlockAndL1InfoTreeIndexWithTx(
@@ -133,13 +146,15 @@ func (p *processor) processUntilBlock(ctx context.Context, lastProcessedBlock ui
 			lastIndex,
 		); err != nil {
 			tx.Rollback()
+
 			return err
 		}
+
 		return tx.Commit()
 	}
 
 	for _, relation := range relations {
-		if _, err := p.getL1InfoTreeIndexByBridgeIndexWithTx(tx, relation.bridgeIndex); err != ErrNotFound {
+		if _, err := p.getL1InfoTreeIndexByBridgeIndexWithTx(tx, relation.bridgeIndex); !errors.Is(err, ErrNotFound) {
 			// Note that indexes could be repeated as the L1 Info tree update can be produced by a rollup and not mainnet.
 			// Hence, if the index already exists, do not update it: it's better to keep the lowest possible index for the relation.
 			continue
@@ -150,6 +165,7 @@ func (p *processor) processUntilBlock(ctx context.Context, lastProcessedBlock ui
 			common.Uint32ToBytes(relation.l1InfoTreeIndex),
 		); err != nil {
 			tx.Rollback()
+
 			return err
 		}
 	}
@@ -160,6 +176,7 @@ func (p *processor) processUntilBlock(ctx context.Context, lastProcessedBlock ui
 		relations[len(relations)-1].l1InfoTreeIndex,
 	); err != nil {
 		tx.Rollback()
+
 		return err
 	}
 
@@ -184,5 +201,6 @@ func (p *processor) getL1InfoTreeIndexByBridgeIndexWithTx(tx kv.Tx, depositCount
 	if indexBytes == nil {
 		return 0, ErrNotFound
 	}
+
 	return common.BytesToUint32(indexBytes), nil
 }
diff --git a/l1infotree/hash.go b/l1infotree/hash.go
index b07c3f105..120ba6c63 100644
--- a/l1infotree/hash.go
+++ b/l1infotree/hash.go
@@ -13,9 +13,10 @@ func Hash(data ...[32]byte) [32]byte {
 	var res [32]byte
 	hash := sha3.NewLegacyKeccak256()
 	for _, d := range data {
-		hash.Write(d[:]) //nolint:errcheck,gosec
+		hash.Write(d[:])
 	}
 	copy(res[:], hash.Sum(nil))
+
 	return res
 }
 
@@ -23,11 +24,13 @@ func generateZeroHashes(height uint8) [][32]byte {
 	var zeroHashes = [][32]byte{
 		common.Hash{},
 	}
-	// This generates a leaf = HashZero in position 0. In the rest of the positions that are equivalent to the ascending levels,
-	// we set the hashes of the nodes. So all nodes from level i=5 will have the same value and same children nodes.
+	// This generates a leaf = HashZero in position 0. In the rest of the positions that
+	// are equivalent to the ascending levels, we set the hashes of the nodes.
+	// So all nodes from level i=5 will have the same value and same children nodes.
 	for i := 1; i <= int(height); i++ {
 		zeroHashes = append(zeroHashes, Hash(zeroHashes[i-1], zeroHashes[i-1]))
 	}
+
 	return zeroHashes
 }
 
@@ -37,5 +40,6 @@ func HashLeafData(ger, prevBlockHash common.Hash, minTimestamp uint64) [32]byte
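+	// minTimestamp is serialized as 8 big-endian bytes and hashed together with ger and prevBlockHash.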
 	t := make([]byte, 8) //nolint:gomnd
 	binary.BigEndian.PutUint64(t, minTimestamp)
 	copy(res[:], keccak256.Hash(ger.Bytes(), prevBlockHash.Bytes(), t))
+
 	return res
 }
diff --git a/l1infotree/tree.go b/l1infotree/tree.go
index 6a5ad3bc8..6f6b74062 100644
--- a/l1infotree/tree.go
+++ b/l1infotree/tree.go
@@ -27,29 +27,34 @@ func NewL1InfoTree(height uint8, initialLeaves [][32]byte) (*L1InfoTree, error)
 	mt.siblings, mt.currentRoot, err = mt.initSiblings(initialLeaves)
 	if err != nil {
 		log.Error("error initializing siblings. Error: ", err)
+
 		return nil, err
 	}
 	log.Debug("Initial count: ", mt.count)
 	log.Debug("Initial root: ", mt.currentRoot)
+
 	return mt, nil
 }
 
 // ResetL1InfoTree resets the L1InfoTree.
 func (mt *L1InfoTree) ResetL1InfoTree(initialLeaves [][32]byte) (*L1InfoTree, error) {
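+	// The L1 info tree has a fixed depth of 32 levels.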
+	const defaultTreeHeight = 32
 	log.Info("Resetting L1InfoTree...")
 	newMT := &L1InfoTree{
-		zeroHashes: generateZeroHashes(32), // nolint:gomnd
-		height:     32,                     // nolint:gomnd
+		zeroHashes: generateZeroHashes(defaultTreeHeight),
+		height:     defaultTreeHeight,
 		count:      uint32(len(initialLeaves)),
 	}
 	var err error
 	newMT.siblings, newMT.currentRoot, err = newMT.initSiblings(initialLeaves)
 	if err != nil {
 		log.Error("error initializing siblings. Error: ", err)
+
 		return nil, err
 	}
 	log.Debug("Reset initial count: ", newMT.count)
 	log.Debug("Reset initial root: ", newMT.currentRoot)
+
 	return newMT, nil
 }
 
@@ -59,11 +64,12 @@ func buildIntermediate(leaves [][32]byte) ([][][]byte, [][32]byte) {
 		hashes [][32]byte
 	)
 	for i := 0; i < len(leaves); i += 2 {
-		var left, right int = i, i + 1
+		var left, right = i, i + 1
 		hash := Hash(leaves[left], leaves[right])
 		nodes = append(nodes, [][]byte{hash[:], leaves[left][:], leaves[right][:]})
 		hashes = append(hashes, hash)
 	}
+
 	return nodes, hashes
 }
 
@@ -117,7 +123,7 @@ func (mt *L1InfoTree) ComputeMerkleProof(gerIndex uint32, leaves [][32]byte) ([]
 			hashes [][32]byte
 		)
 		for i := 0; i < len(leaves); i += 2 {
-			var left, right int = i, i + 1
+			var left, right = i, i + 1
 			hash := Hash(leaves[left], leaves[right])
 			nsi = append(nsi, [][]byte{hash[:], leaves[left][:], leaves[right][:]})
 			hashes = append(hashes, hash)
@@ -165,6 +171,7 @@ func (mt *L1InfoTree) AddLeaf(index uint32, leaf [32]byte) (common.Hash, error)
 	}
 	mt.currentRoot = cur
 	mt.count++
+
 	return cur, nil
 }
 
@@ -184,8 +191,10 @@ func (mt *L1InfoTree) initSiblings(initialLeaves [][32]byte) ([][32]byte, common
 		root, err := mt.BuildL1InfoRoot(initialLeaves)
 		if err != nil {
 			log.Error("error calculating initial root: ", err)
+
 			return nil, [32]byte{}, err
 		}
+
 		return siblings, root, nil
 	}
 
diff --git a/l1infotreesync/config.go b/l1infotreesync/config.go
index 1b1d80143..64318fae4 100644
--- a/l1infotreesync/config.go
+++ b/l1infotreesync/config.go
@@ -11,7 +11,7 @@ type Config struct {
 	RollupManagerAddr  common.Address `mapstructure:"RollupManagerAddr"`
 	SyncBlockChunkSize uint64         `mapstructure:"SyncBlockChunkSize"`
 	// BlockFinality indicates the status of the blocks that will be queried in order to sync
-	BlockFinality              string         `jsonschema:"enum=LatestBlock, enum=SafeBlock, enum=PendingBlock, enum=FinalizedBlock, enum=EarliestBlock" mapstructure:"BlockFinality"`
+	BlockFinality              string         `jsonschema:"enum=LatestBlock, enum=SafeBlock, enum=PendingBlock, enum=FinalizedBlock, enum=EarliestBlock" mapstructure:"BlockFinality"` //nolint:lll
 	URLRPCL1                   string         `mapstructure:"URLRPCL1"`
 	WaitForNewBlocksPeriod     types.Duration `mapstructure:"WaitForNewBlocksPeriod"`
 	InitialBlock               uint64         `mapstructure:"InitialBlock"`
diff --git a/l1infotreesync/downloader.go b/l1infotreesync/downloader.go
index bc4305bce..060fa6aeb 100644
--- a/l1infotreesync/downloader.go
+++ b/l1infotreesync/downloader.go
@@ -15,11 +15,15 @@ import (
 )
 
 var (
-	updateL1InfoTreeSignatureV1             = crypto.Keccak256Hash([]byte("UpdateL1InfoTree(bytes32,bytes32)"))
-	updateL1InfoTreeSignatureV2             = crypto.Keccak256Hash([]byte("UpdateL1InfoTreeV2(bytes32,uint32,uint256,uint64)"))
-	verifyBatchesSignature                  = crypto.Keccak256Hash([]byte("VerifyBatches(uint32,uint64,bytes32,bytes32,address)"))
-	verifyBatchesTrustedAggregatorSignature = crypto.Keccak256Hash([]byte("VerifyBatchesTrustedAggregator(uint32,uint64,bytes32,bytes32,address)"))
-	initL1InfoRootMapSignature              = crypto.Keccak256Hash([]byte("InitL1InfoRootMap(uint32,bytes32)"))
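+	// Each value is topic 0 of the corresponding log: the Keccak-256 hash of the event's canonical signature.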
+	updateL1InfoTreeSignatureV1 = crypto.Keccak256Hash([]byte("UpdateL1InfoTree(bytes32,bytes32)"))
+	updateL1InfoTreeSignatureV2 = crypto.Keccak256Hash([]byte("UpdateL1InfoTreeV2(bytes32,uint32,uint256,uint64)"))
+	verifyBatchesSignature      = crypto.Keccak256Hash(
+		[]byte("VerifyBatches(uint32,uint64,bytes32,bytes32,address)"),
+	)
+	verifyBatchesTrustedAggregatorSignature = crypto.Keccak256Hash(
+		[]byte("VerifyBatchesTrustedAggregator(uint32,uint64,bytes32,bytes32,address)"),
+	)
+	initL1InfoRootMapSignature = crypto.Keccak256Hash([]byte("InitL1InfoRootMap(uint32,bytes32)"))
 )
 
 type EthClienter interface {
@@ -43,7 +47,7 @@ func buildAppender(client EthClienter, globalExitRoot, rollupManager common.Addr
 		init, err := ger.ParseInitL1InfoRootMap(l)
 		if err != nil {
 			return fmt.Errorf(
-				"error parsing log %+v using ger.ParseInitL1InfoRootMap: %v",
+				"error parsing log %+v using ger.ParseInitL1InfoRootMap: %w",
 				l, err,
 			)
 		}
@@ -51,13 +55,14 @@ func buildAppender(client EthClienter, globalExitRoot, rollupManager common.Addr
 			LeafCount:         init.LeafCount,
 			CurrentL1InfoRoot: init.CurrentL1InfoRoot,
 		}})
+
 		return nil
 	}
 	appender[updateL1InfoTreeSignatureV1] = func(b *sync.EVMBlock, l types.Log) error {
 		l1InfoTreeUpdate, err := ger.ParseUpdateL1InfoTree(l)
 		if err != nil {
 			return fmt.Errorf(
-				"error parsing log %+v using ger.ParseUpdateL1InfoTree: %v",
+				"error parsing log %+v using ger.ParseUpdateL1InfoTree: %w",
 				l, err,
 			)
 		}
@@ -67,26 +72,28 @@ func buildAppender(client EthClienter, globalExitRoot, rollupManager common.Addr
 			ParentHash:      b.ParentHash,
 			Timestamp:       b.Timestamp,
 		}})
+
 		return nil
 	}
 
 	// TODO: integrate this event to perform sanity checks
-	appender[updateL1InfoTreeSignatureV2] = func(b *sync.EVMBlock, l types.Log) error {
+	appender[updateL1InfoTreeSignatureV2] = func(b *sync.EVMBlock, l types.Log) error { //nolint:unparam
 		l1InfoTreeUpdate, err := ger.ParseUpdateL1InfoTreeV2(l)
 		if err != nil {
 			return fmt.Errorf(
-				"error parsing log %+v using ger.ParseUpdateL1InfoTreeV2: %v",
+				"error parsing log %+v using ger.ParseUpdateL1InfoTreeV2: %w",
 				l, err,
 			)
 		}
 		log.Infof("updateL1InfoTreeSignatureV2: expected root: %s", common.Bytes2Hex(l1InfoTreeUpdate.CurrentL1InfoRoot[:]))
+
 		return nil
 	}
 	appender[verifyBatchesSignature] = func(b *sync.EVMBlock, l types.Log) error {
 		verifyBatches, err := rm.ParseVerifyBatches(l)
 		if err != nil {
 			return fmt.Errorf(
-				"error parsing log %+v using rm.ParseVerifyBatches: %v",
+				"error parsing log %+v using rm.ParseVerifyBatches: %w",
 				l, err,
 			)
 		}
@@ -97,13 +104,14 @@ func buildAppender(client EthClienter, globalExitRoot, rollupManager common.Addr
 			ExitRoot:   verifyBatches.ExitRoot,
 			Aggregator: verifyBatches.Aggregator,
 		}})
+
 		return nil
 	}
 	appender[verifyBatchesTrustedAggregatorSignature] = func(b *sync.EVMBlock, l types.Log) error {
 		verifyBatches, err := rm.ParseVerifyBatchesTrustedAggregator(l)
 		if err != nil {
 			return fmt.Errorf(
-				"error parsing log %+v using rm.ParseVerifyBatches: %v",
+				"error parsing log %+v using rm.ParseVerifyBatches: %w",
 				l, err,
 			)
 		}
@@ -114,6 +122,7 @@ func buildAppender(client EthClienter, globalExitRoot, rollupManager common.Addr
 			ExitRoot:   verifyBatches.ExitRoot,
 			Aggregator: verifyBatches.Aggregator,
 		}})
+
 		return nil
 	}
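
The signature variables reformatted above are the topic-0 values the downloader matches logs against: an EVM log's first topic is the Keccak-256 hash of the event's canonical signature string. A standalone check using the same go-ethereum helper:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// Canonical form: event name plus parameter types, no names or spaces.
	sig := "UpdateL1InfoTree(bytes32,bytes32)"
	topic0 := crypto.Keccak256Hash([]byte(sig))
	fmt.Println(topic0.Hex())
}
```
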
 
diff --git a/l1infotreesync/e2e_test.go b/l1infotreesync/e2e_test.go
index 562f0e390..e0437d662 100644
--- a/l1infotreesync/e2e_test.go
+++ b/l1infotreesync/e2e_test.go
@@ -33,14 +33,14 @@ func newSimulatedClient(auth *bind.TransactOpts) (
 	err error,
 ) {
 	ctx := context.Background()
-	balance, _ := new(big.Int).SetString("10000000000000000000000000", 10) //nolint:gomnd
+	balance, _ := new(big.Int).SetString("10000000000000000000000000", 10)
 	address := auth.From
 	genesisAlloc := map[common.Address]types.Account{
 		address: {
 			Balance: balance,
 		},
 	}
-	blockGasLimit := uint64(999999999999999999) //nolint:gomnd
+	blockGasLimit := uint64(999999999999999999)
 	client = simulated.NewBackend(genesisAlloc, simulated.WithBlockGasLimit(blockGasLimit))
 
 	nonce, err := client.Client().PendingNonceAt(ctx, auth.From)
@@ -63,6 +63,7 @@ func newSimulatedClient(auth *bind.TransactOpts) (
 	if precalculatedAddr != gerAddr {
 		err = errors.New("error calculating addr")
 	}
+
 	return
 }
 
@@ -139,7 +140,7 @@ func TestFinalised(t *testing.T) {
 	require.NoError(t, err)
 	auth, err := bind.NewKeyedTransactorWithChainID(privateKey, big.NewInt(1337))
 	require.NoError(t, err)
-	client, _, _, _, _, err := newSimulatedClient(auth)
+	client, _, _, _, _, err := newSimulatedClient(auth) //nolint:dogsled
 	require.NoError(t, err)
 	for i := 0; i < 100; i++ {
 		client.Commit()
@@ -215,7 +216,8 @@ func TestStressAndReorgs(t *testing.T) {
 			if targetReorgBlockNum < currentBlockNum { // we are dealing with uints...
 				reorgBlock, err := client.Client().BlockByNumber(ctx, big.NewInt(int64(targetReorgBlockNum)))
 				require.NoError(t, err)
-				client.Fork(reorgBlock.Hash())
+				err = client.Fork(reorgBlock.Hash())
+				require.NoError(t, err)
 			}
 		}
 	}
@@ -229,6 +231,7 @@ func TestStressAndReorgs(t *testing.T) {
 		require.NoError(t, err)
 		if lpb == lb {
 			syncerUpToDate = true
+
 			break
 		}
 		time.Sleep(time.Millisecond * 100)
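
The syncerUpToDate loop above (and its twin in the lastgersync e2e test) is a poll-until-converged pattern. A generic sketch with an explicit deadline instead of a fixed retry count (the deadline-based shape is an assumption, not what the tests actually do):

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// waitUntil polls cond every interval until it reports true or the
// timeout elapses, returning early on any error from cond.
func waitUntil(cond func() (bool, error), interval, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		ok, err := cond()
		if err != nil {
			return err
		}
		if ok {
			return nil
		}
		time.Sleep(interval)
	}
	return errors.New("condition not met before timeout")
}

func main() {
	n := 0
	err := waitUntil(func() (bool, error) {
		n++
		return n >= 3, nil
	}, 10*time.Millisecond, time.Second)
	fmt.Println(n, err) // 3 <nil>
}
```
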
diff --git a/l1infotreesync/l1infotreesync.go b/l1infotreesync/l1infotreesync.go
index 8cd3ee70c..69868b559 100644
--- a/l1infotreesync/l1infotreesync.go
+++ b/l1infotreesync/l1infotreesync.go
@@ -80,6 +80,7 @@ func New(
 	if err != nil {
 		return nil, err
 	}
+
 	return &L1InfoTreeSync{
 		processor: processor,
 		driver:    driver,
@@ -92,15 +93,20 @@ func (s *L1InfoTreeSync) Start(ctx context.Context) {
 }
 
 // GetL1InfoTreeMerkleProof creates a merkle proof for the L1 Info tree
-func (s *L1InfoTreeSync) GetL1InfoTreeMerkleProof(ctx context.Context, index uint32) ([32]common.Hash, common.Hash, error) {
+func (s *L1InfoTreeSync) GetL1InfoTreeMerkleProof(
+	ctx context.Context, index uint32,
+) ([32]common.Hash, common.Hash, error) {
 	return s.processor.GetL1InfoTreeMerkleProof(ctx, index)
 }
 
 // GetRollupExitTreeMerkleProof creates a merkle proof for the rollup exit tree
-func (s *L1InfoTreeSync) GetRollupExitTreeMerkleProof(ctx context.Context, networkID uint32, root common.Hash) ([32]common.Hash, error) {
+func (s *L1InfoTreeSync) GetRollupExitTreeMerkleProof(
+	ctx context.Context, networkID uint32, root common.Hash,
+) ([32]common.Hash, error) {
 	if networkID == 0 {
 		return tree.EmptyProof, nil
 	}
+
 	return s.processor.rollupExitTree.GetProof(ctx, networkID-1, root)
 }
 
@@ -141,9 +147,12 @@ func (s *L1InfoTreeSync) GetLastProcessedBlock(ctx context.Context) (uint64, err
 	return s.processor.GetLastProcessedBlock(ctx)
 }
 
-func (s *L1InfoTreeSync) GetLocalExitRoot(ctx context.Context, networkID uint32, rollupExitRoot common.Hash) (common.Hash, error) {
+func (s *L1InfoTreeSync) GetLocalExitRoot(
+	ctx context.Context, networkID uint32, rollupExitRoot common.Hash,
+) (common.Hash, error) {
 	if networkID == 0 {
 		return common.Hash{}, errors.New("network 0 is not a rollup, and it's not part of the rollup exit tree")
 	}
+
 	return s.processor.rollupExitTree.GetLeaf(ctx, networkID-1, rollupExitRoot)
 }
diff --git a/l1infotreesync/processor.go b/l1infotreesync/processor.go
index 286ee4fc3..a6fa28a17 100644
--- a/l1infotreesync/processor.go
+++ b/l1infotreesync/processor.go
@@ -103,6 +103,7 @@ func (l *storeLeaf) Hash() ethCommon.Hash {
 	t := make([]byte, 8) //nolint:gomnd
 	binary.BigEndian.PutUint64(t, l.Timestamp)
 	copy(res[:], keccak256.Hash(l.GlobalExitRoot().Bytes(), l.ParentHash.Bytes(), t))
+
 	return res
 }
 
@@ -120,6 +121,7 @@ func (l *storeLeaf) GlobalExitRoot() ethCommon.Hash {
 	hasher.Write(l.MainnetExitRoot[:])
 	hasher.Write(l.RollupExitRoot[:])
 	copy(gerBytes[:], hasher.Sum(nil))
+
 	return gerBytes
 }
 
@@ -132,6 +134,7 @@ func newProcessor(ctx context.Context, dbPath string) (*processor, error) {
 		}
 		tree.AddTables(cfg, dbPrefix+rollupExitTreeSuffix)
 		tree.AddTables(cfg, dbPrefix+l1InfoTreeSuffix)
+
 		return cfg
 	}
 	db, err := mdbx.NewMDBX(nil).
@@ -155,11 +158,14 @@ func newProcessor(ctx context.Context, dbPath string) (*processor, error) {
 		return nil, err
 	}
 	p.rollupExitTree = rollupExitTree
+
 	return p, nil
 }
 
 // GetL1InfoTreeMerkleProof creates a merkle proof for the L1 Info tree
-func (p *processor) GetL1InfoTreeMerkleProof(ctx context.Context, index uint32) ([32]ethCommon.Hash, ethCommon.Hash, error) {
+func (p *processor) GetL1InfoTreeMerkleProof(
+	ctx context.Context, index uint32,
+) ([32]ethCommon.Hash, ethCommon.Hash, error) {
 	tx, err := p.db.BeginRo(ctx)
 	if err != nil {
 		return tree.EmptyProof, ethCommon.Hash{}, err
@@ -215,6 +221,7 @@ func (p *processor) GetLatestInfoUntilBlock(ctx context.Context, blockNum uint64
 	if err := json.Unmarshal(v, &blk); err != nil {
 		return nil, err
 	}
+
 	return p.getInfoByIndexWithTx(tx, blk.LastIndex-1)
 }
 
@@ -225,6 +232,7 @@ func (p *processor) GetInfoByIndex(ctx context.Context, index uint32) (*L1InfoTr
 		return nil, err
 	}
 	defer tx.Rollback()
+
 	return p.getInfoByIndexWithTx(tx, index)
 }
 
@@ -240,6 +248,7 @@ func (p *processor) getInfoByIndexWithTx(tx kv.Tx, index uint32) (*L1InfoTreeLea
 	if err := json.Unmarshal(infoBytes, &info); err != nil {
 		return nil, err
 	}
+
 	return &L1InfoTreeLeaf{
 		L1InfoTreeIndex:   info.Index,
 		PreviousBlockHash: info.ParentHash,
@@ -258,6 +267,7 @@ func (p *processor) GetLastProcessedBlock(ctx context.Context) (uint64, error) {
 		return 0, err
 	}
 	defer tx.Rollback()
+
 	return p.getLastProcessedBlockWithTx(tx)
 }
 
@@ -268,6 +278,7 @@ func (p *processor) getLastProcessedBlockWithTx(tx kv.Tx) (uint64, error) {
 	} else if blockNumBytes == nil {
 		return 0, nil
 	}
+
 	return common.BytesToUint64(blockNumBytes), nil
 }
 
@@ -290,11 +301,13 @@ func (p *processor) Reorg(ctx context.Context, firstReorgedBlock uint64) error {
 	for blkKey, blkValue, err := c.Seek(firstKey); blkKey != nil; blkKey, blkValue, err = c.Next() {
 		if err != nil {
 			tx.Rollback()
+
 			return err
 		}
 		var blk blockWithLeafs
 		if err := json.Unmarshal(blkValue, &blk); err != nil {
 			tx.Rollback()
+
 			return err
 		}
 		for i := blk.FirstIndex; i < blk.LastIndex; i++ {
@@ -303,16 +316,19 @@ func (p *processor) Reorg(ctx context.Context, firstReorgedBlock uint64) error {
 			}
 			if err := p.deleteLeaf(tx, i); err != nil {
 				tx.Rollback()
+
 				return err
 			}
 		}
 		if err := tx.Delete(blockTable, blkKey); err != nil {
 			tx.Rollback()
+
 			return err
 		}
 	}
 	if err := p.updateLastProcessedBlock(tx, firstReorgedBlock-1); err != nil {
 		tx.Rollback()
+
 		return err
 	}
 	var rollbackL1InfoTree func()
@@ -321,13 +337,16 @@ func (p *processor) Reorg(ctx context.Context, firstReorgedBlock uint64) error {
 		if err != nil {
 			tx.Rollback()
 			rollbackL1InfoTree()
+
 			return err
 		}
 	}
 	if err := tx.Commit(); err != nil {
 		rollbackL1InfoTree()
+
 		return err
 	}
+
 	return nil
 }
 
@@ -335,6 +354,7 @@ func (p *processor) deleteLeaf(tx kv.RwTx, index uint32) error {
 	if err := tx.Delete(infoTable, common.Uint32ToBytes(index)); err != nil {
 		return err
 	}
+
 	return nil
 }
 
@@ -345,7 +365,7 @@ func (p *processor) ProcessBlock(ctx context.Context, b sync.Block) error {
 	if err != nil {
 		return err
 	}
-	events := make([]Event, len(b.Events))
+	events := make([]Event, 0, len(b.Events))
 	rollupExitTreeRollback := func() {}
 	l1InfoTreeRollback := func() {}
 	rollback := func() {
@@ -359,16 +379,20 @@ func (p *processor) ProcessBlock(ctx context.Context, b sync.Block) error {
 		var initialL1InfoIndex uint32
 		var l1InfoLeavesAdded uint32
 		lastIndex, err := p.getLastIndex(tx)
-		if err == ErrNotFound {
+		if errors.Is(err, ErrNotFound) {
 			initialL1InfoIndex = 0
 		} else if err != nil {
 			rollback()
+
 			return err
 		} else {
 			initialL1InfoIndex = lastIndex + 1
 		}
 		for _, e := range b.Events {
-			event := e.(Event)
+			event, ok := e.(Event)
+			if !ok {
+				log.Errorf("unexpected type %T in events", e)
+			}
 			events = append(events, event)
 			if event.UpdateL1InfoTree != nil {
 				index := initialL1InfoIndex + l1InfoLeavesAdded
@@ -382,6 +406,7 @@ func (p *processor) ProcessBlock(ctx context.Context, b sync.Block) error {
 				}
 				if err := p.storeLeafInfo(tx, leafToStore); err != nil {
 					rollback()
+
 					return err
 				}
 				l1InfoTreeLeavesToAdd = append(l1InfoTreeLeavesToAdd, tree.Leaf{
@@ -412,15 +437,18 @@ func (p *processor) ProcessBlock(ctx context.Context, b sync.Block) error {
 			blockValue, err := json.Marshal(bwl)
 			if err != nil {
 				rollback()
+
 				return err
 			}
 			if err := tx.Put(blockTable, common.Uint64ToBytes(b.Num), blockValue); err != nil {
 				rollback()
+
 				return err
 			}
 			l1InfoTreeRollback, err = p.l1InfoTree.AddLeaves(tx, l1InfoTreeLeavesToAdd)
 			if err != nil {
 				rollback()
+
 				return err
 			}
 		}
@@ -429,20 +457,24 @@ func (p *processor) ProcessBlock(ctx context.Context, b sync.Block) error {
 			rollupExitTreeRollback, err = p.rollupExitTree.UpseartLeaves(tx, rollupExitTreeLeavesToAdd, b.Num)
 			if err != nil {
 				rollback()
+
 				return err
 			}
 		}
 	}
 	if err := p.updateLastProcessedBlock(tx, b.Num); err != nil {
 		rollback()
+
 		return err
 	}
 
 	if err := tx.Commit(); err != nil {
 		rollback()
+
 		return err
 	}
 	log.Infof("block %d processed with events: %+v", b.Num, events)
+
 	return nil
 }
 
@@ -469,6 +501,7 @@ func (p *processor) getLastIndex(tx kv.Tx) (uint32, error) {
 	if err := json.Unmarshal(blkBytes, &blk); err != nil {
 		return 0, err
 	}
+
 	return blk.LastIndex - 1, nil
 }
 
@@ -477,10 +510,12 @@ func (p *processor) storeLeafInfo(tx kv.RwTx, leaf storeLeaf) error {
 	if err != nil {
 		return err
 	}
+
 	return tx.Put(infoTable, common.Uint32ToBytes(leaf.Index), leafValue)
 }
 
 func (p *processor) updateLastProcessedBlock(tx kv.RwTx, blockNum uint64) error {
 	blockNumBytes := common.Uint64ToBytes(blockNum)
+
 	return tx.Put(lastBlockTable, lastBlockKey, blockNumBytes)
 }
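
Besides the early-return blank lines, this file's hunks fix a real bug: `events` was created with `make([]Event, len(b.Events))` and then appended to, so it ended up twice the intended length with zero-value events at the front. A self-contained demonstration of the two forms:

```go
package main

import "fmt"

func main() {
	type Event struct{ ID int }
	src := []Event{{1}, {2}, {3}}

	// Buggy pattern: make with a length, then append.
	wrong := make([]Event, len(src))
	for _, e := range src {
		wrong = append(wrong, e)
	}
	fmt.Println(len(wrong)) // 6: three zero-value events precede the real ones

	// Fixed pattern: zero length, preallocated capacity.
	right := make([]Event, 0, len(src))
	for _, e := range src {
		right = append(right, e)
	}
	fmt.Println(len(right)) // 3
}
```
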
diff --git a/lastgersync/config.go b/lastgersync/config.go
index 9db63bec6..36b12ab60 100644
--- a/lastgersync/config.go
+++ b/lastgersync/config.go
@@ -9,7 +9,7 @@ type Config struct {
 	// DBPath path of the DB
 	DBPath string `mapstructure:"DBPath"`
 	// BlockFinality indicates the status of the blocks that will be queried in order to sync
-	BlockFinality string `jsonschema:"enum=LatestBlock, enum=SafeBlock, enum=PendingBlock, enum=FinalizedBlock, enum=EarliestBlock" mapstructure:"BlockFinality"`
+	BlockFinality string `jsonschema:"enum=LatestBlock, enum=SafeBlock, enum=PendingBlock, enum=FinalizedBlock, enum=EarliestBlock" mapstructure:"BlockFinality"` //nolint:lll
 	// InitialBlockNum is the first block that will be queried when starting the synchronization from scratch.
 	// It should be a number equal to or below the creation of the bridge contract
 	InitialBlockNum uint64 `mapstructure:"InitialBlockNum"`
@@ -22,6 +22,7 @@ type Config struct {
 	MaxRetryAttemptsAfterError int `mapstructure:"MaxRetryAttemptsAfterError"`
 	// WaitForNewBlocksPeriod time that will be waited when the synchronizer has reached the latest block
 	WaitForNewBlocksPeriod types.Duration `mapstructure:"WaitForNewBlocksPeriod"`
-	// DownloadBufferSize buffer of events to be porcessed. When reached will stop downloading events until the processing catches up
+	// DownloadBufferSize buffer of events to be processed. When the buffer limit is reached,
+	// downloading will stop until the processing catches up.
 	DownloadBufferSize int `mapstructure:"DownloadBufferSize"`
 }
diff --git a/lastgersync/e2e_test.go b/lastgersync/e2e_test.go
index 888f4b84d..111ffa07d 100644
--- a/lastgersync/e2e_test.go
+++ b/lastgersync/e2e_test.go
@@ -57,6 +57,7 @@ func TestE2E(t *testing.T) {
 			require.NoError(t, err)
 			if lpb == lb {
 				syncerUpToDate = true
+
 				break
 			}
 			time.Sleep(time.Millisecond * 100)
diff --git a/lastgersync/evmdownloader.go b/lastgersync/evmdownloader.go
index 717eb0957..10a8c4f26 100644
--- a/lastgersync/evmdownloader.go
+++ b/lastgersync/evmdownloader.go
@@ -2,6 +2,7 @@ package lastgersync
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"math/big"
 	"time"
@@ -45,6 +46,7 @@ func newDownloader(
 	if err != nil {
 		return nil, err
 	}
+
 	return &downloader{
 		EVMDownloaderImplementation: sync.NewEVMDownloaderImplementation(
 			"lastgersync", l2Client, blockFinality, waitForNewBlocksPeriod, nil, nil, nil, rh,
@@ -65,14 +67,16 @@ func (d *downloader) Download(ctx context.Context, fromBlock uint64, downloadedC
 	)
 	for {
 		lastIndex, err = d.processor.getLastIndex(ctx)
-		if err == ErrNotFound {
+		if errors.Is(err, ErrNotFound) {
 			lastIndex = 0
 		} else if err != nil {
 			log.Errorf("error getting last indes: %v", err)
 			attempts++
 			d.rh.Handle("getLastIndex", attempts)
+
 			continue
 		}
+
 		break
 	}
 	for {
@@ -80,6 +84,7 @@ func (d *downloader) Download(ctx context.Context, fromBlock uint64, downloadedC
 		case <-ctx.Done():
 			log.Debug("closing channel")
 			close(downloadedCh)
+
 			return
 		default:
 		}
@@ -93,12 +98,13 @@ func (d *downloader) Download(ctx context.Context, fromBlock uint64, downloadedC
 				log.Errorf("error getting GERs: %v", err)
 				attempts++
 				d.rh.Handle("getGERsFromIndex", attempts)
+
 				continue
 			}
+
 			break
 		}
 
-		attempts = 0
 		blockHeader := d.GetBlockHeader(ctx, lastBlock)
 		block := &sync.EVMBlock{
 			EVMBlockHeader: sync.EVMBlockHeader{
@@ -111,26 +117,30 @@ func (d *downloader) Download(ctx context.Context, fromBlock uint64, downloadedC
 		d.setGreatestGERInjectedFromList(block, gers)
 
 		downloadedCh <- *block
-		if block.Events != nil {
-			lastIndex = block.Events[0].(Event).L1InfoTreeIndex
+		if len(block.Events) > 0 {
+			event, ok := block.Events[0].(Event)
+			if !ok {
+				log.Errorf("unexpected type %T in events", block.Events[0])
+			}
+			lastIndex = event.L1InfoTreeIndex
 		}
 	}
 }
 
 func (d *downloader) getGERsFromIndex(ctx context.Context, fromL1InfoTreeIndex uint32) ([]Event, error) {
 	lastIndex, _, err := d.l1InfoTreesync.GetLastL1InfoTreeRootAndIndex(ctx)
-	if err == tree.ErrNotFound {
+	if errors.Is(err, tree.ErrNotFound) {
 		return nil, nil
 	}
 	if err != nil {
-		return nil, fmt.Errorf("error calling GetLastL1InfoTreeRootAndIndex: %v", err)
+		return nil, fmt.Errorf("error calling GetLastL1InfoTreeRootAndIndex: %w", err)
 	}
 
 	gers := []Event{}
 	for i := fromL1InfoTreeIndex; i <= lastIndex; i++ {
 		info, err := d.l1InfoTreesync.GetInfoByIndex(ctx, i)
 		if err != nil {
-			return nil, fmt.Errorf("error calling GetInfoByIndex: %v", err)
+			return nil, fmt.Errorf("error calling GetInfoByIndex: %w", err)
 		}
 		gers = append(gers, Event{
 			L1InfoTreeIndex: i,
@@ -155,11 +165,13 @@ func (d *downloader) setGreatestGERInjectedFromList(b *sync.EVMBlock, list []Eve
 					event.GlobalExitRoot.Hex(), err,
 				)
 				d.rh.Handle("GlobalExitRootMap", attempts)
+
 				continue
 			}
 			if timestamp.Cmp(big.NewInt(0)) == 1 {
 				b.Events = []interface{}{event}
 			}
+
 			break
 		}
 	}
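
The switches from `err == ErrNotFound` to `errors.Is` and from `%v` to `%w` in this file go together: once an error is wrapped with `%w`, direct comparison against the sentinel fails, while `errors.Is` walks the wrap chain. A minimal demonstration:

```go
package main

import (
	"errors"
	"fmt"
)

var ErrNotFound = errors.New("not found")

func lookup() error {
	// Wrapping with %w preserves the sentinel for errors.Is.
	return fmt.Errorf("error calling lookup: %w", ErrNotFound)
}

func main() {
	err := lookup()
	fmt.Println(err == ErrNotFound)          // false: the wrapper is a new value
	fmt.Println(errors.Is(err, ErrNotFound)) // true: Is unwraps the chain
}
```
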
diff --git a/lastgersync/lastgersync.go b/lastgersync/lastgersync.go
index 2d7ef8cb1..1b40bfcf7 100644
--- a/lastgersync/lastgersync.go
+++ b/lastgersync/lastgersync.go
@@ -2,11 +2,9 @@ package lastgersync
 
 import (
 	"context"
-
 	"time"
 
 	"github.com/0xPolygon/cdk/etherman"
-
 	"github.com/0xPolygon/cdk/l1infotreesync"
 	"github.com/0xPolygon/cdk/sync"
 	"github.com/ethereum/go-ethereum/common"
@@ -75,7 +73,9 @@ func (s *LastGERSync) Start(ctx context.Context) {
 	s.driver.Sync(ctx)
 }
 
-func (s *LastGERSync) GetFirstGERAfterL1InfoTreeIndex(ctx context.Context, atOrAfterL1InfoTreeIndex uint32) (injectedL1InfoTreeIndex uint32, ger common.Hash, err error) {
+func (s *LastGERSync) GetFirstGERAfterL1InfoTreeIndex(
+	ctx context.Context, atOrAfterL1InfoTreeIndex uint32,
+) (injectedL1InfoTreeIndex uint32, ger common.Hash, err error) {
 	return s.processor.GetFirstGERAfterL1InfoTreeIndex(ctx, atOrAfterL1InfoTreeIndex)
 }
 
diff --git a/lastgersync/processor.go b/lastgersync/processor.go
index 88e89be99..049b2847e 100644
--- a/lastgersync/processor.go
+++ b/lastgersync/processor.go
@@ -7,6 +7,7 @@ import (
 	"math"
 
 	"github.com/0xPolygon/cdk/common"
+	"github.com/0xPolygon/cdk/log"
 	"github.com/0xPolygon/cdk/sync"
 	ethCommon "github.com/ethereum/go-ethereum/common"
 	"github.com/ledgerwatch/erigon-lib/kv"
@@ -41,11 +42,13 @@ func (b *blockWithGERs) MarshalBinary() ([]byte, error) {
 }
 
 func (b *blockWithGERs) UnmarshalBinary(data []byte) error {
-	if len(data) != 8 {
-		return fmt.Errorf("expected len %d, actual len %d", 8, len(data))
+	const expectedDataLength = 8
+	if len(data) != expectedDataLength {
+		return fmt.Errorf("expected len %d, actual len %d", expectedDataLength, len(data))
 	}
 	b.FirstIndex = common.BytesToUint32(data[:4])
 	b.LastIndex = common.BytesToUint32(data[4:])
+
 	return nil
 }
 
@@ -60,6 +63,7 @@ func newProcessor(dbPath string) (*processor, error) {
 			gerTable:           {},
 			blockTable:         {},
 		}
+
 		return cfg
 	}
 	db, err := mdbx.NewMDBX(nil).
@@ -69,6 +73,7 @@ func newProcessor(dbPath string) (*processor, error) {
 	if err != nil {
 		return nil, err
 	}
+
 	return &processor{
 		db: db,
 	}, nil
@@ -82,6 +87,7 @@ func (p *processor) GetLastProcessedBlock(ctx context.Context) (uint64, error) {
 		return 0, err
 	}
 	defer tx.Rollback()
+
 	return p.getLastProcessedBlockWithTx(tx)
 }
 
@@ -107,6 +113,7 @@ func (p *processor) getLastIndexWithTx(tx kv.Tx) (uint32, error) {
 	if k == nil {
 		return 0, ErrNotFound
 	}
+
 	return common.BytesToUint32(k), nil
 }
 
@@ -134,10 +141,11 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error {
 	var lastIndex int64
 	if lenEvents > 0 {
 		li, err := p.getLastIndexWithTx(tx)
-		if err == ErrNotFound {
+		if errors.Is(err, ErrNotFound) {
 			lastIndex = -1
 		} else if err != nil {
 			tx.Rollback()
+
 			return err
 		} else {
 			lastIndex = int64(li)
@@ -145,7 +153,10 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error {
 	}
 
 	for _, e := range block.Events {
-		event := e.(Event)
+		event, ok := e.(Event)
+		if !ok {
+			log.Errorf("unexpected type %T in events", e)
+		}
 		if int64(event.L1InfoTreeIndex) < lastIndex {
 			continue
 		}
@@ -156,28 +167,49 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error {
 			event.GlobalExitRoot[:],
 		); err != nil {
 			tx.Rollback()
+
 			return err
 		}
 	}
 
 	if lenEvents > 0 {
+		firstEvent, ok := block.Events[0].(Event)
+		if !ok {
+			log.Errorf("unexpected type %T in events", block.Events[0])
+			tx.Rollback()
+
+			return fmt.Errorf("unexpected type %T in events", block.Events[0])
+		}
+
+		lastEvent, ok := block.Events[lenEvents-1].(Event)
+		if !ok {
+			log.Errorf("unexpected type %T in events", block.Events[lenEvents-1])
+			tx.Rollback()
+
+			return fmt.Errorf("unexpected type %T in events", block.Events[lenEvents-1])
+		}
+
 		bwg := blockWithGERs{
-			FirstIndex: block.Events[0].(Event).L1InfoTreeIndex,
-			LastIndex:  block.Events[lenEvents-1].(Event).L1InfoTreeIndex + 1,
+			FirstIndex: firstEvent.L1InfoTreeIndex,
+			LastIndex:  lastEvent.L1InfoTreeIndex + 1,
 		}
+
 		data, err := bwg.MarshalBinary()
 		if err != nil {
 			tx.Rollback()
+
 			return err
 		}
 		if err = tx.Put(blockTable, common.Uint64ToBytes(block.Num), data); err != nil {
 			tx.Rollback()
+
 			return err
 		}
 	}
 
 	if err := p.updateLastProcessedBlockWithTx(tx, block.Num); err != nil {
 		tx.Rollback()
+
 		return err
 	}
 
@@ -193,26 +225,31 @@ func (p *processor) Reorg(ctx context.Context, firstReorgedBlock uint64) error {
 	iter, err := tx.Range(blockTable, common.Uint64ToBytes(firstReorgedBlock), nil)
 	if err != nil {
 		tx.Rollback()
+
 		return err
 	}
 	for bNumBytes, bWithGERBytes, err := iter.Next(); bNumBytes != nil; bNumBytes, bWithGERBytes, err = iter.Next() {
 		if err != nil {
 			tx.Rollback()
+
 			return err
 		}
 		if err := tx.Delete(blockTable, bNumBytes); err != nil {
 			tx.Rollback()
+
 			return err
 		}
 
 		bWithGER := &blockWithGERs{}
 		if err := bWithGER.UnmarshalBinary(bWithGERBytes); err != nil {
 			tx.Rollback()
+
 			return err
 		}
 		for i := bWithGER.FirstIndex; i < bWithGER.LastIndex; i++ {
 			if err := tx.Delete(gerTable, common.Uint32ToBytes(i)); err != nil {
 				tx.Rollback()
+
 				return err
 			}
 		}
@@ -220,6 +257,7 @@ func (p *processor) Reorg(ctx context.Context, firstReorgedBlock uint64) error {
 
 	if err := p.updateLastProcessedBlockWithTx(tx, firstReorgedBlock-1); err != nil {
 		tx.Rollback()
+
 		return err
 	}
 
@@ -228,7 +266,9 @@ func (p *processor) Reorg(ctx context.Context, firstReorgedBlock uint64) error {
 
 // GetFirstGERAfterL1InfoTreeIndex returns the first GER injected on the chain that is related to l1InfoTreeIndex
 // or greater
-func (p *processor) GetFirstGERAfterL1InfoTreeIndex(ctx context.Context, l1InfoTreeIndex uint32) (uint32, ethCommon.Hash, error) {
+func (p *processor) GetFirstGERAfterL1InfoTreeIndex(
+	ctx context.Context, l1InfoTreeIndex uint32,
+) (uint32, ethCommon.Hash, error) {
 	tx, err := p.db.BeginRo(ctx)
 	if err != nil {
 		return 0, ethCommon.Hash{}, err
@@ -246,5 +286,6 @@ func (p *processor) GetFirstGERAfterL1InfoTreeIndex(ctx context.Context, l1InfoT
 	if l1InfoIndexBytes == nil {
 		return 0, ethCommon.Hash{}, ErrNotFound
 	}
+
 	return common.BytesToUint32(l1InfoIndexBytes), ethCommon.BytesToHash(ger), nil
 }
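
The comma-ok assertions added above replace bare `e.(Event)` casts that would panic on an unexpected element. A small sketch of the pattern; note it skips the bad element with `continue`, whereas the log-only branches above fall through (the first/last-event checks instead roll back and return an error):

```go
package main

import "fmt"

type Event struct{ L1InfoTreeIndex uint32 }

func main() {
	events := []interface{}{Event{7}, "oops"}

	for _, e := range events {
		event, ok := e.(Event)
		if !ok {
			// A bare e.(Event) would panic here; the comma-ok form lets
			// the caller log and decide how to recover.
			fmt.Printf("unexpected type %T in events\n", e)
			continue
		}
		fmt.Println(event.L1InfoTreeIndex)
	}
}
```
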
diff --git a/log/config.go b/log/config.go
index 2f166ee92..4ebbf5020 100644
--- a/log/config.go
+++ b/log/config.go
@@ -3,11 +3,13 @@ package log
 // Config for log
 type Config struct {
 	// Environment defining the log format ("production" or "development").
-	// In development mode enables development mode (which makes DPanicLevel logs panic), uses a console encoder, writes to standard error, and disables sampling. Stacktraces are automatically included on logs of WarnLevel and above.
+	// The development environment enables development mode (which makes DPanicLevel logs panic),
+	// uses a console encoder, writes to standard error, and disables sampling.
+	// Stacktraces are automatically included on logs of WarnLevel and above.
 	// Check [here](https://pkg.go.dev/go.uber.org/zap@v1.24.0#NewDevelopmentConfig)
 	Environment LogEnvironment `mapstructure:"Environment" jsonschema:"enum=production,enum=development"`
 	// Level of log. As lower value more logs are going to be generated
-	Level string `mapstructure:"Level" jsonschema:"enum=debug,enum=info,enum=warn,enum=error,enum=dpanic,enum=panic,enum=fatal"`
+	Level string `mapstructure:"Level" jsonschema:"enum=debug,enum=info,enum=warn,enum=error,enum=dpanic,enum=panic,enum=fatal"` //nolint:lll
 	// Outputs
 	Outputs []string `mapstructure:"Outputs"`
 }
diff --git a/log/log.go b/log/log.go
index 6f776fa65..eae9f5438 100644
--- a/log/log.go
+++ b/log/log.go
@@ -70,7 +70,7 @@ func NewLogger(cfg Config) (*zap.SugaredLogger, *zap.AtomicLevel, error) {
 	var level zap.AtomicLevel
 	err := level.UnmarshalText([]byte(cfg.Level))
 	if err != nil {
-		return nil, nil, fmt.Errorf("error on setting log level: %s", err)
+		return nil, nil, fmt.Errorf("error on setting log level: %w", err)
 	}
 
 	var zapCfg zap.Config
@@ -93,7 +93,7 @@ func NewLogger(cfg Config) (*zap.SugaredLogger, *zap.AtomicLevel, error) {
 	if err != nil {
 		return nil, nil, err
 	}
-	defer logger.Sync() //nolint:gosec,errcheck
+	defer logger.Sync() //nolint:errcheck
 
 	// skip 2 callers: one for our wrapper methods and one for the package functions
 	withOptions := logger.WithOptions(zap.AddCallerSkip(2)) //nolint:gomnd
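
On the narrowed `//nolint:errcheck` for the deferred Sync: an alternative that needs no linter directive is to discard the error explicitly in a wrapper closure. A sketch, using zap directly rather than this package's wrappers:

```go
package main

import "go.uber.org/zap"

func main() {
	logger, err := zap.NewDevelopment()
	if err != nil {
		panic(err)
	}
	// Equivalent to the //nolint:errcheck defer above, but the discarded
	// error is explicit, so linters have nothing to flag.
	defer func() { _ = logger.Sync() }()

	logger.Sugar().Infof("hello %s", "world")
}
```
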
diff --git a/merkletree/key.go b/merkletree/key.go
index 1534f462e..1fd6feecc 100644
--- a/merkletree/key.go
+++ b/merkletree/key.go
@@ -52,7 +52,8 @@ func defaultCapIn() ([4]uint64, error) {
 
 // KeyEthAddrBalance returns the key of balance leaf:
 // hk0: H([0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0])
-// key: H([ethAddr[0:4], ethAddr[4:8], ethAddr[8:12], ethAddr[12:16], ethAddr[16:20], 0, 0, 0], [hk0[0], hk0[1], hk0[2], hk0[3]])
+// key: H([ethAddr[0:4], ethAddr[4:8], ethAddr[8:12], ethAddr[12:16], ethAddr[16:20], 0, 0, 0],
+// [hk0[0], hk0[1], hk0[2], hk0[3]])
 func KeyEthAddrBalance(ethAddr common.Address) ([]byte, error) {
 	capIn, err := defaultCapIn()
 	if err != nil {
@@ -64,7 +65,8 @@ func KeyEthAddrBalance(ethAddr common.Address) ([]byte, error) {
 
 // KeyEthAddrNonce returns the key of nonce leaf:
 // hk0: H([0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0])
-// key: H([ethAddr[0:4], ethAddr[4:8], ethAddr[8:12], ethAddr[12:16], ethAddr[16:20], 0, 1, 0], [hk0[0], hk0[1], hk0[2], hk0[3]]
+// key: H([ethAddr[0:4], ethAddr[4:8], ethAddr[8:12], ethAddr[12:16], ethAddr[16:20], 0, 1, 0],
+// [hk0[0], hk0[1], hk0[2], hk0[3]]
 func KeyEthAddrNonce(ethAddr common.Address) ([]byte, error) {
 	capIn, err := defaultCapIn()
 	if err != nil {
@@ -76,7 +78,8 @@ func KeyEthAddrNonce(ethAddr common.Address) ([]byte, error) {
 
 // KeyContractCode returns the key of contract code leaf:
 // hk0: H([0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0])
-// key: H([ethAddr[0:4], ethAddr[4:8], ethAddr[8:12], ethAddr[12:16], ethAddr[16:20], 0, 2, 0], [hk0[0], hk0[1], hk0[2], hk0[3]]
+// key: H([ethAddr[0:4], ethAddr[4:8], ethAddr[8:12], ethAddr[12:16], ethAddr[16:20], 0, 2, 0],
+// [hk0[0], hk0[1], hk0[2], hk0[3]]
 func KeyContractCode(ethAddr common.Address) ([]byte, error) {
 	capIn, err := defaultCapIn()
 	if err != nil {
@@ -87,8 +90,10 @@ func KeyContractCode(ethAddr common.Address) ([]byte, error) {
 }
 
 // KeyContractStorage returns the key of contract storage position leaf:
-// hk0: H([stoPos[0:4], stoPos[4:8], stoPos[8:12], stoPos[12:16], stoPos[16:20], stoPos[20:24], stoPos[24:28], stoPos[28:32], [0, 0, 0, 0])
-// key: H([ethAddr[0:4], ethAddr[4:8], ethAddr[8:12], ethAddr[12:16], ethAddr[16:20], 0, 3, 0], [hk0[0], hk0[1], hk0[2], hk0[3])
+// hk0: H([stoPos[0:4], stoPos[4:8], stoPos[8:12], stoPos[12:16], stoPos[16:20], stoPos[20:24],
+// stoPos[24:28], stoPos[28:32], [0, 0, 0, 0])
+// key: H([ethAddr[0:4], ethAddr[4:8], ethAddr[8:12], ethAddr[12:16], ethAddr[16:20], 0, 3, 0],
+// [hk0[0], hk0[1], hk0[2], hk0[3])
 func KeyContractStorage(ethAddr common.Address, storagePos []byte) ([]byte, error) {
 	storageBI := new(big.Int).SetBytes(storagePos)
 
@@ -122,14 +127,14 @@ func HashContractBytecode(code []byte) ([]uint64, error) {
 	)
 
 	// add 0x01
-	code = append(code, 0x01) // nolint:gomnd
+	code = append(code, 0x01) //nolint:gomnd
 
 	// add padding
-	for len(code)%(56) != 0 { // nolint:gomnd
-		code = append(code, 0x00) // nolint:gomnd
+	for len(code)%(56) != 0 {
+		code = append(code, 0x00) //nolint:gomnd
 	}
 
-	code[len(code)-1] = code[len(code)-1] | 0x80 // nolint:gomnd
+	code[len(code)-1] = code[len(code)-1] | 0x80 //nolint:gomnd
 
 	numHashes := int(math.Ceil(float64(len(code)) / float64(maxBytesToAdd)))
 
@@ -190,7 +195,8 @@ func HashContractBytecode(code []byte) ([]uint64, error) {
 
 // KeyCodeLength returns the key of code length leaf:
 // hk0: H([0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0])
-// key: H([ethAddr[0:4], ethAddr[4:8], ethAddr[8:12], ethAddr[12:16], ethAddr[16:20], 0, 4, 0], [hk0[0], hk0[1], hk0[2], hk0[3]]
+// key: H([ethAddr[0:4], ethAddr[4:8], ethAddr[8:12], ethAddr[12:16], ethAddr[16:20], 0, 4, 0],
+// [hk0[0], hk0[1], hk0[2], hk0[3]]
 func KeyCodeLength(ethAddr common.Address) ([]byte, error) {
 	capIn, err := defaultCapIn()
 	if err != nil {
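
The `//nolint:gomnd` normalization above sits on the bytecode padding steps of HashContractBytecode: append the 0x01 end marker, zero-pad to a multiple of 56 bytes, and set the top bit of the last byte. The padding step in isolation, as a sketch:

```go
package main

import "fmt"

// padBytecode reproduces the padding shown in HashContractBytecode:
// append the 0x01 end marker, zero-pad to a multiple of 56 bytes, then
// set the top bit of the last byte.
func padBytecode(code []byte) []byte {
	padded := append([]byte{}, code...) // copy so the caller's slice is untouched
	padded = append(padded, 0x01)
	for len(padded)%56 != 0 {
		padded = append(padded, 0x00)
	}
	padded[len(padded)-1] |= 0x80
	return padded
}

func main() {
	out := padBytecode([]byte{0x60, 0x00})
	fmt.Println(len(out), fmt.Sprintf("%#x", out[len(out)-1])) // 56 0x80
}
```
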
diff --git a/merkletree/split.go b/merkletree/split.go
index 63fcae337..77189b208 100644
--- a/merkletree/split.go
+++ b/merkletree/split.go
@@ -55,13 +55,11 @@ func H4ToString(h4 []uint64) string {
 
 // StringToh4 converts an hex string into array of 4 Scalars of 64 bits.
 func StringToh4(str string) ([]uint64, error) {
-	if strings.HasPrefix(str, "0x") { // nolint
-		str = str[2:]
-	}
+	str = strings.TrimPrefix(str, "0x")
 
 	bi, ok := new(big.Int).SetString(str, hex.Base)
 	if !ok {
-		return nil, fmt.Errorf("Could not convert %q into big int", str)
+		return nil, fmt.Errorf("could not convert %q into big int", str)
 	}
 
 	return scalarToh4(bi), nil
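
Two idioms in this hunk: `strings.TrimPrefix` is a no-op when the prefix is absent, so the removed `HasPrefix` guard was redundant; and Go error strings are conventionally lowercase (staticcheck ST1005). Both together, with base 16 assumed for `hex.Base`:

```go
package main

import (
	"fmt"
	"math/big"
	"strings"
)

func main() {
	// TrimPrefix returns the input unchanged if "0x" is not present,
	// so no HasPrefix check is needed.
	str := strings.TrimPrefix("0xdeadbeef", "0x")

	bi, ok := new(big.Int).SetString(str, 16)
	if !ok {
		fmt.Printf("could not convert %q into big int\n", str)
		return
	}
	fmt.Println(bi) // 3735928559
}
```
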
diff --git a/reorgdetector/reorgdetector_test.go b/reorgdetector/reorgdetector_test.go
index 7adec4ca2..7efe0892a 100644
--- a/reorgdetector/reorgdetector_test.go
+++ b/reorgdetector/reorgdetector_test.go
@@ -18,9 +18,9 @@ import (
 func newSimulatedL1(t *testing.T, auth *bind.TransactOpts) *simulated.Backend {
 	t.Helper()
 
-	balance, _ := new(big.Int).SetString("10000000000000000000000000", 10) //nolint:gomnd
+	balance, _ := new(big.Int).SetString("10000000000000000000000000", 10)
 
-	blockGasLimit := uint64(999999999999999999) //nolint:gomnd
+	blockGasLimit := uint64(999999999999999999)
 	client := simulated.NewBackend(map[common.Address]types.Account{
 		auth.From: {
 			Balance: balance,
diff --git a/rpc/bridge.go b/rpc/bridge.go
index 0b550e72c..eb6da780d 100644
--- a/rpc/bridge.go
+++ b/rpc/bridge.go
@@ -22,6 +22,8 @@ const (
 	// BRIDGE is the namespace of the bridge service
 	BRIDGE    = "bridge"
 	meterName = "github.com/0xPolygon/cdk/rpc"
+
+	zeroHex = "0x0"
 )
 
 // BridgeEndpoints contains implementations for the "bridge" RPC endpoints
@@ -83,20 +85,26 @@ func (b *BridgeEndpoints) L1InfoTreeIndexForBridge(networkID uint32, depositCoun
 		// TODO: special treatment of the error when not found,
 		// as it's expected that it will take some time for the L1 Info tree to be updated
 		if err != nil {
-			return "0x0", rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get l1InfoTreeIndex, error: %s", err))
+			return zeroHex, rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get l1InfoTreeIndex, error: %s", err))
 		}
 		return l1InfoTreeIndex, nil
 	}
 	if networkID == b.networkID {
 		// TODO: special treatment of the error when not found,
 		// as it's expected that it will take some time for the L1 Info tree to be updated
-		return "0x0", rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("TODO: batchsync / certificatesync missing implementation"))
-	}
-	return "0x0", rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("this client does not support network %d", networkID))
+		return zeroHex, rpc.NewRPCError(
+			rpc.DefaultErrorCode,
+			"TODO: batchsync / certificatesync missing implementation",
+		)
+	}
+	return zeroHex, rpc.NewRPCError(
+		rpc.DefaultErrorCode,
+		fmt.Sprintf("this client does not support network %d", networkID),
+	)
 }
 
 // InjectedInfoAfterIndex return the first GER injected onto the network that is linked
-// to the given index or greater. This call is usefull to understand when a bridge is ready to be claimed
+// to the given index or greater. This call is useful to understand when a bridge is ready to be claimed
 // on its destination network
 func (b *BridgeEndpoints) InjectedInfoAfterIndex(networkID uint32, l1InfoTreeIndex uint32) (interface{}, rpc.Error) {
 	ctx, cancel := context.WithTimeout(context.Background(), b.readTimeout)
@@ -111,22 +119,25 @@ func (b *BridgeEndpoints) InjectedInfoAfterIndex(networkID uint32, l1InfoTreeInd
 	if networkID == 0 {
 		info, err := b.l1InfoTree.GetInfoByIndex(ctx, l1InfoTreeIndex)
 		if err != nil {
-			return "0x0", rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get global exit root, error: %s", err))
+			return zeroHex, rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get global exit root, error: %s", err))
 		}
 		return info, nil
 	}
 	if networkID == b.networkID {
 		injectedL1InfoTreeIndex, _, err := b.injectedGERs.GetFirstGERAfterL1InfoTreeIndex(ctx, l1InfoTreeIndex)
 		if err != nil {
-			return "0x0", rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get global exit root, error: %s", err))
+			return zeroHex, rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get global exit root, error: %s", err))
 		}
 		info, err := b.l1InfoTree.GetInfoByIndex(ctx, injectedL1InfoTreeIndex)
 		if err != nil {
-			return "0x0", rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get global exit root, error: %s", err))
+			return zeroHex, rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get global exit root, error: %s", err))
 		}
 		return info, nil
 	}
-	return "0x0", rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("this client does not support network %d", networkID))
+	return zeroHex, rpc.NewRPCError(
+		rpc.DefaultErrorCode,
+		fmt.Sprintf("this client does not support network %d", networkID),
+	)
 }
 
 type ClaimProof struct {
@@ -138,7 +149,9 @@ type ClaimProof struct {
 // ClaimProof returns the proofs needed to claim a bridge. NetworkID and depositCount refer to the bridge origin
 // while globalExitRoot should be already injected on the destination network.
 // This call needs to be done to a client of the same network where the bridge tx was sent
-func (b *BridgeEndpoints) ClaimProof(networkID uint32, depositCount uint32, l1InfoTreeIndex uint32) (interface{}, rpc.Error) {
+func (b *BridgeEndpoints) ClaimProof(
+	networkID uint32, depositCount uint32, l1InfoTreeIndex uint32,
+) (interface{}, rpc.Error) {
 	ctx, cancel := context.WithTimeout(context.Background(), b.readTimeout)
 	defer cancel()
 
@@ -150,29 +163,38 @@ func (b *BridgeEndpoints) ClaimProof(networkID uint32, depositCount uint32, l1In
 
 	info, err := b.l1InfoTree.GetInfoByIndex(ctx, l1InfoTreeIndex)
 	if err != nil {
-		return "0x0", rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get info from the tree: %s", err))
+		return zeroHex, rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get info from the tree: %s", err))
 	}
 	proofRollupExitRoot, err := b.l1InfoTree.GetRollupExitTreeMerkleProof(ctx, networkID, info.GlobalExitRoot)
 	if err != nil {
-		return "0x0", rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get rollup exit proof, error: %s", err))
+		return zeroHex, rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get rollup exit proof, error: %s", err))
 	}
 	var proofLocalExitRoot [32]common.Hash
 	if networkID == 0 {
 		proofLocalExitRoot, err = b.bridgeL1.GetProof(ctx, depositCount, info.MainnetExitRoot)
 		if err != nil {
-			return "0x0", rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get local exit proof, error: %s", err))
+			return zeroHex, rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get local exit proof, error: %s", err))
 		}
 	} else if networkID == b.networkID {
 		localExitRoot, err := b.l1InfoTree.GetLocalExitRoot(ctx, networkID, info.RollupExitRoot)
 		if err != nil {
-			return "0x0", rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get local exit root from rollup exit tree, error: %s", err))
+			return zeroHex, rpc.NewRPCError(
+				rpc.DefaultErrorCode,
+				fmt.Sprintf("failed to get local exit root from rollup exit tree, error: %s", err),
+			)
 		}
 		proofLocalExitRoot, err = b.bridgeL2.GetProof(ctx, depositCount, localExitRoot)
 		if err != nil {
-			return "0x0", rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get local exit proof, error: %s", err))
+			return zeroHex, rpc.NewRPCError(
+				rpc.DefaultErrorCode,
+				fmt.Sprintf("failed to get local exit proof, error: %s", err),
+			)
 		}
 	} else {
-		return "0x0", rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("this client does not support network %d", networkID))
+		return zeroHex, rpc.NewRPCError(
+			rpc.DefaultErrorCode,
+			fmt.Sprintf("this client does not support network %d", networkID),
+		)
 	}
 	return ClaimProof{
 		ProofLocalExitRoot:  proofLocalExitRoot,
@@ -194,13 +216,16 @@ func (b *BridgeEndpoints) SponsorClaim(claim claimsponsor.Claim) (interface{}, r
 	c.Add(ctx, 1)
 
 	if b.sponsor == nil {
-		return "0x0", rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("this client does not support claim sponsoring"))
+		return zeroHex, rpc.NewRPCError(rpc.DefaultErrorCode, "this client does not support claim sponsoring")
 	}
 	if claim.DestinationNetwork != b.networkID {
-		return "0x0", rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("this client only sponsors claims for network %d", b.networkID))
+		return zeroHex, rpc.NewRPCError(
+			rpc.DefaultErrorCode,
+			fmt.Sprintf("this client only sponsors claims for network %d", b.networkID),
+		)
 	}
 	if err := b.sponsor.AddClaimToQueue(ctx, &claim); err != nil {
-		return "0x0", rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("error adding claim to the queue %s", err))
+		return zeroHex, rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("error adding claim to the queue %s", err))
 	}
 	return nil, nil
 }
@@ -218,11 +243,11 @@ func (b *BridgeEndpoints) GetSponsoredClaimStatus(globalIndex *big.Int) (interfa
 	c.Add(ctx, 1)
 
 	if b.sponsor == nil {
-		return "0x0", rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("this client does not support claim sponsoring"))
+		return zeroHex, rpc.NewRPCError(rpc.DefaultErrorCode, "this client does not support claim sponsoring")
 	}
 	claim, err := b.sponsor.GetClaim(ctx, globalIndex)
 	if err != nil {
-		return "0x0", rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get claim status, error: %s", err))
+		return zeroHex, rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get claim status, error: %s", err))
 	}
 	return claim.Status, nil
 }
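
Extracting the repeated "0x0" literal into the `zeroHex` constant keeps the error returns in this file consistent and makes any future change a one-line edit. A toy sketch of the same refactor (names hypothetical):

```go
package main

import "fmt"

const zeroHex = "0x0"

// claimStatus returns zeroHex on failure, mirroring how every error
// path in the bridge endpoints now uses the shared constant.
func claimStatus(ok bool) (string, error) {
	if !ok {
		return zeroHex, fmt.Errorf("failed to get claim status")
	}
	return "0x1", nil
}

func main() {
	s, err := claimStatus(false)
	fmt.Println(s, err)
}
```
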
diff --git a/rpc/bridge_client.go b/rpc/bridge_client.go
index 0063e6604..04d577001 100644
--- a/rpc/bridge_client.go
+++ b/rpc/bridge_client.go
@@ -34,9 +34,11 @@ func (c *Client) L1InfoTreeIndexForBridge(networkID uint32, depositCount uint32)
 }
 
 // InjectedInfoAfterIndex return the first GER injected onto the network that is linked
-// to the given index or greater. This call is usefull to understand when a bridge is ready to be claimed
+// to the given index or greater. This call is useful to understand when a bridge is ready to be claimed
 // on its destination network
-func (c *Client) InjectedInfoAfterIndex(networkID uint32, l1InfoTreeIndex uint32) (*l1infotreesync.L1InfoTreeLeaf, error) {
+func (c *Client) InjectedInfoAfterIndex(
+	networkID uint32, l1InfoTreeIndex uint32,
+) (*l1infotreesync.L1InfoTreeLeaf, error) {
 	response, err := rpc.JSONRPCCall(c.url, "bridge_injectedInfoAfterIndex", networkID, l1InfoTreeIndex)
 	if err != nil {
 		return nil, err
diff --git a/sequencesender/config.go b/sequencesender/config.go
index 3e3fd1c0f..7b7aada08 100644
--- a/sequencesender/config.go
+++ b/sequencesender/config.go
@@ -14,8 +14,9 @@ type Config struct {
 	WaitPeriodSendSequence types.Duration `mapstructure:"WaitPeriodSendSequence"`
 	// LastBatchVirtualizationTimeMaxWaitPeriod is time since sequences should be sent
 	LastBatchVirtualizationTimeMaxWaitPeriod types.Duration `mapstructure:"LastBatchVirtualizationTimeMaxWaitPeriod"`
-	// L1BlockTimestampMargin is the time difference (margin) that must exists between last L1 block and last L2 block in the sequence before
-	// to send the sequence to L1. If the difference is lower than this value then sequencesender will wait until the difference is equal or greater
+	// L1BlockTimestampMargin is the time difference (margin) that must exist between the last L1 block
+	// and the last L2 block in the sequence before sending the sequence to L1. If the difference is
+	// lower than this value, the sequencesender waits until the difference is equal or greater
 	L1BlockTimestampMargin types.Duration `mapstructure:"L1BlockTimestampMargin"`
 	// MaxTxSizeForL1 is the maximum size a single transaction can have. This field has
 	// non-trivial consequences: larger transactions than 128KB are significantly harder and
@@ -65,7 +66,7 @@ type Config struct {
 	// MaxBatchesForL1 is the maximum amount of batches to be sequenced in a single L1 tx
 	MaxBatchesForL1 uint64 `mapstructure:"MaxBatchesForL1"`
 	// BlockFinality indicates the status of the blocks that will be queried in order to sync
-	BlockFinality string `jsonschema:"enum=LatestBlock, enum=SafeBlock, enum=PendingBlock, enum=FinalizedBlock, enum=EarliestBlock" mapstructure:"BlockFinality"`
+	BlockFinality string `jsonschema:"enum=LatestBlock, enum=SafeBlock, enum=PendingBlock, enum=FinalizedBlock, enum=EarliestBlock" mapstructure:"BlockFinality"` //nolint:lll
 
 	// SanityCheckRPCURL is the URL of the RPC server to perform sanity check regarding the number of blocks in a batch
 	SanityCheckRPCURL string `mapstructure:"SanityCheckRPCURL"`
diff --git a/sequencesender/sequencesender.go b/sequencesender/sequencesender.go
index 8c96789a9..8390c818c 100644
--- a/sequencesender/sequencesender.go
+++ b/sequencesender/sequencesender.go
@@ -289,7 +289,7 @@ func (s *SequenceSender) purgeEthTx(ctx context.Context) {
 }
 
 // syncEthTxResults syncs results from L1 for transactions in the memory structure
-func (s *SequenceSender) syncEthTxResults(ctx context.Context) (uint64, error) {
+func (s *SequenceSender) syncEthTxResults(ctx context.Context) (uint64, error) { //nolint:unparam
 	s.mutexEthTx.Lock()
 	var txPending uint64
 	var txSync uint64
@@ -362,7 +362,9 @@ func (s *SequenceSender) syncAllEthTxResults(ctx context.Context) error {
 }
 
 // copyTxData copies tx data in the internal structure
-func (s *SequenceSender) copyTxData(txHash common.Hash, txData []byte, txsResults map[common.Hash]ethtxmanager.TxResult) {
+func (s *SequenceSender) copyTxData(
+	txHash common.Hash, txData []byte, txsResults map[common.Hash]ethtxmanager.TxResult,
+) {
 	s.ethTxData[txHash] = make([]byte, len(txData))
 	copy(s.ethTxData[txHash], txData)
 
@@ -386,12 +388,16 @@ func (s *SequenceSender) updateEthTxResult(txData *ethTxData, txResult ethtxmana
 	if txData.Status != txResult.Status.String() {
 		log.Infof("update transaction %v to state %s", txResult.ID, txResult.Status.String())
 		txData.StatusTimestamp = time.Now()
-		stTrans := txData.StatusTimestamp.Format("2006-01-02T15:04:05.000-07:00") + ", " + txData.Status + ", " + txResult.Status.String()
+		stTrans := txData.StatusTimestamp.Format("2006-01-02T15:04:05.000-07:00") + ", " +
+			txData.Status + ", " + txResult.Status.String()
+
 		txData.Status = txResult.Status.String()
 		txData.StateHistory = append(txData.StateHistory, stTrans)
 
 		// Manage according to the state
-		statusConsolidated := txData.Status == ethtxmanager.MonitoredTxStatusSafe.String() || txData.Status == ethtxmanager.MonitoredTxStatusFinalized.String()
+		statusConsolidated := txData.Status == ethtxmanager.MonitoredTxStatusSafe.String() ||
+			txData.Status == ethtxmanager.MonitoredTxStatusFinalized.String()
+
 		if txData.Status == ethtxmanager.MonitoredTxStatusFailed.String() {
 			s.logFatalf("transaction %v result failed!")
 		} else if statusConsolidated && txData.ToBatch >= s.latestVirtualBatch {
@@ -419,7 +425,7 @@ func (s *SequenceSender) getResultAndUpdateEthTx(ctx context.Context, txHash com
 	}
 
 	txResult, err := s.ethTxManager.Result(ctx, txHash)
-	if err == ethtxmanager.ErrNotFound {
+	if errors.Is(err, ethtxmanager.ErrNotFound) {
 		log.Infof("transaction %v does not exist in ethtxmanager. Marking it", txHash)
 		txData.OnMonitor = false
 		// Resend tx
@@ -481,9 +487,12 @@ func (s *SequenceSender) tryToSendSequence(ctx context.Context) {
 	lastL2BlockTimestamp := lastSequence.LastL2BLockTimestamp()
 
 	log.Debugf(sequence.String())
-	log.Infof("sending sequences to L1. From batch %d to batch %d", firstSequence.BatchNumber(), lastSequence.BatchNumber())
+	log.Infof("sending sequences to L1. From batch %d to batch %d",
+		firstSequence.BatchNumber(), lastSequence.BatchNumber(),
+	)
 
-	// Wait until last L1 block timestamp is L1BlockTimestampMargin seconds above the timestamp of the last L2 block in the sequence
+	// Wait until last L1 block timestamp is L1BlockTimestampMargin seconds above the timestamp
+	// of the last L2 block in the sequence
 	timeMargin := int64(s.cfg.L1BlockTimestampMargin.Seconds())
 	for {
 		// Get header of the last L1 block
@@ -496,17 +505,27 @@ func (s *SequenceSender) tryToSendSequence(ctx context.Context) {
 		elapsed, waitTime := s.marginTimeElapsed(lastL2BlockTimestamp, lastL1BlockHeader.Time, timeMargin)
 
 		if !elapsed {
-			log.Infof("waiting at least %d seconds to send sequences, time difference between last L1 block %d (ts: %d) and last L2 block %d (ts: %d) in the sequence is lower than %d seconds",
-				waitTime, lastL1BlockHeader.Number, lastL1BlockHeader.Time, lastSequence.BatchNumber(), lastL2BlockTimestamp, timeMargin)
+			log.Infof("waiting at least %d seconds to send sequences, time difference between last L1 block %d (ts: %d) "+
+				"and last L2 block %d (ts: %d) in the sequence is lower than %d seconds",
+				waitTime, lastL1BlockHeader.Number, lastL1BlockHeader.Time,
+				lastSequence.BatchNumber(), lastL2BlockTimestamp, timeMargin,
+			)
 			time.Sleep(time.Duration(waitTime) * time.Second)
 		} else {
-			log.Infof("continuing, time difference between last L1 block %d (ts: %d) and last L2 block %d (ts: %d) in the sequence is greater than %d seconds",
-				lastL1BlockHeader.Number, lastL1BlockHeader.Time, lastSequence.BatchNumber, lastL2BlockTimestamp, timeMargin)
+			log.Infof("continuing, time difference between last L1 block %d (ts: %d) and last L2 block %d (ts: %d) "+
+				"in the sequence is greater than %d seconds",
+				lastL1BlockHeader.Number,
+				lastL1BlockHeader.Time,
+				lastSequence.BatchNumber(),
+				lastL2BlockTimestamp,
+				timeMargin,
+			)
 			break
 		}
 	}
 
-	// Sanity check: Wait also until current time is L1BlockTimestampMargin seconds above the timestamp of the last L2 block in the sequence
+	// Sanity check: Wait also until current time is L1BlockTimestampMargin seconds above the
+	// timestamp of the last L2 block in the sequence
 	for {
 		currentTime := uint64(time.Now().Unix())
 
@@ -514,11 +533,13 @@ func (s *SequenceSender) tryToSendSequence(ctx context.Context) {
 
 		// Wait if the time difference is less than L1BlockTimestampMargin
 		if !elapsed {
-			log.Infof("waiting at least %d seconds to send sequences, time difference between now (ts: %d) and last L2 block %d (ts: %d) in the sequence is lower than %d seconds",
+			log.Infof("waiting at least %d seconds to send sequences, time difference between now (ts: %d) "+
+				"and last L2 block %d (ts: %d) in the sequence is lower than %d seconds",
 				waitTime, currentTime, lastSequence.BatchNumber, lastL2BlockTimestamp, timeMargin)
 			time.Sleep(time.Duration(waitTime) * time.Second)
 		} else {
-			log.Infof("[SeqSender]sending sequences now, time difference between now (ts: %d) and last L2 block %d (ts: %d) in the sequence is also greater than %d seconds",
+			log.Infof("[SeqSender]sending sequences now, time difference between now (ts: %d) and last L2 block %d (ts: %d) "+
+				"in the sequence is also greater than %d seconds",
 				currentTime, lastSequence.BatchNumber, lastL2BlockTimestamp, timeMargin)
 			break
 		}
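
Both wait loops above hinge on `marginTimeElapsed`, which reports whether the reference timestamp is at least `timeMargin` seconds past the last L2 block and, if not, how long to wait. Its body is not part of this diff, so the free-function form below is an assumption matched to the call sites:

```go
package main

import (
	"fmt"
	"time"
)

// marginTimeElapsed is a sketch of the margin check: elapsed is true
// when refTime is at least margin seconds above l2BlockTs; otherwise
// waitTime is the remaining number of seconds to sleep.
func marginTimeElapsed(l2BlockTs, refTime uint64, margin int64) (elapsed bool, waitTime int64) {
	diff := int64(refTime) - int64(l2BlockTs)
	if diff < margin {
		return false, margin - diff
	}
	return true, 0
}

func main() {
	now := uint64(time.Now().Unix())
	elapsed, wait := marginTimeElapsed(now-10, now, 30)
	fmt.Println(elapsed, wait) // false 20
}
```
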
@@ -526,7 +547,10 @@ func (s *SequenceSender) tryToSendSequence(ctx context.Context) {
 
 	// Send sequences to L1
 	log.Debugf(sequence.String())
-	log.Infof("sending sequences to L1. From batch %d to batch %d", firstSequence.BatchNumber(), lastSequence.BatchNumber())
+	log.Infof(
+		"sending sequences to L1. From batch %d to batch %d",
+		firstSequence.BatchNumber(), lastSequence.BatchNumber(),
+	)
 
 	tx, err := s.TxBuilder.BuildSequenceBatchesTx(ctx, sequence)
 	if err != nil {
@@ -565,7 +589,10 @@ func (s *SequenceSender) tryToSendSequence(ctx context.Context) {
 }
 
 // sendTx adds transaction to the ethTxManager to send it to L1
-func (s *SequenceSender) sendTx(ctx context.Context, resend bool, txOldHash *common.Hash, to *common.Address, fromBatch uint64, toBatch uint64, data []byte, gas uint64) error {
+func (s *SequenceSender) sendTx(
+	ctx context.Context, resend bool, txOldHash *common.Hash, to *common.Address,
+	fromBatch uint64, toBatch uint64, data []byte, gas uint64,
+) error {
 	// Params if new tx to send or resend a previous tx
 	var paramTo *common.Address
 	var paramNonce *uint64
@@ -639,7 +666,8 @@ func (s *SequenceSender) sendTx(ctx context.Context, resend bool, txOldHash *com
 	return nil
 }
 
-// getSequencesToSend generates sequences to be sent to L1. Empty array means there are no sequences to send or it's not worth sending
+// getSequencesToSend generates sequences to be sent to L1.
+// An empty array means there are no sequences to send, or that it's not worth sending them
 func (s *SequenceSender) getSequencesToSend(ctx context.Context) (seqsendertypes.Sequence, error) {
 	// Add sequences until too big for a single L1 tx or last batch is reached
 	s.mutexSequence.Lock()
@@ -655,7 +683,10 @@ func (s *SequenceSender) getSequencesToSend(ctx context.Context) (seqsendertypes
 		// Check if the next batch belongs to a new forkid, in this case we need to stop sequencing as we need to
 		// wait the upgrade of forkid is completed and s.cfg.NumBatchForkIdUpgrade is disabled (=0) again
 		if (s.cfg.ForkUpgradeBatchNumber != 0) && (batchNumber == (s.cfg.ForkUpgradeBatchNumber + 1)) {
-			return nil, fmt.Errorf("aborting sequencing process as we reached the batch %d where a new forkid is applied (upgrade)", s.cfg.ForkUpgradeBatchNumber+1)
+			return nil, fmt.Errorf(
+				"aborting sequencing process as we reached the batch %d where a new forkid is applied (upgrade)",
+				s.cfg.ForkUpgradeBatchNumber+1,
+			)
 		}
 
 		// Check if batch is closed
@@ -669,7 +700,10 @@ func (s *SequenceSender) getSequencesToSend(ctx context.Context) (seqsendertypes
 
 		// If the coinbase changes, the sequence ends here
 		if len(sequenceBatches) > 0 && batch.LastCoinbase() != prevCoinbase {
-			log.Infof("batch with different coinbase (batch %v, sequence %v), sequence will be sent to this point", prevCoinbase, batch.LastCoinbase)
+			log.Infof(
+				"batch with different coinbase (batch %v, sequence %v), sequence will be sent to this point",
+				prevCoinbase, batch.LastCoinbase(),
+			)
 			return s.TxBuilder.NewSequence(ctx, sequenceBatches, s.cfg.L2Coinbase)
 		}
 		prevCoinbase = batch.LastCoinbase()
@@ -685,9 +719,13 @@ func (s *SequenceSender) getSequencesToSend(ctx context.Context) (seqsendertypes
 			return newSeq, nil
 		}
 
-		// Check if the current batch is the last before a change to a new forkid, in this case we need to close and send the sequence to L1
+		// Check if the current batch is the last before a change to a new forkid
+		// In this case we need to close and send the sequence to L1
 		if (s.cfg.ForkUpgradeBatchNumber != 0) && (batchNumber == (s.cfg.ForkUpgradeBatchNumber)) {
-			log.Infof("sequence should be sent to L1, as we have reached the batch %d from which a new forkid is applied (upgrade)", s.cfg.ForkUpgradeBatchNumber)
+			log.Infof("sequence should be sent to L1, as we have reached the batch %d "+
+				"from which a new forkid is applied (upgrade)",
+				s.cfg.ForkUpgradeBatchNumber,
+			)
 			return s.TxBuilder.NewSequence(ctx, sequenceBatches, s.cfg.L2Coinbase)
 		}
 	}
@@ -790,7 +828,9 @@ func (s *SequenceSender) entryTypeToString(entryType datastream.EntryType) strin
 }
 
 // handleReceivedDataStream manages the events received by the streaming
-func (s *SequenceSender) handleReceivedDataStream(entry *datastreamer.FileEntry, client *datastreamer.StreamClient, server *datastreamer.StreamServer) error {
+func (s *SequenceSender) handleReceivedDataStream(
+	entry *datastreamer.FileEntry, client *datastreamer.StreamClient, server *datastreamer.StreamServer,
+) error {
 	dsType := datastream.EntryType(entry.Type)
 
 	var prevEntryType datastream.EntryType
@@ -809,12 +849,22 @@ func (s *SequenceSender) handleReceivedDataStream(entry *datastreamer.FileEntry,
 			return err
 		}
 
-		log.Infof("received L2Block entry, l2Block.Number: %d, l2Block.BatchNumber: %d, entry.Number: %d", l2Block.Number, l2Block.BatchNumber, entry.Number)
+		log.Infof("received L2Block entry, l2Block.Number: %d, l2Block.BatchNumber: %d, entry.Number: %d",
+			l2Block.Number, l2Block.BatchNumber, entry.Number,
+		)
 
 		// Sanity checks
-		if s.prevStreamEntry != nil && !(prevEntryType == datastream.EntryType_ENTRY_TYPE_BATCH_START || prevEntryType == datastream.EntryType_ENTRY_TYPE_L2_BLOCK || prevEntryType == datastream.EntryType_ENTRY_TYPE_TRANSACTION) {
-			log.Fatalf("unexpected L2Block entry received, entry.Number: %d, l2Block.Number: %d, prevEntry: %s, prevEntry.Number: %d",
-				entry.Number, l2Block.Number, s.entryTypeToString(prevEntryType), s.prevStreamEntry.Number)
+		if s.prevStreamEntry != nil &&
+			!(prevEntryType == datastream.EntryType_ENTRY_TYPE_BATCH_START ||
+				prevEntryType == datastream.EntryType_ENTRY_TYPE_L2_BLOCK ||
+				prevEntryType == datastream.EntryType_ENTRY_TYPE_TRANSACTION) {
+			log.Fatalf("unexpected L2Block entry received, entry.Number: %d, l2Block.Number: %d, "+
+				"prevEntry: %s, prevEntry.Number: %d",
+				entry.Number,
+				l2Block.Number,
+				s.entryTypeToString(prevEntryType),
+				s.prevStreamEntry.Number,
+			)
 		} else if prevEntryType == datastream.EntryType_ENTRY_TYPE_L2_BLOCK {
 			prevL2Block := &datastream.L2Block{}
 
@@ -870,11 +920,16 @@ func (s *SequenceSender) handleReceivedDataStream(entry *datastreamer.FileEntry,
 			return err
 		}
 
-		log.Debugf("received Transaction entry, tx.L2BlockNumber: %d, tx.Index: %d, entry.Number: %d", l2Tx.L2BlockNumber, l2Tx.Index, entry.Number)
+		log.Debugf(
+			"received Transaction entry, tx.L2BlockNumber: %d, tx.Index: %d, entry.Number: %d",
+			l2Tx.L2BlockNumber, l2Tx.Index, entry.Number,
+		)
 
 		// Sanity checks
-		if !(prevEntryType == datastream.EntryType_ENTRY_TYPE_L2_BLOCK || prevEntryType == datastream.EntryType_ENTRY_TYPE_TRANSACTION) {
-			log.Fatalf("unexpected Transaction entry received, entry.Number: %d, transaction.L2BlockNumber: %d, transaction.Index: %d, prevEntry: %s, prevEntry.Number: %d",
+		if !(prevEntryType == datastream.EntryType_ENTRY_TYPE_L2_BLOCK ||
+			prevEntryType == datastream.EntryType_ENTRY_TYPE_TRANSACTION) {
+			log.Fatalf("unexpected Transaction entry received, entry.Number: %d, transaction.L2BlockNumber: %d, "+
+				"transaction.Index: %d, prevEntry: %s, prevEntry.Number: %d",
 				entry.Number, l2Tx.L2BlockNumber, l2Tx.Index, s.entryTypeToString(prevEntryType), s.prevStreamEntry.Number)
 		}
 
@@ -925,8 +980,11 @@ func (s *SequenceSender) handleReceivedDataStream(entry *datastreamer.FileEntry,
 		log.Infof("received BatchEnd entry, batchEnd.Number: %d, entry.Number: %d", batch.Number, entry.Number)
 
 		// Sanity checks
-		if !(prevEntryType == datastream.EntryType_ENTRY_TYPE_L2_BLOCK || prevEntryType == datastream.EntryType_ENTRY_TYPE_TRANSACTION) {
-			log.Fatalf("unexpected BatchEnd entry received, entry.Number: %d, batchEnd.Number: %d, prevEntry.Type: %s, prevEntry.Number: %d",
+		if !(prevEntryType == datastream.EntryType_ENTRY_TYPE_L2_BLOCK ||
+			prevEntryType == datastream.EntryType_ENTRY_TYPE_TRANSACTION) {
+			log.Fatalf(
+				"unexpected BatchEnd entry received, entry.Number: %d, batchEnd.Number: %d, "+
+					"prevEntry.Type: %s, prevEntry.Number: %d",
 				entry.Number, batch.Number, s.entryTypeToString(prevEntryType), s.prevStreamEntry.Number)
 		}
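
The three Fatalf sanity checks reshaped above all enforce the same predecessor rule on stream entries. A minimal, self-contained sketch of that rule, using stand-in enum values rather than the real protobuf-generated datastream.EntryType constants:

```go
package main

import "fmt"

// Stand-in for the protobuf-generated datastream.EntryType enum.
type EntryType int

const (
	EntryBatchStart EntryType = iota
	EntryL2Block
	EntryTransaction
	EntryBatchEnd
)

// allowedPrev encodes the ordering the sanity checks enforce: an L2Block may
// follow BatchStart, L2Block, or Transaction; a Transaction or BatchEnd may
// only follow L2Block or Transaction.
var allowedPrev = map[EntryType]map[EntryType]bool{
	EntryL2Block:     {EntryBatchStart: true, EntryL2Block: true, EntryTransaction: true},
	EntryTransaction: {EntryL2Block: true, EntryTransaction: true},
	EntryBatchEnd:    {EntryL2Block: true, EntryTransaction: true},
}

func main() {
	fmt.Println(allowedPrev[EntryBatchEnd][EntryBatchStart]) // false: would trigger the Fatalf path
	fmt.Println(allowedPrev[EntryL2Block][EntryBatchStart])  // true: valid ordering
}
```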
 
@@ -976,10 +1034,14 @@ func (s *SequenceSender) closeSequenceBatch() error {
 		} else {
 			dsNumberOfBlocks := len(s.sequenceData[s.wipBatch].batchRaw.Blocks)
 			if rpcNumberOfBlocks != dsNumberOfBlocks {
-				log.Fatalf("number of blocks in batch %d (%d) does not match the number of blocks in the batch from the RPC (%d)", s.wipBatch, dsNumberOfBlocks, rpcNumberOfBlocks)
+				log.Fatalf(
+					"number of blocks in batch %d (%d) does not match the number of blocks in the batch from the RPC (%d)",
+					s.wipBatch, dsNumberOfBlocks, rpcNumberOfBlocks,
+				)
 			}
 
-			if data.batchType == datastream.BatchType_BATCH_TYPE_REGULAR && common.Bytes2Hex(data.batch.L2Data()) != batchL2Data {
+			if data.batchType == datastream.BatchType_BATCH_TYPE_REGULAR &&
+				common.Bytes2Hex(data.batch.L2Data()) != batchL2Data {
 				log.Infof("datastream batchL2Data: %s", common.Bytes2Hex(data.batch.L2Data()))
 				log.Infof("RPC batchL2Data: %s", batchL2Data)
 				log.Fatalf("batchL2Data in batch %d does not match batchL2Data from the RPC (%d)", s.wipBatch)
@@ -1015,7 +1077,10 @@ func (s *SequenceSender) getBatchFromRPC(batchNumber uint64) (int, string, error
 	// Get the batch number from the response hex string
 	err = json.Unmarshal(response.Result, &zkEVMBatchData)
 	if err != nil {
-		return 0, "", fmt.Errorf("error unmarshalling the batch number from the response calling zkevm_getBatchByNumber: %v", err)
+		return 0, "", fmt.Errorf(
+			"error unmarshalling the batch number from the response calling zkevm_getBatchByNumber: %w",
+			err,
+		)
 	}
 
 	return len(zkEVMBatchData.Blocks), zkEVMBatchData.BatchL2Data, nil
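
The switch from %v to %w here (and in the txbuilder hunks below) is more than cosmetic: %w keeps the wrapped error in the chain, so callers can still match it with errors.Is/errors.As. A small sketch of the difference, assuming nothing beyond the standard library:

```go
package main

import (
	"encoding/json"
	"errors"
	"fmt"
)

func decode(data []byte) error {
	var v struct{ Blocks int }
	if err := json.Unmarshal(data, &v); err != nil {
		// %w keeps the cause in the error chain; %v would flatten it to text.
		return fmt.Errorf("error unmarshalling the batch number from the response: %w", err)
	}
	return nil
}

func main() {
	err := decode([]byte("not-json"))
	var syntaxErr *json.SyntaxError
	fmt.Println(errors.As(err, &syntaxErr)) // true: callers can still inspect the cause
}
```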
@@ -1054,14 +1119,20 @@ func (s *SequenceSender) addNewSequenceBatch(l2Block *datastream.L2Block) {
 // addInfoSequenceBatchStart adds info from the batch start
 func (s *SequenceSender) addInfoSequenceBatchStart(batch *datastream.BatchStart) {
 	s.mutexSequence.Lock()
-	log.Infof("batch %d (%s) Start: type %d forkId %d chainId %d", batch.Number, datastream.BatchType_name[int32(batch.Type)], batch.Type, batch.ForkId, batch.ChainId)
+	log.Infof(
+		"batch %d (%s) Start: type %d forkId %d chainId %d",
+		batch.Number, datastream.BatchType_name[int32(batch.Type)], batch.Type, batch.ForkId, batch.ChainId,
+	)
 
 	// Current batch
 	data := s.sequenceData[s.wipBatch]
 	if data != nil {
 		wipBatch := data.batch
 		if wipBatch.BatchNumber()+1 != batch.Number {
-			s.logFatalf("batch start number (%d) does not match the current consecutive one (%d)", batch.Number, wipBatch.BatchNumber)
+			s.logFatalf(
+				"batch start number (%d) does not match the current consecutive one (%d)",
+				batch.Number, wipBatch.BatchNumber(),
+			)
 		}
 		data.batchType = batch.Type
 	}
@@ -1099,7 +1170,10 @@ func (s *SequenceSender) addNewBatchL2Block(l2Block *datastream.L2Block) {
 		data.batch.SetLastL2BLockTimestamp(l2Block.Timestamp)
 		// Sanity check: should be the same coinbase within the batch
 		if common.BytesToAddress(l2Block.Coinbase) != data.batch.LastCoinbase() {
-			s.logFatalf("coinbase changed within the batch! (Previous %v, Current %v)", data.batch.LastCoinbase, common.BytesToAddress(l2Block.Coinbase))
+			s.logFatalf(
+				"coinbase changed within the batch! (Previous %v, Current %v)",
+				data.batch.LastCoinbase(), common.BytesToAddress(l2Block.Coinbase),
+			)
 		}
 		data.batch.SetLastCoinbase(common.BytesToAddress(l2Block.Coinbase))
 		data.batch.SetL1InfoTreeIndex(l2Block.L1InfotreeIndex)
@@ -1126,7 +1200,9 @@ func (s *SequenceSender) addNewBatchL2Block(l2Block *datastream.L2Block) {
 // addNewBlockTx adds a new Tx to the current L2 block
 func (s *SequenceSender) addNewBlockTx(l2Tx *datastream.Transaction) {
 	s.mutexSequence.Lock()
-	log.Debugf("........new tx, length %d EGP %d SR %x..", len(l2Tx.Encoded), l2Tx.EffectiveGasPricePercentage, l2Tx.ImStateRoot[:8])
+	log.Debugf("........new tx, length %d EGP %d SR %x..",
+		len(l2Tx.Encoded), l2Tx.EffectiveGasPricePercentage, l2Tx.ImStateRoot[:8],
+	)
 
 	// Current L2 block
 	_, blockRaw := s.getWipL2Block()
@@ -1150,7 +1226,7 @@ func (s *SequenceSender) addNewBlockTx(l2Tx *datastream.Transaction) {
 }
 
 // getWipL2Block returns index of the array and pointer to the current L2 block (helper func)
-func (s *SequenceSender) getWipL2Block() (uint64, *state.L2BlockRaw) {
+func (s *SequenceSender) getWipL2Block() (uint64, *state.L2BlockRaw) { //nolint:unparam
 	// Current batch
 	var wipBatchRaw *state.BatchRawV2
 	if s.sequenceData[s.wipBatch] != nil {
@@ -1186,7 +1262,9 @@ func (s *SequenceSender) updateLatestVirtualBatch() error {
 
 // marginTimeElapsed checks if the time between currentTime and l2BlockTimestamp is greater than timeMargin.
 // If it's greater returns true, otherwise it returns false and the waitTime needed to achieve this timeMargin
-func (s *SequenceSender) marginTimeElapsed(l2BlockTimestamp uint64, currentTime uint64, timeMargin int64) (bool, int64) {
+func (s *SequenceSender) marginTimeElapsed(
+	l2BlockTimestamp uint64, currentTime uint64, timeMargin int64,
+) (bool, int64) {
 	// Check the time difference between L2 block and currentTime
 	var timeDiff int64
 	if l2BlockTimestamp >= currentTime {
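
This hunk only reflows the signature; for reference, a self-contained sketch of the margin check as described in the doc comment (the handling of a future l2BlockTimestamp is an assumption here, since that branch falls outside the hunk):

```go
package main

import "fmt"

func marginTimeElapsed(l2BlockTimestamp, currentTime uint64, timeMargin int64) (bool, int64) {
	var timeDiff int64
	if l2BlockTimestamp >= currentTime {
		timeDiff = 0 // block timestamp in the future: treat the margin as not elapsed (assumption)
	} else {
		timeDiff = int64(currentTime - l2BlockTimestamp)
	}
	if timeDiff < timeMargin {
		return false, timeMargin - timeDiff
	}
	return true, 0
}

func main() {
	elapsed, wait := marginTimeElapsed(100, 105, 10)
	fmt.Println(elapsed, wait) // false 5: caller must wait 5 more seconds
}
```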
@@ -1242,17 +1320,23 @@ func printBatch(raw *state.BatchRawV2, showBlock bool, showTx bool) {
 			lastBlock = &raw.Blocks[numBlocks-1]
 		}
 		if firstBlock != nil {
-			log.Debugf("//    block first (indL1info: %d, delta-timestamp: %d, #L2txs: %d)", firstBlock.IndexL1InfoTree, firstBlock.DeltaTimestamp, len(firstBlock.Transactions))
+			log.Debugf("//    block first (indL1info: %d, delta-timestamp: %d, #L2txs: %d)",
+				firstBlock.IndexL1InfoTree, firstBlock.DeltaTimestamp, len(firstBlock.Transactions),
+			)
 			// Tx info
 			if showTx {
 				for iTx, tx := range firstBlock.Transactions {
 					v, r, s := tx.Tx.RawSignatureValues()
-					log.Debugf("//       tx(%d) effPct: %d, encoded: %t, v: %v, r: %v, s: %v", iTx, tx.EfficiencyPercentage, tx.TxAlreadyEncoded, v, r, s)
+					log.Debugf("//       tx(%d) effPct: %d, encoded: %t, v: %v, r: %v, s: %v",
+						iTx, tx.EfficiencyPercentage, tx.TxAlreadyEncoded, v, r, s,
+					)
 				}
 			}
 		}
 		if lastBlock != nil {
-			log.Debugf("//    block last (indL1info: %d, delta-timestamp: %d, #L2txs: %d)", lastBlock.DeltaTimestamp, lastBlock.DeltaTimestamp, len(lastBlock.Transactions))
+			log.Debugf("//    block last (indL1info: %d, delta-timestamp: %d, #L2txs: %d)",
+				lastBlock.IndexL1InfoTree, lastBlock.DeltaTimestamp, len(lastBlock.Transactions),
+			)
 		}
 	}
 }
diff --git a/sequencesender/txbuilder/banana_base.go b/sequencesender/txbuilder/banana_base.go
index 15a25f563..871d02be4 100644
--- a/sequencesender/txbuilder/banana_base.go
+++ b/sequencesender/txbuilder/banana_base.go
@@ -58,7 +58,6 @@ func NewTxBuilderBananaBase(
 		blockFinality:          blockFinality,
 		opts:                   opts,
 	}
-
 }
 
 func (t *TxBuilderBananaBase) NewBatchFromL2Block(l2Block *datastream.L2Block) seqsendertypes.Batch {
@@ -72,11 +71,10 @@ func (t *TxBuilderBananaBase) NewBatchFromL2Block(l2Block *datastream.L2Block) s
 	return NewBananaBatch(batch)
 }
 
-func (t *TxBuilderBananaBase) NewSequence(ctx context.Context, batches []seqsendertypes.Batch, coinbase common.Address) (seqsendertypes.Sequence, error) {
-	ethBatches, err := toEthermanBatches(batches)
-	if err != nil {
-		return nil, err
-	}
+func (t *TxBuilderBananaBase) NewSequence(
+	ctx context.Context, batches []seqsendertypes.Batch, coinbase common.Address,
+) (seqsendertypes.Sequence, error) {
+	ethBatches := toEthermanBatches(batches)
 	sequence := etherman.NewSequenceBanana(ethBatches, coinbase)
 	var greatestL1Index uint32
 	for _, b := range sequence.Batches {
@@ -86,11 +84,11 @@ func (t *TxBuilderBananaBase) NewSequence(ctx context.Context, batches []seqsend
 	}
 	header, err := t.ethClient.HeaderByNumber(ctx, t.blockFinality)
 	if err != nil {
-		return nil, fmt.Errorf("error calling HeaderByNumber, with block finality %d: %v", t.blockFinality.Int64(), err)
+		return nil, fmt.Errorf("error calling HeaderByNumber, with block finality %d: %w", t.blockFinality.Int64(), err)
 	}
 	info, err := t.l1InfoTree.GetLatestInfoUntilBlock(ctx, header.Number.Uint64())
 	if err != nil {
-		return nil, fmt.Errorf("error calling GetLatestInfoUntilBlock with block num %d: %v", header.Number.Uint64(), err)
+		return nil, fmt.Errorf("error calling GetLatestInfoUntilBlock with block num %d: %w", header.Number.Uint64(), err)
 	}
 	if info.L1InfoTreeIndex >= greatestL1Index {
 		sequence.CounterL1InfoRoot = info.L1InfoTreeIndex + 1
@@ -126,7 +124,9 @@ func (t *TxBuilderBananaBase) NewSequence(ctx context.Context, batches []seqsend
 			blockHash = batch.ForcedBlockHashL1
 		}
 
-		accInputHash = cdkcommon.CalculateAccInputHash(accInputHash, batch.L2Data, infoRootHash, timestamp, batch.LastCoinbase, blockHash)
+		accInputHash = cdkcommon.CalculateAccInputHash(
+			accInputHash, batch.L2Data, infoRootHash, timestamp, batch.LastCoinbase, blockHash,
+		)
 	}
 
 	sequence.OldAccInputHash = oldAccInputHash
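
The CalculateAccInputHash call above folds each batch into a running accumulator, so the final AccInputHash commits to every batch in order. A conceptual sketch of that chaining, with sha256 as a stand-in for the real hash over the full field set (L1 info root, timestamp, coinbase, forced block hash):

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// accInputHash folds one batch into the running accumulator; sha256 stands in
// for the real hash, which also covers the extra per-batch fields.
func accInputHash(prev [32]byte, batchL2Data []byte) [32]byte {
	h := sha256.New()
	h.Write(prev[:])
	h.Write(batchL2Data)
	var out [32]byte
	copy(out[:], h.Sum(nil))
	return out
}

func main() {
	var acc [32]byte // plays the role of OldAccInputHash
	for _, data := range [][]byte{[]byte("batch1"), []byte("batch2")} {
		acc = accInputHash(acc, data)
	}
	fmt.Printf("final accumulator: %x\n", acc) // commits to both batches, in order
}
```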
@@ -156,17 +156,14 @@ func convertToSequenceBanana(sequences seqsendertypes.Sequence) (etherman.Sequen
 	}
 
 	for _, batch := range sequences.Batches() {
-		ethBatch, err := toEthermanBatch(batch)
-		if err != nil {
-			return etherman.SequenceBanana{}, err
-		}
+		ethBatch := toEthermanBatch(batch)
 		ethermanSequence.Batches = append(ethermanSequence.Batches, ethBatch)
 	}
 
 	return ethermanSequence, nil
 }
 
-func toEthermanBatch(batch seqsendertypes.Batch) (etherman.Batch, error) {
+func toEthermanBatch(batch seqsendertypes.Batch) etherman.Batch {
 	return etherman.Batch{
 		L2Data:               batch.L2Data(),
 		LastCoinbase:         batch.LastCoinbase(),
@@ -177,18 +174,14 @@ func toEthermanBatch(batch seqsendertypes.Batch) (etherman.Batch, error) {
 		L1InfoTreeIndex:      batch.L1InfoTreeIndex(),
 		LastL2BLockTimestamp: batch.LastL2BLockTimestamp(),
 		GlobalExitRoot:       batch.GlobalExitRoot(),
-	}, nil
+	}
 }
 
-func toEthermanBatches(batch []seqsendertypes.Batch) ([]etherman.Batch, error) {
+func toEthermanBatches(batch []seqsendertypes.Batch) []etherman.Batch {
 	result := make([]etherman.Batch, len(batch))
 	for i, b := range batch {
-		var err error
-		result[i], err = toEthermanBatch(b)
-		if err != nil {
-			return nil, err
-		}
+		result[i] = toEthermanBatch(b)
 	}
 
-	return result, nil
+	return result
 }
diff --git a/sequencesender/txbuilder/banana_base_test.go b/sequencesender/txbuilder/banana_base_test.go
index 6ad08b800..713131915 100644
--- a/sequencesender/txbuilder/banana_base_test.go
+++ b/sequencesender/txbuilder/banana_base_test.go
@@ -88,6 +88,8 @@ type testDataBananaBase struct {
 }
 
 func newBananaBaseTestData(t *testing.T) *testDataBananaBase {
+	t.Helper()
+
 	zkevmContractMock := mocks_txbuilder.NewRollupBananaBaseContractor(t)
 	gerContractMock := mocks_txbuilder.NewGlobalExitRootBananaContractor(t)
 	opts := bind.TransactOpts{}
diff --git a/sequencesender/txbuilder/banana_types.go b/sequencesender/txbuilder/banana_types.go
index 5a38cab08..c09095b6c 100644
--- a/sequencesender/txbuilder/banana_types.go
+++ b/sequencesender/txbuilder/banana_types.go
@@ -57,8 +57,12 @@ func (b *BananaSequence) Len() int {
 }
 
 func (b *BananaSequence) String() string {
-	res := fmt.Sprintf("Seq/Banana: L2Coinbase: %s, OldAccInputHash: %x, AccInputHash: %x, L1InfoRoot: %x, MaxSequenceTimestamp: %d, IndexL1InfoRoot: %d",
-		b.L2Coinbase().String(), b.OldAccInputHash.String(), b.AccInputHash.String(), b.L1InfoRoot().String(), b.MaxSequenceTimestamp(), b.IndexL1InfoRoot())
+	res := fmt.Sprintf(
+		"Seq/Banana: L2Coinbase: %s, OldAccInputHash: %x, AccInputHash: %x, L1InfoRoot: %x, "+
+			"MaxSequenceTimestamp: %d, IndexL1InfoRoot: %d",
+		b.L2Coinbase().String(), b.OldAccInputHash.String(), b.AccInputHash.String(), b.L1InfoRoot().String(),
+		b.MaxSequenceTimestamp(), b.IndexL1InfoRoot(),
+	)
 
 	for i, batch := range b.Batches() {
 		res += fmt.Sprintf("\n\tBatch %d: %s", i, batch.String())
@@ -127,8 +131,12 @@ func (b *BananaBatch) L1InfoTreeIndex() uint32 {
 }
 
 func (b *BananaBatch) String() string {
-	return fmt.Sprintf("Batch/Banana: LastCoinbase: %s, ForcedBatchTimestamp: %d, ForcedGlobalExitRoot: %x, ForcedBlockHashL1: %x, L2Data: %x, LastL2BLockTimestamp: %d, BatchNumber: %d, GlobalExitRoot: %x, L1InfoTreeIndex: %d",
-		b.LastCoinbase().String(), b.ForcedBatchTimestamp(), b.ForcedGlobalExitRoot().String(), b.ForcedBlockHashL1().String(), b.L2Data(), b.LastL2BLockTimestamp(), b.BatchNumber(), b.GlobalExitRoot().String(), b.L1InfoTreeIndex(),
+	return fmt.Sprintf("Batch/Banana: LastCoinbase: %s, ForcedBatchTimestamp: %d, ForcedGlobalExitRoot: %x, "+
+		"ForcedBlockHashL1: %x, L2Data: %x, LastL2BLockTimestamp: %d, BatchNumber: %d, "+
+		"GlobalExitRoot: %x, L1InfoTreeIndex: %d",
+		b.LastCoinbase().String(), b.ForcedBatchTimestamp(), b.ForcedGlobalExitRoot().String(),
+		b.ForcedBlockHashL1().String(), b.L2Data(), b.LastL2BLockTimestamp(), b.BatchNumber(),
+		b.GlobalExitRoot().String(), b.L1InfoTreeIndex(),
 	)
 }
 
diff --git a/sequencesender/txbuilder/banana_validium.go b/sequencesender/txbuilder/banana_validium.go
index 9ab1b929d..882b25402 100644
--- a/sequencesender/txbuilder/banana_validium.go
+++ b/sequencesender/txbuilder/banana_validium.go
@@ -25,7 +25,15 @@ type TxBuilderBananaValidium struct {
 
 type rollupBananaValidiumContractor interface {
 	rollupBananaBaseContractor
-	SequenceBatchesValidium(opts *bind.TransactOpts, batches []polygonvalidiumetrog.PolygonValidiumEtrogValidiumBatchData, indexL1InfoRoot uint32, maxSequenceTimestamp uint64, expectedFinalAccInputHash [32]byte, l2Coinbase common.Address, dataAvailabilityMessage []byte) (*types.Transaction, error)
+	SequenceBatchesValidium(
+		opts *bind.TransactOpts,
+		batches []polygonvalidiumetrog.PolygonValidiumEtrogValidiumBatchData,
+		indexL1InfoRoot uint32,
+		maxSequenceTimestamp uint64,
+		expectedFinalAccInputHash [32]byte,
+		l2Coinbase common.Address,
+		dataAvailabilityMessage []byte,
+	) (*types.Transaction, error)
 }
 
 func NewTxBuilderBananaValidium(
@@ -44,7 +52,9 @@ func NewTxBuilderBananaValidium(
 	}
 }
 
-func (t *TxBuilderBananaValidium) NewSequenceIfWorthToSend(ctx context.Context, sequenceBatches []seqsendertypes.Batch, l2Coinbase common.Address, batchNumber uint64) (seqsendertypes.Sequence, error) {
+func (t *TxBuilderBananaValidium) NewSequenceIfWorthToSend(
+	ctx context.Context, sequenceBatches []seqsendertypes.Batch, l2Coinbase common.Address, batchNumber uint64,
+) (seqsendertypes.Sequence, error) {
 	return t.condNewSeq.NewSequenceIfWorthToSend(ctx, t, sequenceBatches, l2Coinbase)
 }
 
@@ -55,7 +65,9 @@ func (t *TxBuilderBananaValidium) SetCondNewSeq(cond CondNewSequence) CondNewSeq
 	return previous
 }
 
-func (t *TxBuilderBananaValidium) BuildSequenceBatchesTx(ctx context.Context, sequences seqsendertypes.Sequence) (*types.Transaction, error) {
+func (t *TxBuilderBananaValidium) BuildSequenceBatchesTx(
+	ctx context.Context, sequences seqsendertypes.Sequence,
+) (*types.Transaction, error) {
 	// TODO: param sender
 	// Post sequences to DA backend
 	var dataAvailabilityMessage []byte
@@ -100,7 +112,9 @@ func (t *TxBuilderBananaValidium) internalBuildSequenceBatchesTx(sequence etherm
 	return t.sequenceBatchesValidium(newopts, sequence, dataAvailabilityMessage)
 }
 
-func (t *TxBuilderBananaValidium) sequenceBatchesValidium(opts bind.TransactOpts, sequence etherman.SequenceBanana, dataAvailabilityMessage []byte) (*types.Transaction, error) {
+func (t *TxBuilderBananaValidium) sequenceBatchesValidium(
+	opts bind.TransactOpts, sequence etherman.SequenceBanana, dataAvailabilityMessage []byte,
+) (*types.Transaction, error) {
 	batches := make([]polygonvalidiumetrog.PolygonValidiumEtrogValidiumBatchData, len(sequence.Batches))
 	for i, batch := range sequence.Batches {
 		var ger common.Hash
@@ -117,7 +131,10 @@ func (t *TxBuilderBananaValidium) sequenceBatchesValidium(opts bind.TransactOpts
 	}
 
 	log.Infof("building banana sequence tx. AccInputHash: %s", sequence.AccInputHash.Hex())
-	tx, err := t.rollupContract.SequenceBatchesValidium(&opts, batches, sequence.CounterL1InfoRoot, sequence.MaxSequenceTimestamp, sequence.AccInputHash, sequence.L2Coinbase, dataAvailabilityMessage)
+	tx, err := t.rollupContract.SequenceBatchesValidium(
+		&opts, batches, sequence.CounterL1InfoRoot, sequence.MaxSequenceTimestamp,
+		sequence.AccInputHash, sequence.L2Coinbase, dataAvailabilityMessage,
+	)
 	if err != nil {
 		log.Debugf("Batches to send: %+v", batches)
 		log.Debug("l2CoinBase: ", sequence.L2Coinbase)
diff --git a/sequencesender/txbuilder/banana_validium_test.go b/sequencesender/txbuilder/banana_validium_test.go
index 97ec2286f..75a9bf462 100644
--- a/sequencesender/txbuilder/banana_validium_test.go
+++ b/sequencesender/txbuilder/banana_validium_test.go
@@ -16,7 +16,6 @@ import (
 	"github.com/ethereum/go-ethereum/accounts/abi/bind"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/types"
-	ethtypes "github.com/ethereum/go-ethereum/core/types"
 	"github.com/stretchr/testify/mock"
 	"github.com/stretchr/testify/require"
 )
@@ -45,7 +44,6 @@ func TestBananaValidiumBuildSequenceBatchesTxSequenceErrorsFromDA(t *testing.T)
 	testData.da.EXPECT().PostSequenceBanana(ctx, mock.Anything).Return(nil, fmt.Errorf("test error"))
 	_, err = testData.sut.BuildSequenceBatchesTx(ctx, seq)
 	require.Error(t, err, "error posting sequences to the data availability protocol: test error")
-
 }
 
 func TestBananaValidiumBuildSequenceBatchesTxSequenceDAOk(t *testing.T) {
@@ -59,8 +57,8 @@ func TestBananaValidiumBuildSequenceBatchesTxSequenceDAOk(t *testing.T) {
 	ctx := context.TODO()
 	daMessage := []byte{1}
 	testData.da.EXPECT().PostSequenceBanana(ctx, mock.Anything).Return(daMessage, nil)
-	inner := &ethtypes.LegacyTx{}
-	seqBatchesTx := ethtypes.NewTx(inner)
+	inner := &types.LegacyTx{}
+	seqBatchesTx := types.NewTx(inner)
 	testData.rollupContract.EXPECT().SequenceBatchesValidium(mock.MatchedBy(func(opts *bind.TransactOpts) bool {
 		return opts.NoSend == true
 	}), mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, daMessage).Return(seqBatchesTx, nil).Once()
@@ -81,6 +79,8 @@ type testDataBananaValidium struct {
 }
 
 func newBananaValidiumTestData(t *testing.T, maxBatchesForL1 uint64) *testDataBananaValidium {
+	t.Helper()
+
 	zkevmContractMock := mocks_txbuilder.NewRollupBananaValidiumContractor(t)
 	gerContractMock := mocks_txbuilder.NewGlobalExitRootBananaContractor(t)
 	condMock := mocks_txbuilder.NewCondNewSequence(t)
diff --git a/sequencesender/txbuilder/banana_zkevm.go b/sequencesender/txbuilder/banana_zkevm.go
index c0216d528..53856cd00 100644
--- a/sequencesender/txbuilder/banana_zkevm.go
+++ b/sequencesender/txbuilder/banana_zkevm.go
@@ -21,7 +21,14 @@ type TxBuilderBananaZKEVM struct {
 
 type rollupBananaZKEVMContractor interface {
 	rollupBananaBaseContractor
-	SequenceBatches(opts *bind.TransactOpts, batches []polygonvalidiumetrog.PolygonRollupBaseEtrogBatchData, indexL1InfoRoot uint32, maxSequenceTimestamp uint64, expectedFinalAccInputHash [32]byte, l2Coinbase common.Address) (*types.Transaction, error)
+	SequenceBatches(
+		opts *bind.TransactOpts,
+		batches []polygonvalidiumetrog.PolygonRollupBaseEtrogBatchData,
+		indexL1InfoRoot uint32,
+		maxSequenceTimestamp uint64,
+		expectedFinalAccInputHash [32]byte,
+		l2Coinbase common.Address,
+	) (*types.Transaction, error)
 }
 
 type globalExitRootBananaZKEVMContractor interface {
@@ -44,7 +51,9 @@ func NewTxBuilderBananaZKEVM(
 	}
 }
 
-func (t *TxBuilderBananaZKEVM) NewSequenceIfWorthToSend(ctx context.Context, sequenceBatches []seqsendertypes.Batch, l2Coinbase common.Address, batchNumber uint64) (seqsendertypes.Sequence, error) {
+func (t *TxBuilderBananaZKEVM) NewSequenceIfWorthToSend(
+	ctx context.Context, sequenceBatches []seqsendertypes.Batch, l2Coinbase common.Address, batchNumber uint64,
+) (seqsendertypes.Sequence, error) {
 	return t.condNewSeq.NewSequenceIfWorthToSend(ctx, t, sequenceBatches, l2Coinbase)
 }
 
@@ -55,7 +64,9 @@ func (t *TxBuilderBananaZKEVM) SetCondNewSeq(cond CondNewSequence) CondNewSequen
 	return previous
 }
 
-func (t *TxBuilderBananaZKEVM) BuildSequenceBatchesTx(ctx context.Context, sequences seqsendertypes.Sequence) (*types.Transaction, error) {
+func (t *TxBuilderBananaZKEVM) BuildSequenceBatchesTx(
+	ctx context.Context, sequences seqsendertypes.Sequence,
+) (*types.Transaction, error) {
 	var err error
 	ethseq, err := convertToSequenceBanana(sequences)
 	if err != nil {
@@ -78,7 +89,9 @@ func (t *TxBuilderBananaZKEVM) BuildSequenceBatchesTx(ctx context.Context, seque
 	return tx, nil
 }
 
-func (t *TxBuilderBananaZKEVM) sequenceBatchesRollup(opts bind.TransactOpts, sequence etherman.SequenceBanana) (*types.Transaction, error) {
+func (t *TxBuilderBananaZKEVM) sequenceBatchesRollup(
+	opts bind.TransactOpts, sequence etherman.SequenceBanana,
+) (*types.Transaction, error) {
 	batches := make([]polygonvalidiumetrog.PolygonRollupBaseEtrogBatchData, len(sequence.Batches))
 	for i, batch := range sequence.Batches {
 		var ger common.Hash
@@ -94,7 +107,9 @@ func (t *TxBuilderBananaZKEVM) sequenceBatchesRollup(opts bind.TransactOpts, seq
 		}
 	}
 
-	tx, err := t.rollupContract.SequenceBatches(&opts, batches, sequence.CounterL1InfoRoot, sequence.MaxSequenceTimestamp, sequence.AccInputHash, sequence.L2Coinbase)
+	tx, err := t.rollupContract.SequenceBatches(
+		&opts, batches, sequence.CounterL1InfoRoot, sequence.MaxSequenceTimestamp, sequence.AccInputHash, sequence.L2Coinbase,
+	)
 	if err != nil {
 		log.Debugf("Batches to send: %+v", batches)
 		log.Debug("l2CoinBase: ", sequence.L2Coinbase)
diff --git a/sequencesender/txbuilder/banana_zkevm_test.go b/sequencesender/txbuilder/banana_zkevm_test.go
index 10043b8aa..9252f91de 100644
--- a/sequencesender/txbuilder/banana_zkevm_test.go
+++ b/sequencesender/txbuilder/banana_zkevm_test.go
@@ -15,7 +15,6 @@ import (
 	"github.com/ethereum/go-ethereum/accounts/abi/bind"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/types"
-	ethtypes "github.com/ethereum/go-ethereum/core/types"
 	"github.com/stretchr/testify/mock"
 	"github.com/stretchr/testify/require"
 )
@@ -43,8 +42,8 @@ func TestBananaZkevmBuildSequenceBatchesTxOk(t *testing.T) {
 	seq, err := newSequenceBananaZKEVMForTest(testData)
 	require.NoError(t, err)
 
-	inner := &ethtypes.LegacyTx{}
-	tx := ethtypes.NewTx(inner)
+	inner := &types.LegacyTx{}
+	tx := types.NewTx(inner)
 
 	// It checks that SequenceBatches is not going to be sent
 	testData.rollupContract.EXPECT().SequenceBatches(mock.MatchedBy(func(opts *bind.TransactOpts) bool {
@@ -82,6 +81,8 @@ type testDataBananaZKEVM struct {
 }
 
 func newBananaZKEVMTestData(t *testing.T, maxTxSizeForL1 uint64) *testDataBananaZKEVM {
+	t.Helper()
+
 	zkevmContractMock := mocks_txbuilder.NewRollupBananaZKEVMContractor(t)
 	gerContractMock := mocks_txbuilder.NewGlobalExitRootBananaContractor(t)
 	condMock := mocks_txbuilder.NewCondNewSequence(t)
diff --git a/sequencesender/txbuilder/elderberry_base.go b/sequencesender/txbuilder/elderberry_base.go
index c076d7d9e..9022eae31 100644
--- a/sequencesender/txbuilder/elderberry_base.go
+++ b/sequencesender/txbuilder/elderberry_base.go
@@ -25,7 +25,9 @@ func (t *TxBuilderElderberryBase) SetAuth(auth *bind.TransactOpts) {
 	t.opts = *auth
 }
 
-func (t *TxBuilderElderberryBase) NewSequence(ctx context.Context, batches []seqsendertypes.Batch, coinbase common.Address) (seqsendertypes.Sequence, error) {
+func (t *TxBuilderElderberryBase) NewSequence(
+	ctx context.Context, batches []seqsendertypes.Batch, coinbase common.Address,
+) (seqsendertypes.Sequence, error) {
 	seq := ElderberrySequence{
 		l2Coinbase: coinbase,
 		batches:    batches,
diff --git a/sequencesender/txbuilder/elderberry_base_test.go b/sequencesender/txbuilder/elderberry_base_test.go
index c25079075..9483e2a8a 100644
--- a/sequencesender/txbuilder/elderberry_base_test.go
+++ b/sequencesender/txbuilder/elderberry_base_test.go
@@ -36,7 +36,6 @@ func TestElderberryBaseNewBatchFromL2Block(t *testing.T) {
 	require.Equal(t, l2Block.L1InfotreeIndex, batch.L1InfoTreeIndex())
 	require.Equal(t, common.BytesToAddress(l2Block.Coinbase), batch.LastCoinbase())
 	require.Equal(t, common.BytesToHash(l2Block.GlobalExitRoot), batch.GlobalExitRoot())
-
 }
 
 func TestElderberryBasegetLastSequencedBatchNumberEmpty(t *testing.T) {
@@ -92,6 +91,8 @@ func TestElderberryBaseGetLastSequencedBatchFirstBatchIsZeroThrowAPanic(t *testi
 }
 
 func newElderberryBaseSUT(t *testing.T) *TxBuilderElderberryBase {
+	t.Helper()
+
 	opts := bind.TransactOpts{}
 	sut := NewTxBuilderElderberryBase(opts)
 	require.NotNil(t, sut)
diff --git a/sequencesender/txbuilder/elderberry_validium.go b/sequencesender/txbuilder/elderberry_validium.go
index c2acc6495..23e1ba08e 100644
--- a/sequencesender/txbuilder/elderberry_validium.go
+++ b/sequencesender/txbuilder/elderberry_validium.go
@@ -26,7 +26,14 @@ type TxBuilderElderberryValidium struct {
 }
 
 type rollupElderberryValidiumContractor interface {
-	SequenceBatchesValidium(opts *bind.TransactOpts, batches []polygonvalidiumetrog.PolygonValidiumEtrogValidiumBatchData, maxSequenceTimestamp uint64, initSequencedBatch uint64, l2Coinbase common.Address, dataAvailabilityMessage []byte) (*types.Transaction, error)
+	SequenceBatchesValidium(
+		opts *bind.TransactOpts,
+		batches []polygonvalidiumetrog.PolygonValidiumEtrogValidiumBatchData,
+		maxSequenceTimestamp uint64,
+		initSequencedBatch uint64,
+		l2Coinbase common.Address,
+		dataAvailabilityMessage []byte,
+	) (*types.Transaction, error)
 }
 
 func NewTxBuilderElderberryValidium(zkevm contracts.RollupElderberryType,
@@ -39,7 +46,9 @@ func NewTxBuilderElderberryValidium(zkevm contracts.RollupElderberryType,
 		rollupContract:          zkevm,
 	}
 }
-func (t *TxBuilderElderberryValidium) NewSequenceIfWorthToSend(ctx context.Context, sequenceBatches []seqsendertypes.Batch, l2Coinbase common.Address, batchNumber uint64) (seqsendertypes.Sequence, error) {
+func (t *TxBuilderElderberryValidium) NewSequenceIfWorthToSend(
+	ctx context.Context, sequenceBatches []seqsendertypes.Batch, l2Coinbase common.Address, batchNumber uint64,
+) (seqsendertypes.Sequence, error) {
 	return t.condNewSeq.NewSequenceIfWorthToSend(ctx, t, sequenceBatches, l2Coinbase)
 }
 
@@ -50,7 +59,9 @@ func (t *TxBuilderElderberryValidium) SetCondNewSeq(cond CondNewSequence) CondNe
 	return previous
 }
 
-func (t *TxBuilderElderberryValidium) BuildSequenceBatchesTx(ctx context.Context, sequences seqsendertypes.Sequence) (*types.Transaction, error) {
+func (t *TxBuilderElderberryValidium) BuildSequenceBatchesTx(
+	ctx context.Context, sequences seqsendertypes.Sequence,
+) (*types.Transaction, error) {
 	if sequences == nil || sequences.Len() == 0 {
 		return nil, fmt.Errorf("can't sequence an empty sequence")
 	}
@@ -87,13 +98,16 @@ func (t *TxBuilderElderberryValidium) buildSequenceBatchesTxValidium(opts *bind.
 		batches[i] = polygonvalidiumetrog.PolygonValidiumEtrogValidiumBatchData{
 			TransactionsHash:     crypto.Keccak256Hash(seq.L2Data()),
 			ForcedGlobalExitRoot: ger,
-			ForcedTimestamp:      uint64(seq.ForcedBatchTimestamp()),
+			ForcedTimestamp:      seq.ForcedBatchTimestamp(),
 			ForcedBlockHashL1:    seq.ForcedBlockHashL1(),
 		}
 	}
 	lastSequencedBatchNumber := getLastSequencedBatchNumber(sequences)
-	log.Infof("SequenceBatchesValidium(from=%s, len(batches)=%d, MaxSequenceTimestamp=%d, lastSequencedBatchNumber=%d, L2Coinbase=%s, dataAvailabilityMessage=%s)",
-		t.opts.From.String(), len(batches), sequences.MaxSequenceTimestamp(), lastSequencedBatchNumber, sequences.L2Coinbase().String(), hex.EncodeToString(dataAvailabilityMessage))
+	log.Infof("SequenceBatchesValidium(from=%s, len(batches)=%d, MaxSequenceTimestamp=%d, "+
+		"lastSequencedBatchNumber=%d, L2Coinbase=%s, dataAvailabilityMessage=%s)",
+		t.opts.From.String(), len(batches), sequences.MaxSequenceTimestamp(), lastSequencedBatchNumber,
+		sequences.L2Coinbase().String(), hex.EncodeToString(dataAvailabilityMessage),
+	)
 	tx, err := t.rollupContract.SequenceBatchesValidium(opts, batches, sequences.MaxSequenceTimestamp(),
 		lastSequencedBatchNumber, sequences.L2Coinbase(), dataAvailabilityMessage)
 	if err != nil {
diff --git a/sequencesender/txbuilder/elderberry_validium_test.go b/sequencesender/txbuilder/elderberry_validium_test.go
index adc8456a7..7607576d3 100644
--- a/sequencesender/txbuilder/elderberry_validium_test.go
+++ b/sequencesender/txbuilder/elderberry_validium_test.go
@@ -98,6 +98,8 @@ type testDataElderberryValidium struct {
 }
 
 func newElderberryValidiumSUT(t *testing.T) *testDataElderberryValidium {
+	t.Helper()
+
 	zkevmContract, err := contracts.NewContractMagic[contracts.RollupElderberryType](polygonvalidiumetrog.NewPolygonvalidiumetrog, common.Address{}, nil, contracts.ContractNameRollup, contracts.VersionElderberry)
 	require.NoError(t, err)
 	privateKey, err := crypto.HexToECDSA("64e679029f5032046955d41713dcc4b565de77ab891748d31bcf38864b54c175")
diff --git a/sequencesender/txbuilder/elderberry_zkevm.go b/sequencesender/txbuilder/elderberry_zkevm.go
index 870be1bb8..3f446b7af 100644
--- a/sequencesender/txbuilder/elderberry_zkevm.go
+++ b/sequencesender/txbuilder/elderberry_zkevm.go
@@ -21,10 +21,18 @@ type TxBuilderElderberryZKEVM struct {
 }
 
 type rollupElderberryZKEVMContractor interface {
-	SequenceBatches(opts *bind.TransactOpts, batches []polygonvalidiumetrog.PolygonRollupBaseEtrogBatchData, maxSequenceTimestamp uint64, initSequencedBatch uint64, l2Coinbase common.Address) (*types.Transaction, error)
+	SequenceBatches(
+		opts *bind.TransactOpts,
+		batches []polygonvalidiumetrog.PolygonRollupBaseEtrogBatchData,
+		maxSequenceTimestamp uint64,
+		initSequencedBatch uint64,
+		l2Coinbase common.Address,
+	) (*types.Transaction, error)
 }
 
-func NewTxBuilderElderberryZKEVM(zkevm rollupElderberryZKEVMContractor, opts bind.TransactOpts, maxTxSizeForL1 uint64) *TxBuilderElderberryZKEVM {
+func NewTxBuilderElderberryZKEVM(
+	zkevm rollupElderberryZKEVMContractor, opts bind.TransactOpts, maxTxSizeForL1 uint64,
+) *TxBuilderElderberryZKEVM {
 	return &TxBuilderElderberryZKEVM{
 		TxBuilderElderberryBase: *NewTxBuilderElderberryBase(opts),
 		condNewSeq:              NewConditionalNewSequenceMaxSize(maxTxSizeForL1),
@@ -32,7 +40,9 @@ func NewTxBuilderElderberryZKEVM(zkevm rollupElderberryZKEVMContractor, opts bin
 	}
 }
 
-func (t *TxBuilderElderberryZKEVM) NewSequenceIfWorthToSend(ctx context.Context, sequenceBatches []seqsendertypes.Batch, l2Coinbase common.Address, batchNumber uint64) (seqsendertypes.Sequence, error) {
+func (t *TxBuilderElderberryZKEVM) NewSequenceIfWorthToSend(
+	ctx context.Context, sequenceBatches []seqsendertypes.Batch, l2Coinbase common.Address, batchNumber uint64,
+) (seqsendertypes.Sequence, error) {
 	return t.condNewSeq.NewSequenceIfWorthToSend(ctx, t, sequenceBatches, l2Coinbase)
 }
 
@@ -43,7 +53,9 @@ func (t *TxBuilderElderberryZKEVM) SetCondNewSeq(cond CondNewSequence) CondNewSe
 	return previous
 }
 
-func (t *TxBuilderElderberryZKEVM) BuildSequenceBatchesTx(ctx context.Context, sequences seqsendertypes.Sequence) (*types.Transaction, error) {
+func (t *TxBuilderElderberryZKEVM) BuildSequenceBatchesTx(
+	ctx context.Context, sequences seqsendertypes.Sequence,
+) (*types.Transaction, error) {
 	newopts := t.opts
 	newopts.NoSend = true
 
@@ -55,7 +67,9 @@ func (t *TxBuilderElderberryZKEVM) BuildSequenceBatchesTx(ctx context.Context, s
 	return t.sequenceBatchesRollup(newopts, sequences)
 }
 
-func (t *TxBuilderElderberryZKEVM) sequenceBatchesRollup(opts bind.TransactOpts, sequences seqsendertypes.Sequence) (*types.Transaction, error) {
+func (t *TxBuilderElderberryZKEVM) sequenceBatchesRollup(
+	opts bind.TransactOpts, sequences seqsendertypes.Sequence,
+) (*types.Transaction, error) {
 	if sequences == nil || sequences.Len() == 0 {
 		return nil, fmt.Errorf("can't sequence an empty sequence")
 	}
@@ -69,13 +83,15 @@ func (t *TxBuilderElderberryZKEVM) sequenceBatchesRollup(opts bind.TransactOpts,
 		batches[i] = polygonvalidiumetrog.PolygonRollupBaseEtrogBatchData{
 			Transactions:         seq.L2Data(),
 			ForcedGlobalExitRoot: ger,
-			ForcedTimestamp:      uint64(seq.ForcedBatchTimestamp()),
+			ForcedTimestamp:      seq.ForcedBatchTimestamp(),
 			// TODO: Check that it is ok to use ForcedBlockHashL1 instead of PrevBlockHash
 			ForcedBlockHashL1: seq.ForcedBlockHashL1(),
 		}
 	}
 	lastSequencedBatchNumber := getLastSequencedBatchNumber(sequences)
-	tx, err := t.rollupContract.SequenceBatches(&opts, batches, sequences.MaxSequenceTimestamp(), lastSequencedBatchNumber, sequences.L2Coinbase())
+	tx, err := t.rollupContract.SequenceBatches(
+		&opts, batches, sequences.MaxSequenceTimestamp(), lastSequencedBatchNumber, sequences.L2Coinbase(),
+	)
 	if err != nil {
 		t.warningMessage(batches, sequences.L2Coinbase(), &opts)
 		if parsedErr, ok := etherman.TryParseError(err); ok {
@@ -86,7 +102,9 @@ func (t *TxBuilderElderberryZKEVM) sequenceBatchesRollup(opts bind.TransactOpts,
 	return tx, err
 }
 
-func (t *TxBuilderElderberryZKEVM) warningMessage(batches []polygonvalidiumetrog.PolygonRollupBaseEtrogBatchData, l2Coinbase common.Address, opts *bind.TransactOpts) {
+func (t *TxBuilderElderberryZKEVM) warningMessage(
+	batches []polygonvalidiumetrog.PolygonRollupBaseEtrogBatchData, l2Coinbase common.Address, opts *bind.TransactOpts,
+) {
 	log.Warnf("Sequencer address: ", opts.From, "l2CoinBase: ", l2Coinbase, " Batches to send: %+v", batches)
 }
 
diff --git a/sequencesender/txbuilder/elderberry_zkevm_test.go b/sequencesender/txbuilder/elderberry_zkevm_test.go
index 94623048c..27e54df86 100644
--- a/sequencesender/txbuilder/elderberry_zkevm_test.go
+++ b/sequencesender/txbuilder/elderberry_zkevm_test.go
@@ -98,6 +98,8 @@ func TestElderberryZkevmNewSequenceIfWorthToSend(t *testing.T) {
 }
 
 func newElderberryZkevmSUT(t *testing.T) *txbuilder.TxBuilderElderberryZKEVM {
+	t.Helper()
+
 	zkevmContract, err := contracts.NewContractMagic[contracts.RollupElderberryType](polygonvalidiumetrog.NewPolygonvalidiumetrog, common.Address{}, nil, contracts.ContractNameRollup, contracts.VersionElderberry)
 	require.NoError(t, err)
 	privateKey, err := crypto.HexToECDSA("64e679029f5032046955d41713dcc4b565de77ab891748d31bcf38864b54c175")
diff --git a/sequencesender/txbuilder/interface.go b/sequencesender/txbuilder/interface.go
index bde8b6346..1a16dbbad 100644
--- a/sequencesender/txbuilder/interface.go
+++ b/sequencesender/txbuilder/interface.go
@@ -13,9 +13,13 @@ type TxBuilder interface {
 	// BuildSequenceBatchesTx  Builds a sequence of batches transaction
 	BuildSequenceBatchesTx(ctx context.Context, sequences seqsendertypes.Sequence) (*ethtypes.Transaction, error)
 	// NewSequence  Creates a new sequence
-	NewSequence(ctx context.Context, batches []seqsendertypes.Batch, coinbase common.Address) (seqsendertypes.Sequence, error)
+	NewSequence(
+		ctx context.Context, batches []seqsendertypes.Batch, coinbase common.Address,
+	) (seqsendertypes.Sequence, error)
 	// NewSequenceIfWorthToSend  Creates a new sequence if it is worth sending
-	NewSequenceIfWorthToSend(ctx context.Context, sequenceBatches []seqsendertypes.Batch, l2Coinbase common.Address, batchNumber uint64) (seqsendertypes.Sequence, error)
+	NewSequenceIfWorthToSend(
+		ctx context.Context, sequenceBatches []seqsendertypes.Batch, l2Coinbase common.Address, batchNumber uint64,
+	) (seqsendertypes.Sequence, error)
 	// NewBatchFromL2Block  Creates a new batch from the L2 block from a datastream
 	NewBatchFromL2Block(l2Block *datastream.L2Block) seqsendertypes.Batch
 	//SetCondNewSeq  Allows to override the condition to send a new sequence, returns previous one
@@ -25,5 +29,7 @@ type TxBuilder interface {
 
 type CondNewSequence interface {
 	//NewSequenceIfWorthToSend  Return nil, nil if the sequence is not worth sending
-	NewSequenceIfWorthToSend(ctx context.Context, txBuilder TxBuilder, sequenceBatches []seqsendertypes.Batch, l2Coinbase common.Address) (seqsendertypes.Sequence, error)
+	NewSequenceIfWorthToSend(
+		ctx context.Context, txBuilder TxBuilder, sequenceBatches []seqsendertypes.Batch, l2Coinbase common.Address,
+	) (seqsendertypes.Sequence, error)
 }
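
CondNewSequence's contract is that (nil, nil) means "not worth sending yet". A minimal hypothetical implementation against stand-in types, in the spirit of ConditionalNewSequenceNumBatches:

```go
package main

import (
	"context"
	"fmt"
)

// Stand-ins for seqsendertypes.Batch and seqsendertypes.Sequence.
type Batch struct{ Number uint64 }
type Sequence struct{ Batches []Batch }

// CondNewSequence mirrors the contract above: (nil, nil) means the pending
// batches are not yet worth promoting to a sequence.
type CondNewSequence interface {
	NewSequenceIfWorthToSend(ctx context.Context, batches []Batch) (*Sequence, error)
}

// minBatches is a hypothetical condition that promotes a sequence only once
// at least n batches are pending.
type minBatches struct{ n int }

func (c minBatches) NewSequenceIfWorthToSend(_ context.Context, batches []Batch) (*Sequence, error) {
	if len(batches) < c.n {
		return nil, nil // not worth sending yet
	}
	return &Sequence{Batches: batches}, nil
}

func main() {
	var cond CondNewSequence = minBatches{n: 2}
	seq, _ := cond.NewSequenceIfWorthToSend(context.Background(), []Batch{{Number: 1}})
	fmt.Println(seq == nil) // true: below the threshold
}
```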
diff --git a/sequencesender/txbuilder/interface_test.go b/sequencesender/txbuilder/interface_test.go
index 6db0216ab..428f04b79 100644
--- a/sequencesender/txbuilder/interface_test.go
+++ b/sequencesender/txbuilder/interface_test.go
@@ -17,6 +17,8 @@ These tests are auxiliary functions based on the common behaviour of the interfaces
 */
 
 func testSequenceIfWorthToSendNoNewSeq(t *testing.T, sut txbuilder.TxBuilder) {
+	t.Helper()
+
 	cond := mocks_txbuilder.NewCondNewSequence(t)
 	sut.SetCondNewSeq(cond)
 	cond.EXPECT().NewSequenceIfWorthToSend(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil, nil).Once()
@@ -26,6 +28,8 @@ func testSequenceIfWorthToSendNoNewSeq(t *testing.T, sut txbuilder.TxBuilder) {
 }
 
 func testSequenceIfWorthToSendErr(t *testing.T, sut txbuilder.TxBuilder) {
+	t.Helper()
+
 	cond := mocks_txbuilder.NewCondNewSequence(t)
 	sut.SetCondNewSeq(cond)
 	returnErr := fmt.Errorf("test-error")
@@ -36,6 +40,8 @@ func testSequenceIfWorthToSendErr(t *testing.T, sut txbuilder.TxBuilder) {
 }
 
 func testSetCondNewSeq(t *testing.T, sut txbuilder.TxBuilder) {
+	t.Helper()
+
 	cond := mocks_txbuilder.NewCondNewSequence(t)
 	sut.SetCondNewSeq(cond)
 	cond2 := mocks_txbuilder.NewCondNewSequence(t)
diff --git a/sequencesender/txbuilder/validium_cond_num_batches.go b/sequencesender/txbuilder/validium_cond_num_batches.go
index 11329a06d..14a0bddfe 100644
--- a/sequencesender/txbuilder/validium_cond_num_batches.go
+++ b/sequencesender/txbuilder/validium_cond_num_batches.go
@@ -20,7 +20,9 @@ func NewConditionalNewSequenceNumBatches(maxBatchesForL1 uint64) *ConditionalNew
 	}
 }
 
-func (c *ConditionalNewSequenceNumBatches) NewSequenceIfWorthToSend(ctx context.Context, txBuilder TxBuilder, sequenceBatches []seqsendertypes.Batch, l2Coinbase common.Address) (seqsendertypes.Sequence, error) {
+func (c *ConditionalNewSequenceNumBatches) NewSequenceIfWorthToSend(
+	ctx context.Context, txBuilder TxBuilder, sequenceBatches []seqsendertypes.Batch, l2Coinbase common.Address,
+) (seqsendertypes.Sequence, error) {
 	if c.maxBatchesForL1 != MaxBatchesForL1Disabled && len(sequenceBatches) >= int(c.maxBatchesForL1) {
 		log.Infof(
 			"sequence should be sent to L1, because MaxBatchesForL1 (%d) has been reached",
diff --git a/sequencesender/txbuilder/zkevm_cond_max_size.go b/sequencesender/txbuilder/zkevm_cond_max_size.go
index 5c931f8ec..dff4636e7 100644
--- a/sequencesender/txbuilder/zkevm_cond_max_size.go
+++ b/sequencesender/txbuilder/zkevm_cond_max_size.go
@@ -27,7 +27,9 @@ func NewConditionalNewSequenceMaxSize(maxTxSizeForL1 uint64) *ConditionalNewSequ
 	}
 }
 
-func (c *ConditionalNewSequenceMaxSize) NewSequenceIfWorthToSend(ctx context.Context, txBuilder TxBuilder, sequenceBatches []seqsendertypes.Batch, l2Coinbase common.Address) (seqsendertypes.Sequence, error) {
+func (c *ConditionalNewSequenceMaxSize) NewSequenceIfWorthToSend(
+	ctx context.Context, txBuilder TxBuilder, sequenceBatches []seqsendertypes.Batch, l2Coinbase common.Address,
+) (seqsendertypes.Sequence, error) {
 	if c.maxTxSizeForL1 == MaxTxSizeForL1Disabled {
 		log.Debugf("maxTxSizeForL1 is %d, so is disabled", MaxTxSizeForL1Disabled)
 		return nil, nil
@@ -68,19 +70,27 @@ func (c *ConditionalNewSequenceMaxSize) NewSequenceIfWorthToSend(ctx context.Con
 			txReduced, err := txBuilder.BuildSequenceBatchesTx(ctx, sequence)
 			log.Debugf("After reducing batches:  (txSize %d -> %d)", tx.Size(), txReduced.Size())
 			if err == nil && txReduced != nil && txReduced.Size() > c.maxTxSizeForL1 {
-				log.Warnf("After reducing batches:  (txSize %d -> %d) is still too big > %d", tx.Size(), txReduced.Size(), c.maxTxSizeForL1)
+				log.Warnf("After reducing batches:  (txSize %d -> %d) is still too big > %d",
+					tx.Size(), txReduced.Size(), c.maxTxSizeForL1,
+				)
 			}
 			return sequence, err
 		}
 
 		return sequence, err
 	}
-	log.Debugf("Current size:%d < max_size:%d  num_batches: %d, no sequence promoted yet", tx.Size(), c.maxTxSizeForL1, sequence.Len())
+	log.Debugf(
+		"Current size:%d < max_size:%d  num_batches: %d, no sequence promoted yet",
+		tx.Size(), c.maxTxSizeForL1, sequence.Len(),
+	)
 	return nil, nil
 }
 
-// handleEstimateGasSendSequenceErr handles an error on the estimate gas. Results: (nil,nil)=requires waiting, (nil,error)=no handled gracefully, (seq,nil) handled gracefully
-func handleEstimateGasSendSequenceErr(sequenceBatches []seqsendertypes.Batch, err error) ([]seqsendertypes.Batch, error) {
+// handleEstimateGasSendSequenceErr handles an error from the gas estimate when sending a sequence.
+// Results: (nil,nil)=requires waiting, (nil,error)=not handled gracefully, (seq,nil)=handled gracefully
+func handleEstimateGasSendSequenceErr(
+	sequenceBatches []seqsendertypes.Batch, err error,
+) ([]seqsendertypes.Batch, error) {
 	// Insufficient allowance
 	if errors.Is(err, etherman.ErrInsufficientAllowance) {
 		return nil, err
@@ -89,12 +99,15 @@ func handleEstimateGasSendSequenceErr(sequenceBatches []seqsendertypes.Batch, er
 	if isDataForEthTxTooBig(err) {
 		errMsg = fmt.Sprintf("caused the L1 tx to be too big: %v", err)
 	}
-	adjustMsg := ""
+	var adjustMsg string
 	if len(sequenceBatches) > 1 {
 		lastPrevious := sequenceBatches[len(sequenceBatches)-1].BatchNumber()
 		sequenceBatches = sequenceBatches[:len(sequenceBatches)-1]
 		lastCurrent := sequenceBatches[len(sequenceBatches)-1].BatchNumber()
-		adjustMsg = fmt.Sprintf("removing last batch: old  BatchNumber:%d ->  %d, new length: %d", lastPrevious, lastCurrent, len(sequenceBatches))
+		adjustMsg = fmt.Sprintf(
+			"removing last batch: old  BatchNumber:%d ->  %d, new length: %d",
+			lastPrevious, lastCurrent, len(sequenceBatches),
+		)
 	} else {
 		sequenceBatches = nil
 		adjustMsg = "removing all batches"
diff --git a/sonar-project.properties b/sonar-project.properties
index 973e1bc7e..559f70735 100644
--- a/sonar-project.properties
+++ b/sonar-project.properties
@@ -1,2 +1,30 @@
+# =====================================================
+#   Standard properties
+# =====================================================
+
 sonar.projectKey=0xPolygon_cdk
+sonar.projectName=cdk
 sonar.organization=0xpolygon
+
+sonar.sources=.
+sonar.exclusions=**/test/**,**/vendor/**,**/mocks/**,**/build/**,**/target/**,**/proto/include/**,**/*.pb.go,**/docs/**,**/*.sql
+
+sonar.tests=.
+sonar.test.inclusions=**/*_test.go
+sonar.test.exclusions=**/vendor/**,**/docs/**,**/mocks/**,**/*.pb.go,**/*.yml,**/*.yaml,**/*.json,**/*.xml,**/*.toml
+sonar.issue.enforceSemantic=true
+
+# =====================================================
+#   Meta-data for the project
+# =====================================================
+
+sonar.links.homepage=https://github.com/0xPolygon/cdk
+sonar.links.ci=https://github.com/0xPolygon/cdk/actions
+sonar.links.scm=https://github.com/0xPolygon/cdk
+sonar.links.issue=https://github.com/0xPolygon/cdk/issues
+
+# =====================================================
+#   Properties specific to Go
+# =====================================================
+
+sonar.go.coverage.reportPaths=coverage.out
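
sonar.go.coverage.reportPaths expects a standard Go coverage profile; assuming CI runs the full suite, it would typically be produced with:

```
go test ./... -coverprofile=coverage.out
```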
diff --git a/state/encoding_batch_v2.go b/state/encoding_batch_v2.go
index 8e3561bdd..f058f072f 100644
--- a/state/encoding_batch_v2.go
+++ b/state/encoding_batch_v2.go
@@ -99,8 +99,11 @@ const (
 )
 
 var (
-	// ErrBatchV2DontStartWithChangeL2Block is returned when the batch start directly with a trsansaction (without a changeL2Block)
-	ErrBatchV2DontStartWithChangeL2Block = errors.New("batch v2 must start with changeL2Block before Tx (suspect a V1 Batch or a ForcedBatch?))")
+	// ErrBatchV2DontStartWithChangeL2Block is returned when the batch starts directly
+	// with a transaction (without a preceding changeL2Block)
+	ErrBatchV2DontStartWithChangeL2Block = errors.New(
+		"batch v2 must start with changeL2Block before Tx (suspect a V1 Batch or a ForcedBatch?)",
+	)
 	// ErrInvalidBatchV2 is returned when the batch is invalid.
 	ErrInvalidBatchV2 = errors.New("invalid batch v2")
 	// ErrInvalidRLP is returned when the rlp is invalid.
@@ -206,7 +209,10 @@ func (tx L2TxRaw) Encode(batchData []byte) ([]byte, error) {
 
 // DecodeBatchV2 decodes a batch of transactions from a byte slice.
 func DecodeBatchV2(txsData []byte) (*BatchRawV2, error) {
-	// The transactions is not RLP encoded. Is the raw bytes in this form: 1 byte for the transaction type (always 0b for changeL2Block) + 4 bytes for deltaTimestamp + for bytes for indexL1InfoTree
+	// The transactions are not RLP encoded. They are in raw byte format as follows:
+	// 1 byte for the transaction type (always 0b for changeL2Block)
+	// 4 bytes for deltaTimestamp
+	// 4 bytes for indexL1InfoTree.
 	var err error
 	var blocks []L2BlockRaw
 	var currentBlock *L2BlockRaw
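
Given the layout in the comment above, the changeL2Block header can be read with a few fixed-width slices. A sketch, assuming both u32 fields are big-endian:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// parseChangeL2Block reads the fixed-width layout described above:
// 1 type byte (0x0b), 4 bytes deltaTimestamp, 4 bytes indexL1InfoTree.
// Big-endian byte order is an assumption of this sketch.
func parseChangeL2Block(data []byte) (deltaTimestamp, indexL1InfoTree uint32, err error) {
	if len(data) < 9 || data[0] != 0x0b {
		return 0, 0, fmt.Errorf("not a changeL2Block entry")
	}
	return binary.BigEndian.Uint32(data[1:5]), binary.BigEndian.Uint32(data[5:9]), nil
}

func main() {
	raw := []byte{0x0b, 0, 0, 0, 12, 0, 0, 0, 7}
	dt, idx, _ := parseChangeL2Block(raw)
	fmt.Println(dt, idx) // 12 7
}
```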
@@ -258,7 +264,10 @@ func DecodeForcedBatchV2(txsData []byte) (*ForcedBatchRawV2, error) {
 	}
 	// Sanity check, this should never happen
 	if len(efficiencyPercentages) != len(txs) {
-		return nil, fmt.Errorf("error decoding len(efficiencyPercentages) != len(txs). len(efficiencyPercentages)=%d, len(txs)=%d : %w", len(efficiencyPercentages), len(txs), ErrInvalidRLP)
+		return nil, fmt.Errorf(
+			"error decoding len(efficiencyPercentages) != len(txs). len(efficiencyPercentages)=%d, len(txs)=%d : %w",
+			len(efficiencyPercentages), len(txs), ErrInvalidRLP,
+		)
 	}
 	forcedBatch := ForcedBatchRawV2{}
 	for i := range txs {
@@ -311,12 +320,22 @@ func DecodeTxRLP(txsData []byte, offset int) (int, *L2TxRaw, error) {
 	var rlpFields [][]byte
 	err = rlp.DecodeBytes(txInfo, &rlpFields)
 	if err != nil {
-		log.Error("error decoding tx Bytes: ", err, ". fullDataTx: ", hex.EncodeToString(fullDataTx), "\n tx: ", hex.EncodeToString(txInfo), "\n Txs received: ", hex.EncodeToString(txsData))
+		log.Error(
+			"error decoding tx Bytes: ", err,
+			". fullDataTx: ", hex.EncodeToString(fullDataTx),
+			"\n tx: ", hex.EncodeToString(txInfo),
+			"\n Txs received: ", hex.EncodeToString(txsData),
+		)
 		return 0, nil, err
 	}
 	legacyTx, err := RlpFieldsToLegacyTx(rlpFields, vData, rData, sData)
 	if err != nil {
-		log.Debug("error creating tx from rlp fields: ", err, ". fullDataTx: ", hex.EncodeToString(fullDataTx), "\n tx: ", hex.EncodeToString(txInfo), "\n Txs received: ", hex.EncodeToString(txsData))
+		log.Debug(
+			"error creating tx from rlp fields: ", err,
+			". fullDataTx: ", hex.EncodeToString(fullDataTx),
+			"\n tx: ", hex.EncodeToString(txInfo),
+			"\n Txs received: ", hex.EncodeToString(txsData),
+		)
 		return 0, nil, err
 	}
 
@@ -348,7 +367,11 @@ func decodeRLPListLengthFromOffset(txsData []byte, offset int) (uint64, error) {
 			return 0, fmt.Errorf("not enough data to get length: %w", ErrInvalidRLP)
 		}
 
-		n, err := strconv.ParseUint(hex.EncodeToString(txsData[pos64+1:pos64+1+lengthInByteOfSize]), hex.Base, hex.BitSize64) // +1 is the header. For example 0xf7
+		n, err := strconv.ParseUint(
+			hex.EncodeToString(txsData[pos64+1:pos64+1+lengthInByteOfSize]), // +1 is the header. For example 0xf7
+			hex.Base,
+			hex.BitSize64,
+		)
 		if err != nil {
 			log.Debug("error parsing length: ", err)
 			return 0, fmt.Errorf("error parsing length value: %w", err)
diff --git a/state/errors.go b/state/errors.go
index 5a394a240..4bd686056 100644
--- a/state/errors.go
+++ b/state/errors.go
@@ -36,11 +36,15 @@ var (
 	ErrDBTxNil = errors.New("the method requires a dbTx that is not nil")
 	// ErrExistingTxGreaterThanProcessedTx indicates that we have more txs stored
 	// in db than the txs we want to process.
-	ErrExistingTxGreaterThanProcessedTx = errors.New("there are more transactions in the database than in the processed transaction set")
+	ErrExistingTxGreaterThanProcessedTx = errors.New(
+		"there are more transactions in the database than in the processed transaction set",
+	)
 	// ErrOutOfOrderProcessedTx indicates that the processed transactions of an
 	// ongoing batch are not in the same order as the transactions stored in the
 	// database for the same batch.
-	ErrOutOfOrderProcessedTx = errors.New("the processed transactions are not in the same order as the stored transactions")
+	ErrOutOfOrderProcessedTx = errors.New(
+		"the processed transactions are not in the same order as the stored transactions",
+	)
 	// ErrInsufficientFundsForTransfer is returned if the transaction sender doesn't
 	// have enough funds for transfer(topmost call only).
 	ErrInsufficientFundsForTransfer = errors.New("insufficient funds for transfer")
diff --git a/state/forkid.go b/state/forkid.go
index 276173138..13fec6712 100644
--- a/state/forkid.go
+++ b/state/forkid.go
@@ -2,11 +2,11 @@ package state
 
 const (
 	// FORKID_BLUEBERRY is the fork id 4
-	FORKID_BLUEBERRY = 4
+	FORKID_BLUEBERRY = 4 //nolint:stylecheck
 	// FORKID_DRAGONFRUIT is the fork id 5
-	FORKID_DRAGONFRUIT = 5
+	FORKID_DRAGONFRUIT = 5 //nolint:stylecheck
 	// FORKID_INCABERRY is the fork id 6
-	FORKID_INCABERRY = 6
+	FORKID_INCABERRY = 6 //nolint:stylecheck
 	// FORKID_ETROG is the fork id 7
-	FORKID_ETROG = 7
+	FORKID_ETROG = 7 //nolint:stylecheck
 )
diff --git a/state/helper.go b/state/helper.go
index c717fb56a..aeedb8fe3 100644
--- a/state/helper.go
+++ b/state/helper.go
@@ -99,7 +99,11 @@ func DecodeTxs(txsData []byte, forkID uint64) ([]*types.Transaction, []byte, []u
 				log.Debug("error parsing length: ", err)
 				return []*types.Transaction{}, txsData, []uint8{}, err
 			}
-			n, err := strconv.ParseUint(hex.EncodeToString(txsData[pos+1:pos+1+num-f7]), hex.Base, hex.BitSize64) // +1 is the header. For example 0xf7
+			n, err := strconv.ParseUint(
+				hex.EncodeToString(txsData[pos+1:pos+1+num-f7]), // +1 is the header. For example 0xf7
+				hex.Base,
+				hex.BitSize64,
+			)
 			if err != nil {
 				log.Debug("error parsing length: ", err)
 				return []*types.Transaction{}, txsData, []uint8{}, err
@@ -153,13 +157,23 @@ func DecodeTxs(txsData []byte, forkID uint64) ([]*types.Transaction, []byte, []u
 		var rlpFields [][]byte
 		err = rlp.DecodeBytes(txInfo, &rlpFields)
 		if err != nil {
-			log.Error("error decoding tx Bytes: ", err, ". fullDataTx: ", hex.EncodeToString(fullDataTx), "\n tx: ", hex.EncodeToString(txInfo), "\n Txs received: ", hex.EncodeToString(txsData))
+			log.Error(
+				"error decoding tx Bytes: ", err,
+				". fullDataTx: ", hex.EncodeToString(fullDataTx),
+				"\n tx: ", hex.EncodeToString(txInfo),
+				"\n Txs received: ", hex.EncodeToString(txsData),
+			)
 			return []*types.Transaction{}, txsData, []uint8{}, ErrInvalidData
 		}
 
 		legacyTx, err := RlpFieldsToLegacyTx(rlpFields, vData, rData, sData)
 		if err != nil {
-			log.Debug("error creating tx from rlp fields: ", err, ". fullDataTx: ", hex.EncodeToString(fullDataTx), "\n tx: ", hex.EncodeToString(txInfo), "\n Txs received: ", hex.EncodeToString(txsData))
+			log.Debug(
+				"error creating tx from rlp fields: ", err, ". fullDataTx: ",
+				hex.EncodeToString(fullDataTx),
+				"\n tx: ", hex.EncodeToString(txInfo),
+				"\n Txs received: ", hex.EncodeToString(txsData),
+			)
 			return []*types.Transaction{}, txsData, []uint8{}, err
 		}
 
diff --git a/state/pgstatestorage/batch.go b/state/pgstatestorage/batch.go
index db818df5f..6273f0641 100644
--- a/state/pgstatestorage/batch.go
+++ b/state/pgstatestorage/batch.go
@@ -11,9 +11,17 @@ import (
 
 // AddBatch stores a batch
 func (p *PostgresStorage) AddBatch(ctx context.Context, dbBatch *state.DBBatch, dbTx pgx.Tx) error {
-	const addInputHashSQL = "INSERT INTO aggregator.batch (batch_num, batch, datastream, witness) VALUES ($1, $2, $3, $4) ON CONFLICT (batch_num) DO UPDATE SET batch = $2, datastream = $3, witness = $4"
+	const addInputHashSQL = `
+		INSERT INTO aggregator.batch (batch_num, batch, datastream, witness) 
+		VALUES ($1, $2, $3, $4) 
+		ON CONFLICT (batch_num) DO UPDATE 
+		SET batch = $2, datastream = $3, witness = $4
+	`
 	e := p.getExecQuerier(dbTx)
-	_, err := e.Exec(ctx, addInputHashSQL, dbBatch.Batch.BatchNumber, &dbBatch.Batch, common.Bytes2Hex(dbBatch.Datastream), common.Bytes2Hex(dbBatch.Witness))
+	_, err := e.Exec(
+		ctx, addInputHashSQL, dbBatch.Batch.BatchNumber, &dbBatch.Batch,
+		common.Bytes2Hex(dbBatch.Datastream), common.Bytes2Hex(dbBatch.Witness),
+	)
 	return err
 }
 
@@ -38,7 +46,9 @@ func (p *PostgresStorage) GetBatch(ctx context.Context, batchNumber uint64, dbTx
 }
 
 // DeleteBatchesOlderThanBatchNumber deletes batches previous to the given batch number
-func (p *PostgresStorage) DeleteBatchesOlderThanBatchNumber(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) error {
+func (p *PostgresStorage) DeleteBatchesOlderThanBatchNumber(
+	ctx context.Context, batchNumber uint64, dbTx pgx.Tx,
+) error {
 	const deleteBatchesSQL = "DELETE FROM aggregator.batch WHERE batch_num < $1"
 	e := p.getExecQuerier(dbTx)
 	_, err := e.Exec(ctx, deleteBatchesSQL, batchNumber)
@@ -46,7 +56,9 @@ func (p *PostgresStorage) DeleteBatchesOlderThanBatchNumber(ctx context.Context,
 }
 
 // DeleteBatchesNewerThanBatchNumber deletes batches previous to the given batch number
-func (p *PostgresStorage) DeleteBatchesNewerThanBatchNumber(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) error {
+func (p *PostgresStorage) DeleteBatchesNewerThanBatchNumber(
+	ctx context.Context, batchNumber uint64, dbTx pgx.Tx,
+) error {
 	const deleteBatchesSQL = "DELETE FROM aggregator.batch WHERE batch_num > $1"
 	e := p.getExecQuerier(dbTx)
 	_, err := e.Exec(ctx, deleteBatchesSQL, batchNumber)
diff --git a/state/pgstatestorage/proof.go b/state/pgstatestorage/proof.go
index 98668a448..a5ae6331d 100644
--- a/state/pgstatestorage/proof.go
+++ b/state/pgstatestorage/proof.go
@@ -25,7 +25,9 @@ func (p *PostgresStorage) CheckProofExistsForBatch(ctx context.Context, batchNum
 }
 
 // CheckProofContainsCompleteSequences checks if a recursive proof contains complete sequences
-func (p *PostgresStorage) CheckProofContainsCompleteSequences(ctx context.Context, proof *state.Proof, dbTx pgx.Tx) (bool, error) {
+func (p *PostgresStorage) CheckProofContainsCompleteSequences(
+	ctx context.Context, proof *state.Proof, dbTx pgx.Tx,
+) (bool, error) {
 	const getProofContainsCompleteSequencesSQL = `
 		SELECT EXISTS (SELECT 1 FROM aggregator.sequence s1 WHERE s1.from_batch_num = $1) AND
 			   EXISTS (SELECT 1 FROM aggregator.sequence s2 WHERE s2.to_batch_num = $2)
@@ -40,7 +42,9 @@ func (p *PostgresStorage) CheckProofContainsCompleteSequences(ctx context.Contex
 }
 
 // GetProofReadyToVerify return the proof that is ready to verify
-func (p *PostgresStorage) GetProofReadyToVerify(ctx context.Context, lastVerfiedBatchNumber uint64, dbTx pgx.Tx) (*state.Proof, error) {
+func (p *PostgresStorage) GetProofReadyToVerify(
+	ctx context.Context, lastVerfiedBatchNumber uint64, dbTx pgx.Tx,
+) (*state.Proof, error) {
 	const getProofReadyToVerifySQL = `
 		SELECT 
 			p.batch_num, 
@@ -59,11 +63,15 @@ func (p *PostgresStorage) GetProofReadyToVerify(ctx context.Context, lastVerfied
 			EXISTS (SELECT 1 FROM aggregator.sequence s2 WHERE s2.to_batch_num = p.batch_num_final)		
 		`
 
-	var proof *state.Proof = &state.Proof{}
+	var proof = &state.Proof{}
 
 	e := p.getExecQuerier(dbTx)
 	row := e.QueryRow(ctx, getProofReadyToVerifySQL, lastVerfiedBatchNumber+1)
-	err := row.Scan(&proof.BatchNumber, &proof.BatchNumberFinal, &proof.Proof, &proof.ProofID, &proof.InputProver, &proof.Prover, &proof.ProverID, &proof.GeneratingSince, &proof.CreatedAt, &proof.UpdatedAt)
+	err := row.Scan(
+		&proof.BatchNumber, &proof.BatchNumberFinal, &proof.Proof, &proof.ProofID,
+		&proof.InputProver, &proof.Prover, &proof.ProverID, &proof.GeneratingSince,
+		&proof.CreatedAt, &proof.UpdatedAt,
+	)
 
 	if errors.Is(err, pgx.ErrNoRows) {
 		return nil, state.ErrNotFound
@@ -77,8 +85,8 @@ func (p *PostgresStorage) GetProofReadyToVerify(ctx context.Context, lastVerfied
 // GetProofsToAggregate returns the next two proofs that can be aggregated
 func (p *PostgresStorage) GetProofsToAggregate(ctx context.Context, dbTx pgx.Tx) (*state.Proof, *state.Proof, error) {
 	var (
-		proof1 *state.Proof = &state.Proof{}
-		proof2 *state.Proof = &state.Proof{}
+		proof1 = &state.Proof{}
+		proof2 = &state.Proof{}
 	)
 
 	// TODO: add comments to explain the query
@@ -130,8 +138,13 @@ func (p *PostgresStorage) GetProofsToAggregate(ctx context.Context, dbTx pgx.Tx)
 	e := p.getExecQuerier(dbTx)
 	row := e.QueryRow(ctx, getProofsToAggregateSQL)
 	err := row.Scan(
-		&proof1.BatchNumber, &proof1.BatchNumberFinal, &proof1.Proof, &proof1.ProofID, &proof1.InputProver, &proof1.Prover, &proof1.ProverID, &proof1.GeneratingSince, &proof1.CreatedAt, &proof1.UpdatedAt,
-		&proof2.BatchNumber, &proof2.BatchNumberFinal, &proof2.Proof, &proof2.ProofID, &proof2.InputProver, &proof2.Prover, &proof2.ProverID, &proof2.GeneratingSince, &proof2.CreatedAt, &proof2.UpdatedAt)
+		&proof1.BatchNumber, &proof1.BatchNumberFinal, &proof1.Proof, &proof1.ProofID,
+		&proof1.InputProver, &proof1.Prover, &proof1.ProverID, &proof1.GeneratingSince,
+		&proof1.CreatedAt, &proof1.UpdatedAt,
+		&proof2.BatchNumber, &proof2.BatchNumberFinal, &proof2.Proof, &proof2.ProofID,
+		&proof2.InputProver, &proof2.Prover, &proof2.ProverID, &proof2.GeneratingSince,
+		&proof2.CreatedAt, &proof2.UpdatedAt,
+	)
 
 	if errors.Is(err, pgx.ErrNoRows) {
 		return nil, nil, state.ErrNotFound
@@ -144,25 +157,51 @@ func (p *PostgresStorage) GetProofsToAggregate(ctx context.Context, dbTx pgx.Tx)
 
 // AddGeneratedProof adds a generated proof to the storage
 func (p *PostgresStorage) AddGeneratedProof(ctx context.Context, proof *state.Proof, dbTx pgx.Tx) error {
-	const addGeneratedProofSQL = "INSERT INTO aggregator.proof (batch_num, batch_num_final, proof, proof_id, input_prover, prover, prover_id, generating_since, created_at, updated_at) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)"
+	const addGeneratedProofSQL = `
+		INSERT INTO aggregator.proof (
+			batch_num, batch_num_final, proof, proof_id, input_prover, prover, 
+			prover_id, generating_since, created_at, updated_at
+		) VALUES (
+			$1, $2, $3, $4, $5, $6, $7, $8, $9, $10
+		)
+	`
 	e := p.getExecQuerier(dbTx)
 	now := time.Now().UTC().Round(time.Microsecond)
-	_, err := e.Exec(ctx, addGeneratedProofSQL, proof.BatchNumber, proof.BatchNumberFinal, proof.Proof, proof.ProofID, proof.InputProver, proof.Prover, proof.ProverID, proof.GeneratingSince, now, now)
+	_, err := e.Exec(
+		ctx, addGeneratedProofSQL, proof.BatchNumber, proof.BatchNumberFinal, proof.Proof, proof.ProofID,
+		proof.InputProver, proof.Prover, proof.ProverID, proof.GeneratingSince, now, now,
+	)
 	return err
 }
 
 // UpdateGeneratedProof updates a generated proof in the storage
 func (p *PostgresStorage) UpdateGeneratedProof(ctx context.Context, proof *state.Proof, dbTx pgx.Tx) error {
-	const addGeneratedProofSQL = "UPDATE aggregator.proof SET proof = $3, proof_id = $4, input_prover = $5, prover = $6, prover_id = $7, generating_since = $8, updated_at = $9 WHERE batch_num = $1 AND batch_num_final = $2"
+	const addGeneratedProofSQL = `
+	UPDATE aggregator.proof 
+	SET proof = $3, 
+		proof_id = $4, 
+		input_prover = $5, 
+		prover = $6, 
+		prover_id = $7, 
+		generating_since = $8, 
+		updated_at = $9 
+	WHERE batch_num = $1 
+		AND batch_num_final = $2
+	`
 	e := p.getExecQuerier(dbTx)
 	now := time.Now().UTC().Round(time.Microsecond)
-	_, err := e.Exec(ctx, addGeneratedProofSQL, proof.BatchNumber, proof.BatchNumberFinal, proof.Proof, proof.ProofID, proof.InputProver, proof.Prover, proof.ProverID, proof.GeneratingSince, now)
+	_, err := e.Exec(
+		ctx, addGeneratedProofSQL, proof.BatchNumber, proof.BatchNumberFinal, proof.Proof, proof.ProofID,
+		proof.InputProver, proof.Prover, proof.ProverID, proof.GeneratingSince, now,
+	)
 	return err
 }
 
 // DeleteGeneratedProofs deletes from the storage the generated proofs falling
 // inside the batch numbers range.
-func (p *PostgresStorage) DeleteGeneratedProofs(ctx context.Context, batchNumber uint64, batchNumberFinal uint64, dbTx pgx.Tx) error {
+func (p *PostgresStorage) DeleteGeneratedProofs(
+	ctx context.Context, batchNumber uint64, batchNumberFinal uint64, dbTx pgx.Tx,
+) error {
 	const deleteGeneratedProofSQL = "DELETE FROM aggregator.proof WHERE batch_num >= $1 AND batch_num_final <= $2"
 	e := p.getExecQuerier(dbTx)
 	_, err := e.Exec(ctx, deleteGeneratedProofSQL, batchNumber, batchNumberFinal)
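
Note the proof writes above round time.Now() to the microsecond before storing. Assuming the created_at/updated_at columns are Postgres timestamps (microsecond resolution at most), rounding up front keeps the value written identical to the value read back. A runnable illustration:

    package main

    import (
    	"fmt"
    	"time"
    )

    func main() {
    	now := time.Now().UTC()               // nanosecond precision in Go
    	stored := now.Round(time.Microsecond) // the most Postgres can retain
    	fmt.Println(now.Equal(stored))        // often false
    	// Rounding before the INSERT means equality checks against values
    	// read back from the database do not flake on sub-microsecond digits.
    }
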
diff --git a/state/pgstatestorage/sequence.go b/state/pgstatestorage/sequence.go
index 12b19f7e2..7d5be9fb9 100644
--- a/state/pgstatestorage/sequence.go
+++ b/state/pgstatestorage/sequence.go
@@ -9,7 +9,11 @@ import (
 
 // AddSequence stores the sequence information to allow the aggregator to verify sequences.
 func (p *PostgresStorage) AddSequence(ctx context.Context, sequence state.Sequence, dbTx pgx.Tx) error {
-	const addSequenceSQL = "INSERT INTO aggregator.sequence (from_batch_num, to_batch_num) VALUES($1, $2) ON CONFLICT (from_batch_num) DO UPDATE SET to_batch_num = $2"
+	const addSequenceSQL = `
+	INSERT INTO aggregator.sequence (from_batch_num, to_batch_num) 
+	VALUES($1, $2) 
+	ON CONFLICT (from_batch_num) DO UPDATE SET to_batch_num = $2
+	`
 
 	e := p.getExecQuerier(dbTx)
 	_, err := e.Exec(ctx, addSequenceSQL, sequence.FromBatchNumber, sequence.ToBatchNumber)
diff --git a/state/types.go b/state/types.go
index d5a8d155c..62c0f5023 100644
--- a/state/types.go
+++ b/state/types.go
@@ -14,7 +14,7 @@ type ZKCounters struct {
 	UsedArithmetics      uint32
 	UsedBinaries         uint32
 	UsedSteps            uint32
-	UsedSha256Hashes_V2  uint32
+	UsedSha256Hashes_V2  uint32 //nolint:stylecheck
 }
 
 // BatchResources is a struct that contains the ZKEVM resources used by a batch/tx
diff --git a/sync/evmdownloader.go b/sync/evmdownloader.go
index 31613cf3b..1e70d12f4 100644
--- a/sync/evmdownloader.go
+++ b/sync/evmdownloader.go
@@ -143,7 +143,9 @@ func NewEVMDownloaderImplementation(
 	}
 }
 
-func (d *EVMDownloaderImplementation) WaitForNewBlocks(ctx context.Context, lastBlockSeen uint64) (newLastBlock uint64) {
+func (d *EVMDownloaderImplementation) WaitForNewBlocks(
+	ctx context.Context, lastBlockSeen uint64,
+) (newLastBlock uint64) {
 	attempts := 0
 	ticker := time.NewTicker(d.waitForNewBlocksPeriod)
 	defer ticker.Stop()
@@ -175,8 +177,10 @@ func (d *EVMDownloaderImplementation) GetEventsByBlockRange(ctx context.Context,
 			b := d.GetBlockHeader(ctx, l.BlockNumber)
 			if b.Hash != l.BlockHash {
 				d.log.Infof(
-					"there has been a block hash change between the event query and the block query for block %d: %s vs %s. Retrtying.",
-					l.BlockNumber, b.Hash, l.BlockHash)
+					"there has been a block hash change between the event query and the block query "+
+						"for block %d: %s vs %s. Retrying.",
+					l.BlockNumber, b.Hash, l.BlockHash,
+				)
 				return d.GetEventsByBlockRange(ctx, fromBlock, toBlock)
 			}
 			blocks = append(blocks, EVMBlock{
diff --git a/sync/evmdownloader_test.go b/sync/evmdownloader_test.go
index 15a6608ce..59c43b8ff 100644
--- a/sync/evmdownloader_test.go
+++ b/sync/evmdownloader_test.go
@@ -389,6 +389,8 @@ func buildAppender() LogAppenderMap {
 }
 
 func NewTestDownloader(t *testing.T) (*EVMDownloader, *L2Mock) {
+	t.Helper()
+
 	rh := &RetryHandler{
 		MaxRetryAttemptsAfterError: 5,
 		RetryAfterErrorPeriod:      time.Millisecond * 100,
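
NewTestDownloader above now starts with t.Helper(), as do the setup functions later in this series. The effect is purely diagnostic: failures raised inside the helper are attributed to the caller's line rather than the helper's. A minimal sketch (hypothetical assertion helper):

    package helpers

    import "testing"

    // Without t.Helper(), a failing t.Fatalf here is reported against this
    // file and line; with it, the report points at the test that called us.
    func mustPositive(t *testing.T, v int) {
    	t.Helper()
    	if v <= 0 {
    		t.Fatalf("expected positive value, got %d", v)
    	}
    }
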
diff --git a/sync/evmdriver.go b/sync/evmdriver.go
index 2edd2e159..f42d040e3 100644
--- a/sync/evmdriver.go
+++ b/sync/evmdriver.go
@@ -75,7 +75,7 @@ reset:
 		lastProcessedBlock, err = d.processor.GetLastProcessedBlock(ctx)
 		if err != nil {
 			attempts++
-			d.log.Error("error geting last processed block: ", err)
+			d.log.Error("error getting last processed block: ", err)
 			d.rh.Handle("Sync", attempts)
 			continue
 		}
diff --git a/sync/evmdriver_test.go b/sync/evmdriver_test.go
index 746923216..907dac28f 100644
--- a/sync/evmdriver_test.go
+++ b/sync/evmdriver_test.go
@@ -53,8 +53,18 @@ func TestSync(t *testing.T) {
 	}
 	reorg1Completed := reorgSemaphore{}
 	dm.On("Download", mock.Anything, mock.Anything, mock.Anything).Run(func(args mock.Arguments) {
-		ctx := args.Get(0).(context.Context)
-		downloadedCh := args.Get(2).(chan EVMBlock)
+		ctx, ok := args.Get(0).(context.Context)
+		if !ok {
+			log.Error("failed to assert type for context")
+			return
+		}
+
+		downloadedCh, ok := args.Get(2).(chan EVMBlock)
+		if !ok {
+			log.Error("failed to assert type for downloadedCh")
+			return
+		}
+
 		log.Info("entering mock loop")
 		for {
 			select {
@@ -168,7 +178,6 @@ func TestHandleNewBlock(t *testing.T) {
 	pm.On("ProcessBlock", ctx, Block{Num: b3.Num, Events: b3.Events}).
 		Return(nil).Once()
 	driver.handleNewBlock(ctx, b3)
-
 }
 
 func TestHandleReorg(t *testing.T) {
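
The mock callback above swaps bare type assertions for the two-value form. A bare assertion panics when the dynamic type is wrong and takes the whole test binary down; the comma-ok form lets the callback log and bail instead. A self-contained sketch of the pattern (hypothetical channel argument):

    package main

    import "fmt"

    func handle(arg interface{}) {
    	ch, ok := arg.(chan int)
    	if !ok {
    		fmt.Println("failed to assert type for channel argument")
    		return // log-and-return, mirroring the mock above
    	}
    	ch <- 42
    }

    func main() {
    	c := make(chan int, 1)
    	handle(c)
    	fmt.Println(<-c)
    	handle("not a channel") // no panic, just a logged failure
    }
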
diff --git a/test/helpers/aggoracle_e2e.go b/test/helpers/aggoracle_e2e.go
index fdde39dd2..311ba1897 100644
--- a/test/helpers/aggoracle_e2e.go
+++ b/test/helpers/aggoracle_e2e.go
@@ -26,7 +26,10 @@ import (
 )
 
 const (
-	NetworkIDL2 = uint32(1)
+	NetworkIDL2    = uint32(1)
+	chainID        = 1337
+	initialBalance = "10000000000000000000000000"
+	blockGasLimit  = uint64(999999999999999999)
 )
 
 type AggoracleWithEVMChainEnv struct {
@@ -51,10 +54,12 @@ type AggoracleWithEVMChainEnv struct {
 }
 
 func SetupAggoracleWithEVMChain(t *testing.T) *AggoracleWithEVMChainEnv {
+	t.Helper()
+
 	ctx := context.Background()
 	l1Client, syncer, gerL1Contract, gerL1Addr, bridgeL1Contract, bridgeL1Addr, authL1, rd := CommonSetup(t)
 	sender, l2Client, gerL2Contract, gerL2Addr, bridgeL2Contract, bridgeL2Addr, authL2, ethTxManMockL2 := EVMSetup(t)
-	oracle, err := aggoracle.New(sender, l1Client.Client(), syncer, etherman.LatestBlock, time.Millisecond*20)
+	oracle, err := aggoracle.New(sender, l1Client.Client(), syncer, etherman.LatestBlock, time.Millisecond*20) //nolint:gomnd
 	require.NoError(t, err)
 	go oracle.Start(ctx)
 
@@ -90,12 +95,14 @@ func CommonSetup(t *testing.T) (
 	*bind.TransactOpts,
 	*reorgdetector.ReorgDetector,
 ) {
+	t.Helper()
+
 	// Config and spin up
 	ctx := context.Background()
 	// Simulated L1
 	privateKeyL1, err := crypto.GenerateKey()
 	require.NoError(t, err)
-	authL1, err := bind.NewKeyedTransactorWithChainID(privateKeyL1, big.NewInt(1337))
+	authL1, err := bind.NewKeyedTransactorWithChainID(privateKeyL1, big.NewInt(chainID))
 	require.NoError(t, err)
 	l1Client, gerL1Addr, gerL1Contract, bridgeL1Addr, bridgeL1Contract, err := newSimulatedL1(authL1)
 	require.NoError(t, err)
@@ -105,7 +112,7 @@ func CommonSetup(t *testing.T) (
 	require.NoError(t, err)
 	// Syncer
 	dbPathSyncer := t.TempDir()
-	syncer, err := l1infotreesync.New(ctx, dbPathSyncer, gerL1Addr, common.Address{}, 10, etherman.LatestBlock, reorg, l1Client.Client(), time.Millisecond, 0, 100*time.Millisecond, 3)
+	syncer, err := l1infotreesync.New(ctx, dbPathSyncer, gerL1Addr, common.Address{}, 10, etherman.LatestBlock, reorg, l1Client.Client(), time.Millisecond, 0, 100*time.Millisecond, 3) //nolint:gomnd
 	require.NoError(t, err)
 	go syncer.Start(ctx)
 
@@ -122,14 +129,16 @@ func EVMSetup(t *testing.T) (
 	*bind.TransactOpts,
 	*EthTxManagerMock,
 ) {
+	t.Helper()
+
 	privateKeyL2, err := crypto.GenerateKey()
 	require.NoError(t, err)
-	authL2, err := bind.NewKeyedTransactorWithChainID(privateKeyL2, big.NewInt(1337))
+	authL2, err := bind.NewKeyedTransactorWithChainID(privateKeyL2, big.NewInt(chainID))
 	require.NoError(t, err)
 	l2Client, gerL2Addr, gerL2Sc, bridgeL2Addr, bridgeL2Sc, err := newSimulatedEVMAggSovereignChain(authL2)
 	require.NoError(t, err)
 	ethTxManMock := NewEthTxManMock(t, l2Client, authL2)
-	sender, err := chaingersender.NewEVMChainGERSender(gerL2Addr, authL2.From, l2Client.Client(), ethTxManMock, 0, time.Millisecond*50)
+	sender, err := chaingersender.NewEVMChainGERSender(gerL2Addr, authL2.From, l2Client.Client(), ethTxManMock, 0, time.Millisecond*50) //nolint:gomnd
 	require.NoError(t, err)
 
 	return sender, l2Client, gerL2Sc, gerL2Addr, bridgeL2Sc, bridgeL2Addr, authL2, ethTxManMock
@@ -144,12 +153,18 @@ func newSimulatedL1(auth *bind.TransactOpts) (
 	err error,
 ) {
 	ctx := context.Background()
+
 	privateKeyL1, err := crypto.GenerateKey()
 	if err != nil {
-		return
+		return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to generate private key: %w", err)
 	}
-	authDeployer, err := bind.NewKeyedTransactorWithChainID(privateKeyL1, big.NewInt(1337))
-	balance, _ := new(big.Int).SetString("10000000000000000000000000", 10) //nolint:gomnd
+
+	authDeployer, err := bind.NewKeyedTransactorWithChainID(privateKeyL1, big.NewInt(chainID))
+	if err != nil {
+		return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to create transactor: %w", err)
+	}
+
+	balance, _ := new(big.Int).SetString(initialBalance, 10) //nolint:gomnd
 	address := auth.From
 	genesisAlloc := map[common.Address]types.Account{
 		address: {
@@ -159,28 +174,29 @@ func newSimulatedL1(auth *bind.TransactOpts) (
 			Balance: balance,
 		},
 	}
-	blockGasLimit := uint64(999999999999999999) //nolint:gomnd
+
 	client = simulated.NewBackend(genesisAlloc, simulated.WithBlockGasLimit(blockGasLimit))
 
 	bridgeImplementationAddr, _, _, err := polygonzkevmbridgev2.DeployPolygonzkevmbridgev2(authDeployer, client.Client())
 	if err != nil {
-		return
+		return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to deploy bridge implementation: %w", err)
 	}
 	client.Commit()
 
 	nonce, err := client.Client().PendingNonceAt(ctx, authDeployer.From)
 	if err != nil {
-		return
+		return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to get pending nonce: %w", err)
 	}
 	precalculatedAddr := crypto.CreateAddress(authDeployer.From, nonce+1)
 	bridgeABI, err := polygonzkevmbridgev2.Polygonzkevmbridgev2MetaData.GetAbi()
 	if err != nil {
-		return
+		return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to get bridge ABI: %w", err)
 	}
 	if bridgeABI == nil {
 		err = errors.New("GetABI returned nil")
-		return
+		return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to get bridge ABI: %w", err)
 	}
+
 	dataCallProxy, err := bridgeABI.Pack("initialize",
 		uint32(0),        // networkIDMainnet
 		common.Address{}, // gasTokenAddressMainnet
@@ -190,8 +206,9 @@ func newSimulatedL1(auth *bind.TransactOpts) (
 		[]byte{}, // gasTokenMetadata
 	)
 	if err != nil {
-		return
+		return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to pack data for proxy initialization: %w", err)
 	}
+
 	bridgeAddr, _, _, err = transparentupgradableproxy.DeployTransparentupgradableproxy(
 		authDeployer,
 		client.Client(),
@@ -200,28 +217,40 @@ func newSimulatedL1(auth *bind.TransactOpts) (
 		dataCallProxy,
 	)
 	if err != nil {
-		return
+		return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to deploy transparent upgradable proxy: %w", err)
 	}
 	client.Commit()
+
 	bridgeContract, err = polygonzkevmbridgev2.NewPolygonzkevmbridgev2(bridgeAddr, client.Client())
 	if err != nil {
-		return
+		return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to create bridge contract instance: %w", err)
 	}
+
 	checkGERAddr, err := bridgeContract.GlobalExitRootManager(&bind.CallOpts{Pending: false})
 	if err != nil {
-		return
+		return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to get Global Exit Root Manager: %w", err)
 	}
 	if precalculatedAddr != checkGERAddr {
-		err = fmt.Errorf("error deploying bridge, unexpected GER addr. Expected %s. Actual %s", precalculatedAddr.Hex(), checkGERAddr.Hex())
+		return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf(
+			"error deploying bridge, unexpected GER addr. Expected %s. Actual %s",
+			precalculatedAddr.Hex(), checkGERAddr.Hex(),
+		)
 	}
 
 	gerAddr, _, gerContract, err = gerContractL1.DeployGlobalexitrootnopush0(authDeployer, client.Client(), auth.From, bridgeAddr)
-
+	if err != nil {
+		return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to deploy GER contract: %w", err)
+	}
 	client.Commit()
+
 	if precalculatedAddr != gerAddr {
-		err = fmt.Errorf("error calculating addr. Expected %s. Actual %s", precalculatedAddr.Hex(), gerAddr.Hex())
+		return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf(
+			"error calculating GER address. Expected %s. Actual %s",
+			precalculatedAddr.Hex(), gerAddr.Hex(),
+		)
 	}
-	return
+
+	return client, gerAddr, gerContract, bridgeAddr, bridgeContract, nil
 }
 
 func newSimulatedEVMAggSovereignChain(auth *bind.TransactOpts) (
@@ -233,14 +262,21 @@ func newSimulatedEVMAggSovereignChain(auth *bind.TransactOpts) (
 	err error,
 ) {
 	ctx := context.Background()
+
 	privateKeyL1, err := crypto.GenerateKey()
 	if err != nil {
-		return
+		return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to generate private key: %w", err)
 	}
-	authDeployer, err := bind.NewKeyedTransactorWithChainID(privateKeyL1, big.NewInt(1337))
-	balance, _ := new(big.Int).SetString("10000000000000000000000000", 10) //nolint:gomnd
+
+	authDeployer, err := bind.NewKeyedTransactorWithChainID(privateKeyL1, big.NewInt(chainID))
+	if err != nil {
+		return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to create transactor: %w", err)
+	}
+
+	balance, _ := new(big.Int).SetString(initialBalance, 10) //nolint:gomnd
 	address := auth.From
 	precalculatedBridgeAddr := crypto.CreateAddress(authDeployer.From, 1)
+
 	genesisAlloc := map[common.Address]types.Account{
 		address: {
 			Balance: balance,
@@ -252,28 +288,31 @@ func newSimulatedEVMAggSovereignChain(auth *bind.TransactOpts) (
 			Balance: balance,
 		},
 	}
-	blockGasLimit := uint64(999999999999999999) //nolint:gomnd
+
+	const blockGasLimit = uint64(999999999999999999)
 	client = simulated.NewBackend(genesisAlloc, simulated.WithBlockGasLimit(blockGasLimit))
 
 	bridgeImplementationAddr, _, _, err := polygonzkevmbridgev2.DeployPolygonzkevmbridgev2(authDeployer, client.Client())
 	if err != nil {
-		return
+		return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to deploy bridge implementation: %w", err)
 	}
 	client.Commit()
 
 	nonce, err := client.Client().PendingNonceAt(ctx, authDeployer.From)
 	if err != nil {
-		return
+		return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to get pending nonce: %w", err)
 	}
 	precalculatedAddr := crypto.CreateAddress(authDeployer.From, nonce+1)
+
 	bridgeABI, err := polygonzkevmbridgev2.Polygonzkevmbridgev2MetaData.GetAbi()
 	if err != nil {
-		return
+		return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to get bridge ABI: %w", err)
 	}
 	if bridgeABI == nil {
 		err = errors.New("GetABI returned nil")
-		return
+		return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to get bridge ABI: %w", err)
 	}
+
 	dataCallProxy, err := bridgeABI.Pack("initialize",
 		NetworkIDL2,
 		common.Address{}, // gasTokenAddressMainnet
@@ -283,8 +322,9 @@ func newSimulatedEVMAggSovereignChain(auth *bind.TransactOpts) (
 		[]byte{}, // gasTokenMetadata
 	)
 	if err != nil {
-		return
+		return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to pack data for proxy initialization: %w", err)
 	}
+
 	bridgeAddr, _, _, err = transparentupgradableproxy.DeployTransparentupgradableproxy(
 		authDeployer,
 		client.Client(),
@@ -293,40 +333,52 @@ func newSimulatedEVMAggSovereignChain(auth *bind.TransactOpts) (
 		dataCallProxy,
 	)
 	if err != nil {
-		return
+		return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to deploy transparent upgradable proxy: %w", err)
 	}
 	if bridgeAddr != precalculatedBridgeAddr {
-		err = fmt.Errorf("error calculating bridge addr. Expected: %s. Actual: %s", precalculatedBridgeAddr, bridgeAddr)
-		return
+		return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf(
+			"error calculating bridge addr. Expected: %s. Actual: %s",
+			precalculatedBridgeAddr, bridgeAddr,
+		)
 	}
 	client.Commit()
+
 	bridgeContract, err = polygonzkevmbridgev2.NewPolygonzkevmbridgev2(bridgeAddr, client.Client())
 	if err != nil {
-		return
+		return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to create bridge contract instance: %w", err)
 	}
+
 	checkGERAddr, err := bridgeContract.GlobalExitRootManager(&bind.CallOpts{})
 	if err != nil {
-		return
+		return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to get Global Exit Root Manager: %w", err)
 	}
 	if precalculatedAddr != checkGERAddr {
-		err = errors.New("error deploying bridge")
+		return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf(
+			"error deploying bridge, unexpected Global Exit Root Manager address",
+		)
 	}
 
 	gerAddr, _, gerContract, err = gerContractEVMChain.DeployPessimisticglobalexitrootnopush0(authDeployer, client.Client(), auth.From)
 	if err != nil {
-		return
+		return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to deploy GER contract: %w", err)
 	}
 	client.Commit()
 
-	_GLOBAL_EXIT_ROOT_SETTER_ROLE := common.HexToHash("0x7b95520991dfda409891be0afa2635b63540f92ee996fda0bf695a166e5c5176")
-	_, err = gerContract.GrantRole(authDeployer, _GLOBAL_EXIT_ROOT_SETTER_ROLE, auth.From)
+	globalExitRootSetterRole := common.HexToHash("0x7b95520991dfda409891be0afa2635b63540f92ee996fda0bf695a166e5c5176")
+	_, err = gerContract.GrantRole(authDeployer, globalExitRootSetterRole, auth.From)
+	if err != nil {
+		return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to grant role to GER contract: %w", err)
+	}
 	client.Commit()
-	hasRole, _ := gerContract.HasRole(&bind.CallOpts{Pending: false}, _GLOBAL_EXIT_ROOT_SETTER_ROLE, auth.From)
+
+	hasRole, _ := gerContract.HasRole(&bind.CallOpts{Pending: false}, globalExitRootSetterRole, auth.From)
 	if !hasRole {
-		err = errors.New("failed to set role")
+		return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to set role for GER contract")
 	}
+
 	if precalculatedAddr != gerAddr {
-		err = errors.New("error calculating addr")
+		return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("error calculating GER address")
 	}
-	return
+
+	return client, gerAddr, gerContract, bridgeAddr, bridgeContract, nil
 }
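
The sweep through newSimulatedL1 and newSimulatedEVMAggSovereignChain above replaces bare returns of named results with explicit, error-wrapping returns. A compact sketch of the before/after (hypothetical function names; the %w behavior is standard library):

    package main

    import (
    	"errors"
    	"fmt"
    )

    var errBoom = errors.New("boom")

    func doWork() error { return errBoom }

    // Before: a bare return propagates whatever the named results happen to
    // hold, which is easy to misread at the call site.
    func beforeStyle() (addr string, err error) {
    	if err = doWork(); err != nil {
    		return // which values go back? The reader must reconstruct them.
    	}
    	return "0xabc", nil
    }

    // After: every exit spells out its results and wraps the cause with %w,
    // so errors.Is and errors.As keep working up the stack.
    func afterStyle() (string, error) {
    	if err := doWork(); err != nil {
    		return "", fmt.Errorf("failed to do work: %w", err)
    	}
    	return "0xabc", nil
    }

    func main() {
    	_, err := afterStyle()
    	fmt.Println(errors.Is(err, errBoom)) // true: %w preserves the chain
    }
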
diff --git a/test/helpers/ethtxmanmock_e2e.go b/test/helpers/ethtxmanmock_e2e.go
index b63ecc496..c7d7455de 100644
--- a/test/helpers/ethtxmanmock_e2e.go
+++ b/test/helpers/ethtxmanmock_e2e.go
@@ -20,6 +20,14 @@ func NewEthTxManMock(
 	client *simulated.Backend,
 	auth *bind.TransactOpts,
 ) *EthTxManagerMock {
+	t.Helper()
+
+	const (
+		ArgToIndex   = 1
+		ArgDataIndex = 4
+		ZeroValue    = 0
+	)
+
 	ethTxMock := NewEthTxManagerMock(t)
 	ethTxMock.On("Add", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).
 		Run(func(args mock.Arguments) {
@@ -31,17 +39,17 @@ func NewEthTxManMock(
 			}
 			gas, err := client.Client().EstimateGas(ctx, ethereum.CallMsg{
 				From:  auth.From,
-				To:    args.Get(1).(*common.Address),
-				Value: big.NewInt(0),
-				Data:  args.Get(4).([]byte),
+				To:    args.Get(ArgToIndex).(*common.Address),
+				Value: big.NewInt(ZeroValue),
+				Data:  args.Get(ArgDataIndex).([]byte),
 			})
 			if err != nil {
 				log.Error(err)
 				res, err := client.Client().CallContract(ctx, ethereum.CallMsg{
 					From:  auth.From,
-					To:    args.Get(1).(*common.Address),
-					Value: big.NewInt(0),
-					Data:  args.Get(4).([]byte),
+					To:    args.Get(ArgToIndex).(*common.Address),
+					Value: big.NewInt(ZeroValue),
+					Data:  args.Get(ArgDataIndex).([]byte),
 				}, nil)
 				log.Debugf("contract call: %s", res)
 				if err != nil {
@@ -53,11 +61,22 @@ func NewEthTxManMock(
 			if err != nil {
 				log.Error(err)
 			}
+
+			to, ok := args.Get(ArgToIndex).(*common.Address)
+			if !ok {
+				log.Error("expected *common.Address for ArgToIndex")
+				return
+			}
+			data, ok := args.Get(ArgDataIndex).([]byte)
+			if !ok {
+				log.Error("expected []byte for ArgDataIndex")
+				return
+			}
 			tx := types.NewTx(&types.LegacyTx{
-				To:       args.Get(1).(*common.Address),
+				To:       to,
 				Nonce:    nonce,
-				Value:    big.NewInt(0),
-				Data:     args.Get(4).([]byte),
+				Value:    big.NewInt(ZeroValue),
+				Data:     data,
 				Gas:      gas,
 				GasPrice: price,
 			})
diff --git a/translator/translator_impl.go b/translator/translator_impl.go
index 1e1a2a6a7..33e07eefa 100644
--- a/translator/translator_impl.go
+++ b/translator/translator_impl.go
@@ -21,7 +21,9 @@ func (t *TranslatorFullMatchRule) Translate(contextName string, data string) str
 	return t.NewString
 }
 
-func NewTranslatorFullMatchRule(contextName *string, fullMatchString string, newString string) *TranslatorFullMatchRule {
+func NewTranslatorFullMatchRule(
+	contextName *string, fullMatchString string, newString string,
+) *TranslatorFullMatchRule {
 	return &TranslatorFullMatchRule{
 		ContextName:     contextName,
 		FullMatchString: fullMatchString,
@@ -58,7 +60,8 @@ func (t *TranslatorImpl) AddConfigRules(cfg Config) {
 	for _, v := range cfg.FullMatchRules {
 		var contextName *string
 		if v.ContextName != "" {
-			contextName = &v.ContextName
+			name := v.ContextName
+			contextName = &name
 		}
 		rule := NewTranslatorFullMatchRule(contextName, v.Old, v.New)
 		t.AddRule(*rule)
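
The AddConfigRules change above copies v.ContextName into a local before taking its address. That matters because, prior to Go 1.22, a range variable is one variable reused across iterations, so &v.Field aliases storage the next iteration overwrites; every stored rule would end up pointing at the last context name. A demonstration:

    package main

    import "fmt"

    func main() {
    	names := []string{"a", "b", "c"}

    	var aliased []*string
    	for _, v := range names {
    		aliased = append(aliased, &v) // all three alias one variable (pre-Go 1.22)
    	}

    	var copied []*string
    	for _, v := range names {
    		name := v // fresh variable per iteration
    		copied = append(copied, &name)
    	}

    	fmt.Println(*aliased[0], *aliased[1], *aliased[2]) // "c c c" under pre-1.22 semantics
    	fmt.Println(*copied[0], *copied[1], *copied[2])    // "a b c" always
    }
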
diff --git a/tree/appendonlytree.go b/tree/appendonlytree.go
index 5b714bfbc..67a1a8f89 100644
--- a/tree/appendonlytree.go
+++ b/tree/appendonlytree.go
@@ -77,7 +77,7 @@ func (t *AppendOnlyTree) addLeaf(tx kv.RwTx, leaf Leaf) error {
 				right: t.zeroHashes[h],
 			}
 			// Update cache
-			// TODO: review this part of the logic, skipping ?optimizaton?
+			// TODO: review this part of the logic, skipping? optimisation?
 			// from OG implementation
 			t.lastLeftCache[h] = currentChildHash
 		}
@@ -86,7 +86,9 @@ func (t *AppendOnlyTree) addLeaf(tx kv.RwTx, leaf Leaf) error {
 	}
 
 	// store root
-	t.storeRoot(tx, uint64(leaf.Index), currentChildHash)
+	if err := t.storeRoot(tx, uint64(leaf.Index), currentChildHash); err != nil {
+		return fmt.Errorf("failed to store root: %w", err)
+	}
 	root := currentChildHash
 	if err := tx.Put(t.rootTable, dbCommon.Uint64ToBytes(uint64(leaf.Index)), root[:]); err != nil {
 		return err
@@ -168,7 +170,7 @@ func (t *AppendOnlyTree) initLastLeftCache(tx kv.Tx, lastIndex int64, lastRoot c
 		currentNode, err := t.getRHTNode(tx, currentNodeHash)
 		if err != nil {
 			return fmt.Errorf(
-				"error getting node %s from the RHT at height %d with root %s: %v",
+				"error getting node %s from the RHT at height %d with root %s: %w",
 				currentNodeHash.Hex(), h, lastRoot.Hex(), err,
 			)
 		}
diff --git a/tree/testvectors/types.go b/tree/testvectors/types.go
index 905718d89..af3c75195 100644
--- a/tree/testvectors/types.go
+++ b/tree/testvectors/types.go
@@ -27,7 +27,7 @@ func (d *DepositVectorRaw) Hash() common.Hash {
 	binary.BigEndian.PutUint32(destNet, d.DestinationNetwork)
 
 	metaHash := keccak256.Hash(common.FromHex(d.Metadata))
-	var buf [32]byte //nolint:gomnd
+	var buf [32]byte
 	amount, _ := big.NewInt(0).SetString(d.Amount, 0)
 	origAddrBytes := common.HexToAddress(d.TokenAddress)
 	destAddrBytes := common.HexToAddress(d.DestinationAddress)
diff --git a/tree/tree.go b/tree/tree.go
index 77c0e4521..5756df10e 100644
--- a/tree/tree.go
+++ b/tree/tree.go
@@ -56,8 +56,9 @@ func (n *treeNode) MarshalBinary() ([]byte, error) {
 }
 
 func (n *treeNode) UnmarshalBinary(data []byte) error {
-	if len(data) != 64 {
-		return fmt.Errorf("expected len %d, actual len %d", 64, len(data))
+	const nodeDataLength = 64
+	if len(data) != nodeDataLength {
+		return fmt.Errorf("expected len %d, actual len %d", nodeDataLength, len(data))
 	}
 	n.left = common.Hash(data[:32])
 	n.right = common.Hash(data[32:])
@@ -122,14 +123,14 @@ func (t *Tree) getSiblings(tx kv.Tx, index uint32, root common.Hash) (
 		var currentNode *treeNode
 		currentNode, err = t.getRHTNode(tx, currentNodeHash)
 		if err != nil {
-			if err == ErrNotFound {
+			if errors.Is(err, ErrNotFound) {
 				hasUsedZeroHashes = true
 				siblings[h] = t.zeroHashes[h]
 				err = nil
 				continue
 			} else {
 				err = fmt.Errorf(
-					"height: %d, currentNode: %s, error: %v",
+					"height: %d, currentNode: %s, error: %w",
 					h, currentNodeHash.Hex(), err,
 				)
 				return
@@ -202,8 +203,9 @@ func generateZeroHashes(height uint8) []common.Hash {
 	var zeroHashes = []common.Hash{
 		{},
 	}
-	// This generates a leaf = HashZero in position 0. In the rest of the positions that are equivalent to the ascending levels,
-	// we set the hashes of the nodes. So all nodes from level i=5 will have the same value and same children nodes.
+	// This generates a leaf = HashZero in position 0. In the rest of the positions that are
+	// equivalent to the ascending levels, we set the hashes of the nodes.
+	// So all nodes from level i=5 will have the same value and same children nodes.
 	for i := 1; i <= int(height); i++ {
 		hasher := sha3.NewLegacyKeccak256()
 		hasher.Write(zeroHashes[i-1][:])
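
tree.go above also moves from err == ErrNotFound to errors.Is and from %v to %w when wrapping. Once an error is wrapped, direct comparison against the sentinel fails, while errors.Is walks the wrap chain. A minimal sketch:

    package main

    import (
    	"errors"
    	"fmt"
    )

    var ErrNotFound = errors.New("not found")

    func lookup() error {
    	// %w records ErrNotFound in the chain; %v would flatten it to text.
    	return fmt.Errorf("height: %d, error: %w", 7, ErrNotFound)
    }

    func main() {
    	err := lookup()
    	fmt.Println(err == ErrNotFound)          // false: the sentinel is wrapped
    	fmt.Println(errors.Is(err, ErrNotFound)) // true: Is unwraps the chain
    }
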
diff --git a/tree/updatabletree.go b/tree/updatabletree.go
index 5c54deb1d..53f45889d 100644
--- a/tree/updatabletree.go
+++ b/tree/updatabletree.go
@@ -114,7 +114,7 @@ func (t *UpdatableTree) Reorg(tx kv.RwTx, firstReorgedIndex uint64) (func(), err
 		t.lastRoot = rootBackup
 	}
 
-	for lastIndexBytes, rootBytes, err := iter.Next(); lastIndexBytes != nil; lastIndexBytes, rootBytes, err = iter.Next() {
+	for lastIndexBytes, rootBytes, err := iter.Next(); lastIndexBytes != nil; lastIndexBytes, rootBytes, err = iter.Next() { //nolint:lll
 		if err != nil {
 			return rollback, err
 		}

From e24dadd25e81b7e7ffdaeedeea88ad8a07d95782 Mon Sep 17 00:00:00 2001
From: laisolizq <37299818+laisolizq@users.noreply.github.com>
Date: Thu, 12 Sep 2024 10:07:29 +0200
Subject: [PATCH 4/6] update test claimcalldata (#64)

* update test claimcalldata

* fix test claimcalldata

* add tests claimcalldata
---
 bridgesync/claimcalldata_test.go              | 774 +++++++++++++++++-
 test/contracts/abi/claimmock.abi              |   2 +-
 test/contracts/abi/claimmockcaller.abi        |   2 +-
 test/contracts/bin/claimmock.bin              |   2 +-
 test/contracts/bin/claimmockcaller.bin        |   2 +-
 test/contracts/claimmock/ClaimMock.sol        |  10 +-
 test/contracts/claimmock/claimmock.go         |  16 +-
 .../claimmockcaller/ClaimMockCaller.sol       | 115 ++-
 .../claimmockcaller/claimmockcaller.go        |  94 ++-
 test/contracts/compile.sh                     |   4 +-
 10 files changed, 946 insertions(+), 75 deletions(-)

diff --git a/bridgesync/claimcalldata_test.go b/bridgesync/claimcalldata_test.go
index 20c1b7c52..d788c2c71 100644
--- a/bridgesync/claimcalldata_test.go
+++ b/bridgesync/claimcalldata_test.go
@@ -7,6 +7,7 @@ import (
 	"testing"
 	"time"
 
+	"github.com/0xPolygon/cdk/log"
 	"github.com/0xPolygon/cdk/test/contracts/claimmock"
 	"github.com/0xPolygon/cdk/test/contracts/claimmockcaller"
 	"github.com/ethereum/go-ethereum/accounts/abi/bind"
@@ -57,7 +58,6 @@ func TestClaimCalldata(t *testing.T) {
 	proofRollup[4] = common.HexToHash("a1fa")
 	proofRollupH[4] = common.HexToHash("a1fa")
 	expectedClaim := Claim{
-		GlobalIndex:         big.NewInt(420),
 		OriginNetwork:       69,
 		OriginAddress:       common.HexToAddress("ffaaffaa"),
 		DestinationAddress:  common.HexToAddress("123456789"),
@@ -69,9 +69,26 @@ func TestClaimCalldata(t *testing.T) {
 		DestinationNetwork:  0,
 		Metadata:            []byte{},
 	}
+	expectedClaim2 := Claim{
+		OriginNetwork:       87,
+		OriginAddress:       common.HexToAddress("eebbeebb"),
+		DestinationAddress:  common.HexToAddress("2233445566"),
+		Amount:              big.NewInt(4),
+		MainnetExitRoot:     common.HexToHash("5ca1e"),
+		RollupExitRoot:      common.HexToHash("dead"),
+		ProofLocalExitRoot:  proofLocalH,
+		ProofRollupExitRoot: proofRollupH,
+		DestinationNetwork:  0,
+		Metadata:            []byte{},
+	}
 	auth.GasLimit = 999999 // for some reason gas estimation fails :(
 
+	abi, err := claimmock.ClaimmockMetaData.GetAbi()
+	require.NoError(t, err)
+
 	// direct call claim asset
+	expectedClaim.GlobalIndex = big.NewInt(421)
+	expectedClaim.IsMessage = false
 	tx, err := bridgeContract.ClaimAsset(
 		auth,
 		proofLocal,
@@ -89,7 +106,6 @@ func TestClaimCalldata(t *testing.T) {
 	require.NoError(t, err)
 	time.Sleep(1 * time.Second)
 	r, err := client.TransactionReceipt(ctx, tx.Hash())
-	expectedClaim.IsMessage = false
 	testCases = append(testCases, testCase{
 		description:   "direct call to claim asset",
 		bridgeAddr:    bridgeAddr,
@@ -98,6 +114,8 @@ func TestClaimCalldata(t *testing.T) {
 	})
 
 	// indirect call claim asset
+	expectedClaim.IsMessage = false
+	expectedClaim.GlobalIndex = big.NewInt(422)
 	tx, err = claimCaller.ClaimAsset(
 		auth,
 		proofLocal,
@@ -111,11 +129,11 @@ func TestClaimCalldata(t *testing.T) {
 		expectedClaim.DestinationAddress,
 		expectedClaim.Amount,
 		nil,
+		false,
 	)
 	require.NoError(t, err)
 	time.Sleep(1 * time.Second)
 	r, err = client.TransactionReceipt(ctx, tx.Hash())
-	expectedClaim.IsMessage = false
 	testCases = append(testCases, testCase{
 		description:   "indirect call to claim asset",
 		bridgeAddr:    bridgeAddr,
@@ -123,7 +141,42 @@ func TestClaimCalldata(t *testing.T) {
 		expectedClaim: expectedClaim,
 	})
 
+	// indirect call claim asset bytes
+	expectedClaim.GlobalIndex = big.NewInt(423)
+	expectedClaim.IsMessage = false
+	expectedClaimBytes, err := abi.Pack(
+		"claimAsset",
+		proofLocal,
+		proofRollup,
+		expectedClaim.GlobalIndex,
+		expectedClaim.MainnetExitRoot,
+		expectedClaim.RollupExitRoot,
+		expectedClaim.OriginNetwork,
+		expectedClaim.OriginAddress,
+		expectedClaim.DestinationNetwork,
+		expectedClaim.DestinationAddress,
+		expectedClaim.Amount,
+		expectedClaim.Metadata,
+	)
+	require.NoError(t, err)
+	tx, err = claimCaller.ClaimBytes(
+		auth,
+		expectedClaimBytes,
+		false,
+	)
+	require.NoError(t, err)
+	time.Sleep(1 * time.Second)
+	r, err = client.TransactionReceipt(ctx, tx.Hash())
+	testCases = append(testCases, testCase{
+		description:   "indirect call to claim asset bytes",
+		bridgeAddr:    bridgeAddr,
+		log:           *r.Logs[0],
+		expectedClaim: expectedClaim,
+	})
+
 	// direct call claim message
+	expectedClaim.IsMessage = true
+	expectedClaim.GlobalIndex = big.NewInt(424)
 	tx, err = bridgeContract.ClaimMessage(
 		auth,
 		proofLocal,
@@ -141,7 +194,6 @@ func TestClaimCalldata(t *testing.T) {
 	require.NoError(t, err)
 	time.Sleep(1 * time.Second)
 	r, err = client.TransactionReceipt(ctx, tx.Hash())
-	expectedClaim.IsMessage = true
 	testCases = append(testCases, testCase{
 		description:   "direct call to claim message",
 		bridgeAddr:    bridgeAddr,
@@ -150,6 +202,8 @@ func TestClaimCalldata(t *testing.T) {
 	})
 
 	// indirect call claim message
+	expectedClaim.IsMessage = true
+	expectedClaim.GlobalIndex = big.NewInt(425)
 	tx, err = claimCaller.ClaimMessage(
 		auth,
 		proofLocal,
@@ -163,11 +217,11 @@ func TestClaimCalldata(t *testing.T) {
 		expectedClaim.DestinationAddress,
 		expectedClaim.Amount,
 		nil,
+		false,
 	)
 	require.NoError(t, err)
 	time.Sleep(1 * time.Second)
 	r, err = client.TransactionReceipt(ctx, tx.Hash())
-	expectedClaim.IsMessage = true
 	testCases = append(testCases, testCase{
 		description:   "indirect call to claim message",
 		bridgeAddr:    bridgeAddr,
@@ -175,7 +229,717 @@ func TestClaimCalldata(t *testing.T) {
 		expectedClaim: expectedClaim,
 	})
 
+	// indirect call claim message bytes
+	expectedClaim.GlobalIndex = big.NewInt(426)
+	expectedClaim.IsMessage = true
+	expectedClaimBytes, err = abi.Pack(
+		"claimMessage",
+		proofLocal,
+		proofRollup,
+		expectedClaim.GlobalIndex,
+		expectedClaim.MainnetExitRoot,
+		expectedClaim.RollupExitRoot,
+		expectedClaim.OriginNetwork,
+		expectedClaim.OriginAddress,
+		expectedClaim.DestinationNetwork,
+		expectedClaim.DestinationAddress,
+		expectedClaim.Amount,
+		expectedClaim.Metadata,
+	)
+	require.NoError(t, err)
+	tx, err = claimCaller.ClaimBytes(
+		auth,
+		expectedClaimBytes,
+		false,
+	)
+	require.NoError(t, err)
+	time.Sleep(1 * time.Second)
+	r, err = client.TransactionReceipt(ctx, tx.Hash())
+	testCases = append(testCases, testCase{
+		description:   "indirect call to claim message bytes",
+		bridgeAddr:    bridgeAddr,
+		log:           *r.Logs[0],
+		expectedClaim: expectedClaim,
+	})
+
+	// indirect call claim message bytes
+	expectedClaim.GlobalIndex = big.NewInt(427)
+	expectedClaim.IsMessage = true
+	expectedClaimBytes, err = abi.Pack(
+		"claimMessage",
+		proofLocal,
+		proofRollup,
+		expectedClaim.GlobalIndex,
+		expectedClaim.MainnetExitRoot,
+		expectedClaim.RollupExitRoot,
+		expectedClaim.OriginNetwork,
+		expectedClaim.OriginAddress,
+		expectedClaim.DestinationNetwork,
+		expectedClaim.DestinationAddress,
+		expectedClaim.Amount,
+		expectedClaim.Metadata,
+	)
+	require.NoError(t, err)
+	tx, err = claimCaller.ClaimBytes(
+		auth,
+		expectedClaimBytes,
+		true,
+	)
+	require.NoError(t, err)
+	time.Sleep(1 * time.Second)
+	r, err = client.TransactionReceipt(ctx, tx.Hash())
+	log.Infof("%+v", r.Logs)
+
+	reverted := [2]bool{false, false}
+
+	// 2 indirect call claim message (same global index)
+	expectedClaim.IsMessage = true
+	expectedClaim.GlobalIndex = big.NewInt(427)
+	expectedClaim2.IsMessage = true
+	expectedClaim2.GlobalIndex = big.NewInt(427)
+	expectedClaimBytes, err = abi.Pack(
+		"claimMessage",
+		proofLocal,
+		proofRollup,
+		expectedClaim.GlobalIndex,
+		expectedClaim.MainnetExitRoot,
+		expectedClaim.RollupExitRoot,
+		expectedClaim.OriginNetwork,
+		expectedClaim.OriginAddress,
+		expectedClaim.DestinationNetwork,
+		expectedClaim.DestinationAddress,
+		expectedClaim.Amount,
+		expectedClaim.Metadata,
+	)
+	require.NoError(t, err)
+	expectedClaimBytes2, err := abi.Pack(
+		"claimMessage",
+		proofLocal,
+		proofRollup,
+		expectedClaim2.GlobalIndex,
+		expectedClaim2.MainnetExitRoot,
+		expectedClaim2.RollupExitRoot,
+		expectedClaim2.OriginNetwork,
+		expectedClaim2.OriginAddress,
+		expectedClaim2.DestinationNetwork,
+		expectedClaim2.DestinationAddress,
+		expectedClaim2.Amount,
+		expectedClaim2.Metadata,
+	)
+	require.NoError(t, err)
+	tx, err = claimCaller.Claim2Bytes(
+		auth,
+		expectedClaimBytes,
+		expectedClaimBytes2,
+		reverted,
+	)
+	require.NoError(t, err)
+	time.Sleep(1 * time.Second)
+	r, err = client.TransactionReceipt(ctx, tx.Hash())
+	testCases = append(testCases, testCase{
+		description:   "2 indirect call claim message 1 (same globalIndex)",
+		bridgeAddr:    bridgeAddr,
+		log:           *r.Logs[0],
+		expectedClaim: expectedClaim,
+	})
+	testCases = append(testCases, testCase{
+		description:   "2 indirect call claim message 2 (same globalIndex)",
+		bridgeAddr:    bridgeAddr,
+		log:           *r.Logs[1],
+		expectedClaim: expectedClaim2,
+	})
+
+	// 2 indirect call claim message (diff global index)
+	expectedClaim.IsMessage = true
+	expectedClaim.GlobalIndex = big.NewInt(428)
+	expectedClaim2.IsMessage = true
+	expectedClaim2.GlobalIndex = big.NewInt(429)
+	expectedClaimBytes, err = abi.Pack(
+		"claimMessage",
+		proofLocal,
+		proofRollup,
+		expectedClaim.GlobalIndex,
+		expectedClaim.MainnetExitRoot,
+		expectedClaim.RollupExitRoot,
+		expectedClaim.OriginNetwork,
+		expectedClaim.OriginAddress,
+		expectedClaim.DestinationNetwork,
+		expectedClaim.DestinationAddress,
+		expectedClaim.Amount,
+		expectedClaim.Metadata,
+	)
+	require.NoError(t, err)
+	expectedClaimBytes2, err = abi.Pack(
+		"claimMessage",
+		proofLocal,
+		proofRollup,
+		expectedClaim2.GlobalIndex,
+		expectedClaim2.MainnetExitRoot,
+		expectedClaim2.RollupExitRoot,
+		expectedClaim2.OriginNetwork,
+		expectedClaim2.OriginAddress,
+		expectedClaim2.DestinationNetwork,
+		expectedClaim2.DestinationAddress,
+		expectedClaim2.Amount,
+		expectedClaim2.Metadata,
+	)
+	require.NoError(t, err)
+	tx, err = claimCaller.Claim2Bytes(
+		auth,
+		expectedClaimBytes,
+		expectedClaimBytes2,
+		reverted,
+	)
+	require.NoError(t, err)
+	time.Sleep(1 * time.Second)
+	r, err = client.TransactionReceipt(ctx, tx.Hash())
+	testCases = append(testCases, testCase{
+		description:   "2 indirect call claim message 1 (diff globalIndex)",
+		bridgeAddr:    bridgeAddr,
+		log:           *r.Logs[0],
+		expectedClaim: expectedClaim,
+	})
+	testCases = append(testCases, testCase{
+		description:   "2 indirect call claim message 2 (diff globalIndex)",
+		bridgeAddr:    bridgeAddr,
+		log:           *r.Logs[1],
+		expectedClaim: expectedClaim2,
+	})
+
+	reverted = [2]bool{false, true}
+
+	// 2 indirect call claim message (same global index) (1 ok, 1 reverted)
+	expectedClaim.IsMessage = true
+	expectedClaim.GlobalIndex = big.NewInt(430)
+	expectedClaim2.IsMessage = true
+	expectedClaim2.GlobalIndex = big.NewInt(430)
+	expectedClaimBytes, err = abi.Pack(
+		"claimMessage",
+		proofLocal,
+		proofRollup,
+		expectedClaim.GlobalIndex,
+		expectedClaim.MainnetExitRoot,
+		expectedClaim.RollupExitRoot,
+		expectedClaim.OriginNetwork,
+		expectedClaim.OriginAddress,
+		expectedClaim.DestinationNetwork,
+		expectedClaim.DestinationAddress,
+		expectedClaim.Amount,
+		expectedClaim.Metadata,
+	)
+	require.NoError(t, err)
+	expectedClaimBytes2, err = abi.Pack(
+		"claimMessage",
+		proofLocal,
+		proofRollup,
+		expectedClaim2.GlobalIndex,
+		expectedClaim2.MainnetExitRoot,
+		expectedClaim2.RollupExitRoot,
+		expectedClaim2.OriginNetwork,
+		expectedClaim2.OriginAddress,
+		expectedClaim2.DestinationNetwork,
+		expectedClaim2.DestinationAddress,
+		expectedClaim2.Amount,
+		expectedClaim2.Metadata,
+	)
+	require.NoError(t, err)
+	tx, err = claimCaller.Claim2Bytes(
+		auth,
+		expectedClaimBytes,
+		expectedClaimBytes2,
+		reverted,
+	)
+	require.NoError(t, err)
+	time.Sleep(1 * time.Second)
+	r, err = client.TransactionReceipt(ctx, tx.Hash())
+	testCases = append(testCases, testCase{
+		description:   "2 indirect call claim message (same globalIndex) (1 ok, 1 reverted)",
+		bridgeAddr:    bridgeAddr,
+		log:           *r.Logs[0],
+		expectedClaim: expectedClaim,
+	})
+
+	// 2 indirect call claim message (diff global index) (1 ok, 1 reverted)
+	expectedClaim.IsMessage = true
+	expectedClaim.GlobalIndex = big.NewInt(431)
+	expectedClaim2.IsMessage = true
+	expectedClaim2.GlobalIndex = big.NewInt(432)
+	expectedClaimBytes, err = abi.Pack(
+		"claimMessage",
+		proofLocal,
+		proofRollup,
+		expectedClaim.GlobalIndex,
+		expectedClaim.MainnetExitRoot,
+		expectedClaim.RollupExitRoot,
+		expectedClaim.OriginNetwork,
+		expectedClaim.OriginAddress,
+		expectedClaim.DestinationNetwork,
+		expectedClaim.DestinationAddress,
+		expectedClaim.Amount,
+		expectedClaim.Metadata,
+	)
+	require.NoError(t, err)
+	expectedClaimBytes2, err = abi.Pack(
+		"claimMessage",
+		proofLocal,
+		proofRollup,
+		expectedClaim2.GlobalIndex,
+		expectedClaim2.MainnetExitRoot,
+		expectedClaim2.RollupExitRoot,
+		expectedClaim2.OriginNetwork,
+		expectedClaim2.OriginAddress,
+		expectedClaim2.DestinationNetwork,
+		expectedClaim2.DestinationAddress,
+		expectedClaim2.Amount,
+		expectedClaim2.Metadata,
+	)
+	require.NoError(t, err)
+	tx, err = claimCaller.Claim2Bytes(
+		auth,
+		expectedClaimBytes,
+		expectedClaimBytes2,
+		reverted,
+	)
+	require.NoError(t, err)
+	time.Sleep(1 * time.Second)
+	r, err = client.TransactionReceipt(ctx, tx.Hash())
+	testCases = append(testCases, testCase{
+		description:   "2 indirect call claim message (diff globalIndex) (1 ok, 1 reverted)",
+		bridgeAddr:    bridgeAddr,
+		log:           *r.Logs[0],
+		expectedClaim: expectedClaim,
+	})
+
+	reverted = [2]bool{true, false}
+
+	// 2 indirect call claim message (same global index) (1 reverted, 1 ok)
+	expectedClaim.IsMessage = true
+	expectedClaim.GlobalIndex = big.NewInt(433)
+	expectedClaim2.IsMessage = true
+	expectedClaim2.GlobalIndex = big.NewInt(433)
+	expectedClaimBytes, err = abi.Pack(
+		"claimMessage",
+		proofLocal,
+		proofRollup,
+		expectedClaim.GlobalIndex,
+		expectedClaim.MainnetExitRoot,
+		expectedClaim.RollupExitRoot,
+		expectedClaim.OriginNetwork,
+		expectedClaim.OriginAddress,
+		expectedClaim.DestinationNetwork,
+		expectedClaim.DestinationAddress,
+		expectedClaim.Amount,
+		expectedClaim.Metadata,
+	)
+	require.NoError(t, err)
+	expectedClaimBytes2, err = abi.Pack(
+		"claimMessage",
+		proofLocal,
+		proofRollup,
+		expectedClaim2.GlobalIndex,
+		expectedClaim2.MainnetExitRoot,
+		expectedClaim2.RollupExitRoot,
+		expectedClaim2.OriginNetwork,
+		expectedClaim2.OriginAddress,
+		expectedClaim2.DestinationNetwork,
+		expectedClaim2.DestinationAddress,
+		expectedClaim2.Amount,
+		expectedClaim2.Metadata,
+	)
+	require.NoError(t, err)
+	tx, err = claimCaller.Claim2Bytes(
+		auth,
+		expectedClaimBytes,
+		expectedClaimBytes2,
+		reverted,
+	)
+	require.NoError(t, err)
+	time.Sleep(1 * time.Second)
+	r, err = client.TransactionReceipt(ctx, tx.Hash())
+	testCases = append(testCases, testCase{
+		description:   "2 indirect call claim message (same globalIndex) (reverted,ok)",
+		bridgeAddr:    bridgeAddr,
+		log:           *r.Logs[0],
+		expectedClaim: expectedClaim2,
+	})
+
+	// 2 indirect call claim message (diff global index) (1 reverted, 1 ok)
+	expectedClaim.IsMessage = true
+	expectedClaim.GlobalIndex = big.NewInt(434)
+	expectedClaim2.IsMessage = true
+	expectedClaim2.GlobalIndex = big.NewInt(435)
+	expectedClaimBytes, err = abi.Pack(
+		"claimMessage",
+		proofLocal,
+		proofRollup,
+		expectedClaim.GlobalIndex,
+		expectedClaim.MainnetExitRoot,
+		expectedClaim.RollupExitRoot,
+		expectedClaim.OriginNetwork,
+		expectedClaim.OriginAddress,
+		expectedClaim.DestinationNetwork,
+		expectedClaim.DestinationAddress,
+		expectedClaim.Amount,
+		expectedClaim.Metadata,
+	)
+	require.NoError(t, err)
+	expectedClaimBytes2, err = abi.Pack(
+		"claimMessage",
+		proofLocal,
+		proofRollup,
+		expectedClaim2.GlobalIndex,
+		expectedClaim2.MainnetExitRoot,
+		expectedClaim2.RollupExitRoot,
+		expectedClaim2.OriginNetwork,
+		expectedClaim2.OriginAddress,
+		expectedClaim2.DestinationNetwork,
+		expectedClaim2.DestinationAddress,
+		expectedClaim2.Amount,
+		expectedClaim2.Metadata,
+	)
+	require.NoError(t, err)
+	tx, err = claimCaller.Claim2Bytes(
+		auth,
+		expectedClaimBytes,
+		expectedClaimBytes2,
+		reverted,
+	)
+	require.NoError(t, err)
+	time.Sleep(1 * time.Second)
+	r, err = client.TransactionReceipt(ctx, tx.Hash())
+	testCases = append(testCases, testCase{
+		description:   "2 indirect call claim message (diff globalIndex) (reverted,ok)",
+		bridgeAddr:    bridgeAddr,
+		log:           *r.Logs[0],
+		expectedClaim: expectedClaim2,
+	})
+
+	reverted = [2]bool{false, false}
+
+	// 2 indirect call claim asset (same global index)
+	expectedClaim.IsMessage = false
+	expectedClaim.GlobalIndex = big.NewInt(436)
+	expectedClaim2.IsMessage = false
+	expectedClaim2.GlobalIndex = big.NewInt(436)
+	expectedClaimBytes, err = abi.Pack(
+		"claimAsset",
+		proofLocal,
+		proofRollup,
+		expectedClaim.GlobalIndex,
+		expectedClaim.MainnetExitRoot,
+		expectedClaim.RollupExitRoot,
+		expectedClaim.OriginNetwork,
+		expectedClaim.OriginAddress,
+		expectedClaim.DestinationNetwork,
+		expectedClaim.DestinationAddress,
+		expectedClaim.Amount,
+		expectedClaim.Metadata,
+	)
+	require.NoError(t, err)
+	expectedClaimBytes2, err = abi.Pack(
+		"claimAsset",
+		proofLocal,
+		proofRollup,
+		expectedClaim2.GlobalIndex,
+		expectedClaim2.MainnetExitRoot,
+		expectedClaim2.RollupExitRoot,
+		expectedClaim2.OriginNetwork,
+		expectedClaim2.OriginAddress,
+		expectedClaim2.DestinationNetwork,
+		expectedClaim2.DestinationAddress,
+		expectedClaim2.Amount,
+		expectedClaim2.Metadata,
+	)
+	require.NoError(t, err)
+	tx, err = claimCaller.Claim2Bytes(
+		auth,
+		expectedClaimBytes,
+		expectedClaimBytes2,
+		reverted,
+	)
+	require.NoError(t, err)
+	time.Sleep(1 * time.Second)
+	r, err = client.TransactionReceipt(ctx, tx.Hash())
+	testCases = append(testCases, testCase{
+		description:   "2 indirect call claim asset 1 (same globalIndex)",
+		bridgeAddr:    bridgeAddr,
+		log:           *r.Logs[0],
+		expectedClaim: expectedClaim,
+	})
+	testCases = append(testCases, testCase{
+		description:   "2 indirect call claim asset 2 (same globalIndex)",
+		bridgeAddr:    bridgeAddr,
+		log:           *r.Logs[1],
+		expectedClaim: expectedClaim2,
+	})
+
+	// 2 indirect call claim asset (diff global index)
+	expectedClaim.IsMessage = false
+	expectedClaim.GlobalIndex = big.NewInt(437)
+	expectedClaim2.IsMessage = false
+	expectedClaim2.GlobalIndex = big.NewInt(438)
+	expectedClaimBytes, err = abi.Pack(
+		"claimAsset",
+		proofLocal,
+		proofRollup,
+		expectedClaim.GlobalIndex,
+		expectedClaim.MainnetExitRoot,
+		expectedClaim.RollupExitRoot,
+		expectedClaim.OriginNetwork,
+		expectedClaim.OriginAddress,
+		expectedClaim.DestinationNetwork,
+		expectedClaim.DestinationAddress,
+		expectedClaim.Amount,
+		expectedClaim.Metadata,
+	)
+	require.NoError(t, err)
+	expectedClaimBytes2, err = abi.Pack(
+		"claimAsset",
+		proofLocal,
+		proofRollup,
+		expectedClaim2.GlobalIndex,
+		expectedClaim2.MainnetExitRoot,
+		expectedClaim2.RollupExitRoot,
+		expectedClaim2.OriginNetwork,
+		expectedClaim2.OriginAddress,
+		expectedClaim2.DestinationNetwork,
+		expectedClaim2.DestinationAddress,
+		expectedClaim2.Amount,
+		expectedClaim2.Metadata,
+	)
+	require.NoError(t, err)
+	tx, err = claimCaller.Claim2Bytes(
+		auth,
+		expectedClaimBytes,
+		expectedClaimBytes2,
+		reverted,
+	)
+	require.NoError(t, err)
+	time.Sleep(1 * time.Second)
+	r, err = client.TransactionReceipt(ctx, tx.Hash())
+	testCases = append(testCases, testCase{
+		description:   "2 indirect call claim asset 1 (diff globalIndex)",
+		bridgeAddr:    bridgeAddr,
+		log:           *r.Logs[0],
+		expectedClaim: expectedClaim,
+	})
+	testCases = append(testCases, testCase{
+		description:   "2 indirect call claim asset 2 (diff globalIndex)",
+		bridgeAddr:    bridgeAddr,
+		log:           *r.Logs[1],
+		expectedClaim: expectedClaim2,
+	})
+
+	reverted = [2]bool{false, true}
+
+	// 2 indirect call claim asset (same global index) (1 ok, 1 reverted)
+	expectedClaim.IsMessage = false
+	expectedClaim.GlobalIndex = big.NewInt(439)
+	expectedClaim2.IsMessage = false
+	expectedClaim2.GlobalIndex = big.NewInt(439)
+	expectedClaimBytes, err = abi.Pack(
+		"claimAsset",
+		proofLocal,
+		proofRollup,
+		expectedClaim.GlobalIndex,
+		expectedClaim.MainnetExitRoot,
+		expectedClaim.RollupExitRoot,
+		expectedClaim.OriginNetwork,
+		expectedClaim.OriginAddress,
+		expectedClaim.DestinationNetwork,
+		expectedClaim.DestinationAddress,
+		expectedClaim.Amount,
+		expectedClaim.Metadata,
+	)
+	require.NoError(t, err)
+	expectedClaimBytes2, err = abi.Pack(
+		"claimAsset",
+		proofLocal,
+		proofRollup,
+		expectedClaim2.GlobalIndex,
+		expectedClaim2.MainnetExitRoot,
+		expectedClaim2.RollupExitRoot,
+		expectedClaim2.OriginNetwork,
+		expectedClaim2.OriginAddress,
+		expectedClaim2.DestinationNetwork,
+		expectedClaim2.DestinationAddress,
+		expectedClaim2.Amount,
+		expectedClaim2.Metadata,
+	)
+	require.NoError(t, err)
+	tx, err = claimCaller.Claim2Bytes(
+		auth,
+		expectedClaimBytes,
+		expectedClaimBytes2,
+		reverted,
+	)
+	require.NoError(t, err)
+	time.Sleep(1 * time.Second)
+	r, err = client.TransactionReceipt(ctx, tx.Hash())
+	testCases = append(testCases, testCase{
+		description:   "2 indirect call claim asset (same globalIndex) (1 ok, 1 reverted)",
+		bridgeAddr:    bridgeAddr,
+		log:           *r.Logs[0],
+		expectedClaim: expectedClaim,
+	})
+
+	// 2 indirect call claim message (diff global index) (1 ok, 1 reverted)
+	expectedClaim.IsMessage = false
+	expectedClaim.GlobalIndex = big.NewInt(440)
+	expectedClaim2.IsMessage = false
+	expectedClaim2.GlobalIndex = big.NewInt(441)
+	expectedClaimBytes, err = abi.Pack(
+		"claimAsset",
+		proofLocal,
+		proofRollup,
+		expectedClaim.GlobalIndex,
+		expectedClaim.MainnetExitRoot,
+		expectedClaim.RollupExitRoot,
+		expectedClaim.OriginNetwork,
+		expectedClaim.OriginAddress,
+		expectedClaim.DestinationNetwork,
+		expectedClaim.DestinationAddress,
+		expectedClaim.Amount,
+		expectedClaim.Metadata,
+	)
+	require.NoError(t, err)
+	expectedClaimBytes2, err = abi.Pack(
+		"claimAsset",
+		proofLocal,
+		proofRollup,
+		expectedClaim2.GlobalIndex,
+		expectedClaim2.MainnetExitRoot,
+		expectedClaim2.RollupExitRoot,
+		expectedClaim2.OriginNetwork,
+		expectedClaim2.OriginAddress,
+		expectedClaim2.DestinationNetwork,
+		expectedClaim2.DestinationAddress,
+		expectedClaim2.Amount,
+		expectedClaim2.Metadata,
+	)
+	require.NoError(t, err)
+	tx, err = claimCaller.Claim2Bytes(
+		auth,
+		expectedClaimBytes,
+		expectedClaimBytes2,
+		reverted,
+	)
+	require.NoError(t, err)
+	time.Sleep(1 * time.Second)
+	r, err = client.TransactionReceipt(ctx, tx.Hash())
+	testCases = append(testCases, testCase{
+		description:   "2 indirect call claim asset (diff globalIndex) (1 ok, 1 reverted)",
+		bridgeAddr:    bridgeAddr,
+		log:           *r.Logs[0],
+		expectedClaim: expectedClaim,
+	})
+
+	reverted = [2]bool{true, false}
+
+	// 2 indirect call claim asset (same global index) (1 reverted, 1 ok)
+	expectedClaim.IsMessage = false
+	expectedClaim.GlobalIndex = big.NewInt(442)
+	expectedClaim2.IsMessage = false
+	expectedClaim2.GlobalIndex = big.NewInt(442)
+	expectedClaimBytes, err = abi.Pack(
+		"claimAsset",
+		proofLocal,
+		proofRollup,
+		expectedClaim.GlobalIndex,
+		expectedClaim.MainnetExitRoot,
+		expectedClaim.RollupExitRoot,
+		expectedClaim.OriginNetwork,
+		expectedClaim.OriginAddress,
+		expectedClaim.DestinationNetwork,
+		expectedClaim.DestinationAddress,
+		expectedClaim.Amount,
+		expectedClaim.Metadata,
+	)
+	require.NoError(t, err)
+	expectedClaimBytes2, err = abi.Pack(
+		"claimAsset",
+		proofLocal,
+		proofRollup,
+		expectedClaim2.GlobalIndex,
+		expectedClaim2.MainnetExitRoot,
+		expectedClaim2.RollupExitRoot,
+		expectedClaim2.OriginNetwork,
+		expectedClaim2.OriginAddress,
+		expectedClaim2.DestinationNetwork,
+		expectedClaim2.DestinationAddress,
+		expectedClaim2.Amount,
+		expectedClaim2.Metadata,
+	)
+	require.NoError(t, err)
+	tx, err = claimCaller.Claim2Bytes(
+		auth,
+		expectedClaimBytes,
+		expectedClaimBytes2,
+		reverted,
+	)
+	require.NoError(t, err)
+	time.Sleep(1 * time.Second)
+	r, err = client.TransactionReceipt(ctx, tx.Hash())
+	testCases = append(testCases, testCase{
+		description:   "2 indirect call claim asset (same globalIndex) (reverted,ok)",
+		bridgeAddr:    bridgeAddr,
+		log:           *r.Logs[0],
+		expectedClaim: expectedClaim2,
+	})
+
+	// 2 indirect call claim asset (diff global index) (1 reverted, 1 ok)
+	expectedClaim.IsMessage = false
+	expectedClaim.GlobalIndex = big.NewInt(443)
+	expectedClaim2.IsMessage = false
+	expectedClaim2.GlobalIndex = big.NewInt(444)
+	expectedClaimBytes, err = abi.Pack(
+		"claimAsset",
+		proofLocal,
+		proofRollup,
+		expectedClaim.GlobalIndex,
+		expectedClaim.MainnetExitRoot,
+		expectedClaim.RollupExitRoot,
+		expectedClaim.OriginNetwork,
+		expectedClaim.OriginAddress,
+		expectedClaim.DestinationNetwork,
+		expectedClaim.DestinationAddress,
+		expectedClaim.Amount,
+		expectedClaim.Metadata,
+	)
+	require.NoError(t, err)
+	expectedClaimBytes2, err = abi.Pack(
+		"claimAsset",
+		proofLocal,
+		proofRollup,
+		expectedClaim2.GlobalIndex,
+		expectedClaim2.MainnetExitRoot,
+		expectedClaim2.RollupExitRoot,
+		expectedClaim2.OriginNetwork,
+		expectedClaim2.OriginAddress,
+		expectedClaim2.DestinationNetwork,
+		expectedClaim2.DestinationAddress,
+		expectedClaim2.Amount,
+		expectedClaim2.Metadata,
+	)
+	require.NoError(t, err)
+	tx, err = claimCaller.Claim2Bytes(
+		auth,
+		expectedClaimBytes,
+		expectedClaimBytes2,
+		reverted,
+	)
+	require.NoError(t, err)
+	time.Sleep(1 * time.Second)
+	r, err = client.TransactionReceipt(ctx, tx.Hash())
+	require.NoError(t, err)
+	testCases = append(testCases, testCase{
+		description:   "2 indirect call claim asset (diff globalIndex) (1 reverted, 1 ok)",
+		bridgeAddr:    bridgeAddr,
+		log:           *r.Logs[0],
+		expectedClaim: expectedClaim2,
+	})
+
 	for _, tc := range testCases {
+		log.Info(tc.description)
 		t.Run(tc.description, func(t *testing.T) {
 			claimEvent, err := bridgeContract.ParseClaimEvent(tc.log)
 			require.NoError(t, err)
diff --git a/test/contracts/abi/claimmock.abi b/test/contracts/abi/claimmock.abi
index 2b75f6583..0fd2ea8e2 100644
--- a/test/contracts/abi/claimmock.abi
+++ b/test/contracts/abi/claimmock.abi
@@ -1 +1 @@
-[{"anonymous":false,"inputs":[{"indexed":false,"internalType":"uint256","name":"globalIndex","type":"uint256"},{"indexed":false,"internalType":"uint32","name":"originNetwork","type":"uint32"},{"indexed":false,"internalType":"address","name":"originAddress","type":"address"},{"indexed":false,"internalType":"address","name":"destinationAddress","type":"address"},{"indexed":false,"internalType":"uint256","name":"amount","type":"uint256"}],"name":"ClaimEvent","type":"event"},{"inputs":[{"internalType":"bytes32[32]","name":"smtProofLocalExitRoot","type":"bytes32[32]"},{"internalType":"bytes32[32]","name":"smtProofRollupExitRoot","type":"bytes32[32]"},{"internalType":"uint256","name":"globalIndex","type":"uint256"},{"internalType":"bytes32","name":"mainnetExitRoot","type":"bytes32"},{"internalType":"bytes32","name":"rollupExitRoot","type":"bytes32"},{"internalType":"uint32","name":"originNetwork","type":"uint32"},{"internalType":"address","name":"originTokenAddress","type":"address"},{"internalType":"uint32","name":"destinationNetwork","type":"uint32"},{"internalType":"address","name":"destinationAddress","type":"address"},{"internalType":"uint256","name":"amount","type":"uint256"},{"internalType":"bytes","name":"metadata","type":"bytes"}],"name":"claimAsset","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"bytes32[32]","name":"smtProofLocalExitRoot","type":"bytes32[32]"},{"internalType":"bytes32[32]","name":"smtProofRollupExitRoot","type":"bytes32[32]"},{"internalType":"uint256","name":"globalIndex","type":"uint256"},{"internalType":"bytes32","name":"mainnetExitRoot","type":"bytes32"},{"internalType":"bytes32","name":"rollupExitRoot","type":"bytes32"},{"internalType":"uint32","name":"originNetwork","type":"uint32"},{"internalType":"address","name":"originAddress","type":"address"},{"internalType":"uint32","name":"destinationNetwork","type":"uint32"},{"internalType":"address","name":"destinationAddress","type":"address"},{"internalType":"uint256","name":"amount","type":"uint256"},{"internalType":"bytes","name":"metadata","type":"bytes"}],"name":"claimMessage","outputs":[],"stateMutability":"nonpayable","type":"function"}]
\ No newline at end of file
+[{"anonymous":false,"inputs":[{"indexed":false,"internalType":"uint256","name":"globalIndex","type":"uint256"},{"indexed":false,"internalType":"uint32","name":"originNetwork","type":"uint32"},{"indexed":false,"internalType":"address","name":"originAddress","type":"address"},{"indexed":false,"internalType":"address","name":"destinationAddress","type":"address"},{"indexed":false,"internalType":"uint256","name":"amount","type":"uint256"}],"name":"ClaimEvent","type":"event"},{"inputs":[{"internalType":"bytes32[32]","name":"smtProofLocalExitRoot","type":"bytes32[32]"},{"internalType":"bytes32[32]","name":"smtProofRollupExitRoot","type":"bytes32[32]"},{"internalType":"uint256","name":"globalIndex","type":"uint256"},{"internalType":"bytes32","name":"mainnetExitRoot","type":"bytes32"},{"internalType":"bytes32","name":"rollupExitRoot","type":"bytes32"},{"internalType":"uint32","name":"originNetwork","type":"uint32"},{"internalType":"address","name":"originTokenAddress","type":"address"},{"internalType":"uint32","name":"destinationNetwork","type":"uint32"},{"internalType":"address","name":"destinationAddress","type":"address"},{"internalType":"uint256","name":"amount","type":"uint256"},{"internalType":"bytes","name":"metadata","type":"bytes"}],"name":"claimAsset","outputs":[],"stateMutability":"payable","type":"function"},{"inputs":[{"internalType":"bytes32[32]","name":"smtProofLocalExitRoot","type":"bytes32[32]"},{"internalType":"bytes32[32]","name":"smtProofRollupExitRoot","type":"bytes32[32]"},{"internalType":"uint256","name":"globalIndex","type":"uint256"},{"internalType":"bytes32","name":"mainnetExitRoot","type":"bytes32"},{"internalType":"bytes32","name":"rollupExitRoot","type":"bytes32"},{"internalType":"uint32","name":"originNetwork","type":"uint32"},{"internalType":"address","name":"originAddress","type":"address"},{"internalType":"uint32","name":"destinationNetwork","type":"uint32"},{"internalType":"address","name":"destinationAddress","type":"address"},{"internalType":"uint256","name":"amount","type":"uint256"},{"internalType":"bytes","name":"metadata","type":"bytes"}],"name":"claimMessage","outputs":[],"stateMutability":"payable","type":"function"}]
\ No newline at end of file
diff --git a/test/contracts/abi/claimmockcaller.abi b/test/contracts/abi/claimmockcaller.abi
index b2c6e2b9a..21bf6ebc1 100644
--- a/test/contracts/abi/claimmockcaller.abi
+++ b/test/contracts/abi/claimmockcaller.abi
@@ -1 +1 @@
-[{"inputs":[{"internalType":"contract IClaimMock","name":"_claimMock","type":"address"}],"stateMutability":"nonpayable","type":"constructor"},{"inputs":[{"internalType":"bytes32[32]","name":"smtProofLocalExitRoot","type":"bytes32[32]"},{"internalType":"bytes32[32]","name":"smtProofRollupExitRoot","type":"bytes32[32]"},{"internalType":"uint256","name":"globalIndex","type":"uint256"},{"internalType":"bytes32","name":"mainnetExitRoot","type":"bytes32"},{"internalType":"bytes32","name":"rollupExitRoot","type":"bytes32"},{"internalType":"uint32","name":"originNetwork","type":"uint32"},{"internalType":"address","name":"originTokenAddress","type":"address"},{"internalType":"uint32","name":"destinationNetwork","type":"uint32"},{"internalType":"address","name":"destinationAddress","type":"address"},{"internalType":"uint256","name":"amount","type":"uint256"},{"internalType":"bytes","name":"metadata","type":"bytes"}],"name":"claimAsset","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"bytes32[32]","name":"smtProofLocalExitRoot","type":"bytes32[32]"},{"internalType":"bytes32[32]","name":"smtProofRollupExitRoot","type":"bytes32[32]"},{"internalType":"uint256","name":"globalIndex","type":"uint256"},{"internalType":"bytes32","name":"mainnetExitRoot","type":"bytes32"},{"internalType":"bytes32","name":"rollupExitRoot","type":"bytes32"},{"internalType":"uint32","name":"originNetwork","type":"uint32"},{"internalType":"address","name":"originAddress","type":"address"},{"internalType":"uint32","name":"destinationNetwork","type":"uint32"},{"internalType":"address","name":"destinationAddress","type":"address"},{"internalType":"uint256","name":"amount","type":"uint256"},{"internalType":"bytes","name":"metadata","type":"bytes"}],"name":"claimMessage","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"claimMock","outputs":[{"internalType":"contract IClaimMock","name":"","type":"address"}],"stateMutability":"view","type":"function"}]
\ No newline at end of file
+[{"inputs":[{"internalType":"contract IClaimMock","name":"_claimMock","type":"address"}],"stateMutability":"nonpayable","type":"constructor"},{"inputs":[{"internalType":"bytes","name":"claim1","type":"bytes"},{"internalType":"bytes","name":"claim2","type":"bytes"},{"internalType":"bool[2]","name":"reverted","type":"bool[2]"}],"name":"claim2Bytes","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"bytes32[32]","name":"smtProofLocalExitRoot","type":"bytes32[32]"},{"internalType":"bytes32[32]","name":"smtProofRollupExitRoot","type":"bytes32[32]"},{"internalType":"uint256","name":"globalIndex","type":"uint256"},{"internalType":"bytes32","name":"mainnetExitRoot","type":"bytes32"},{"internalType":"bytes32","name":"rollupExitRoot","type":"bytes32"},{"internalType":"uint32","name":"originNetwork","type":"uint32"},{"internalType":"address","name":"originTokenAddress","type":"address"},{"internalType":"uint32","name":"destinationNetwork","type":"uint32"},{"internalType":"address","name":"destinationAddress","type":"address"},{"internalType":"uint256","name":"amount","type":"uint256"},{"internalType":"bytes","name":"metadata","type":"bytes"},{"internalType":"bool","name":"reverted","type":"bool"}],"name":"claimAsset","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"bytes","name":"claim","type":"bytes"},{"internalType":"bool","name":"reverted","type":"bool"}],"name":"claimBytes","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"bytes32[32]","name":"smtProofLocalExitRoot","type":"bytes32[32]"},{"internalType":"bytes32[32]","name":"smtProofRollupExitRoot","type":"bytes32[32]"},{"internalType":"uint256","name":"globalIndex","type":"uint256"},{"internalType":"bytes32","name":"mainnetExitRoot","type":"bytes32"},{"internalType":"bytes32","name":"rollupExitRoot","type":"bytes32"},{"internalType":"uint32","name":"originNetwork","type":"uint32"},{"internalType":"address","name":"originAddress","type":"address"},{"internalType":"uint32","name":"destinationNetwork","type":"uint32"},{"internalType":"address","name":"destinationAddress","type":"address"},{"internalType":"uint256","name":"amount","type":"uint256"},{"internalType":"bytes","name":"metadata","type":"bytes"},{"internalType":"bool","name":"reverted","type":"bool"}],"name":"claimMessage","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"claimMock","outputs":[{"internalType":"contract IClaimMock","name":"","type":"address"}],"stateMutability":"view","type":"function"}]
\ No newline at end of file
diff --git a/test/contracts/bin/claimmock.bin b/test/contracts/bin/claimmock.bin
index 62d961e87..006fd65c3 100644
--- a/test/contracts/bin/claimmock.bin
+++ b/test/contracts/bin/claimmock.bin
@@ -1 +1 @@
-608060405234801561001057600080fd5b50610240806100206000396000f3fe608060405234801561001057600080fd5b50600436106100365760003560e01c8063ccaa2d111461003b578063f5efcd791461003b575b600080fd5b61004e610049366004610102565b610050565b005b604080518b815263ffffffff891660208201526001600160a01b0388811682840152861660608201526080810185905290517f1df3f2a973a00d6635911755c260704e95e8a5876997546798770f76396fda4d9181900360a00190a1505050505050505050505050565b8061040081018310156100cc57600080fd5b92915050565b803563ffffffff811681146100e657600080fd5b919050565b80356001600160a01b03811681146100e657600080fd5b6000806000806000806000806000806000806109208d8f03121561012557600080fd5b61012f8e8e6100ba565b9b5061013f8e6104008f016100ba565b9a506108008d013599506108208d013598506108408d013597506101666108608e016100d2565b96506101756108808e016100eb565b95506101846108a08e016100d2565b94506101936108c08e016100eb565b93506108e08d013592506109008d013567ffffffffffffffff808211156101b957600080fd5b818f0191508f601f8301126101cd57600080fd5b80823511156101db57600080fd5b508e6020823583010111156101ef57600080fd5b60208101925080359150509295989b509295989b509295989b56fea2646970667358221220ea3ccb4fef38083776607b84bdd7b00012029d7d1fee9fa7c300663fe761dcac64736f6c63430008120033
\ No newline at end of file
+6080806040523461001657610227908161001c8239f35b600080fdfe608080604052600436101561001357600080fd5b600090813560e01c908163ccaa2d11146100b5575063f5efcd791461003757600080fd5b6100403661012f565b5050945097505092509350600134146100b1576040805193845263ffffffff9490941660208401526001600160a01b039182169383019390935292909216606083015260808201527f1df3f2a973a00d6635911755c260704e95e8a5876997546798770f76396fda4d9060a090a180f35b8580fd5b7f1df3f2a973a00d6635911755c260704e95e8a5876997546798770f76396fda4d91508061011e6100e53661012f565b5050968b5263ffffffff90931660208b0152506001600160a01b0390811660408a015216606088015250506080850152505060a0820190565b0390a16001341461012c5780f35b80fd5b906109206003198301126101ec57610404908282116101ec57600492610804928184116101ec579235916108243591610844359163ffffffff906108643582811681036101ec57926001600160a01b03916108843583811681036101ec57936108a43590811681036101ec57926108c43590811681036101ec57916108e435916109043567ffffffffffffffff928382116101ec57806023830112156101ec57818e01359384116101ec57602484830101116101ec576024019190565b600080fdfea2646970667358221220360ea7019315ab59618e13d469f48b1816436744772ab76ff89153af49fb746a64736f6c63430008120033
\ No newline at end of file
diff --git a/test/contracts/bin/claimmockcaller.bin b/test/contracts/bin/claimmockcaller.bin
index 6a84c36fd..47d3dcdd1 100644
--- a/test/contracts/bin/claimmockcaller.bin
+++ b/test/contracts/bin/claimmockcaller.bin
@@ -1 +1 @@
-60a060405234801561001057600080fd5b5060405161047238038061047283398101604081905261002f91610040565b6001600160a01b0316608052610070565b60006020828403121561005257600080fd5b81516001600160a01b038116811461006957600080fd5b9392505050565b6080516103db61009760003960008181604b0152818160c8015261016a01526103db6000f3fe608060405234801561001057600080fd5b50600436106100415760003560e01c806383f5b00614610046578063ccaa2d1114610089578063f5efcd791461009e575b600080fd5b61006d7f000000000000000000000000000000000000000000000000000000000000000081565b6040516001600160a01b03909116815260200160405180910390f35b61009c6100973660046101fd565b6100b1565b005b61009c6100ac3660046101fd565b610153565b60405163ccaa2d1160e01b81526001600160a01b037f0000000000000000000000000000000000000000000000000000000000000000169063ccaa2d1190610113908f908f908f908f908f908f908f908f908f908f908f908f90600401610305565b600060405180830381600087803b15801561012d57600080fd5b505af1158015610141573d6000803e3d6000fd5b50505050505050505050505050505050565b60405163f5efcd7960e01b81526001600160a01b037f0000000000000000000000000000000000000000000000000000000000000000169063f5efcd7990610113908f908f908f908f908f908f908f908f908f908f908f908f90600401610305565b8061040081018310156101c757600080fd5b92915050565b803563ffffffff811681146101e157600080fd5b919050565b80356001600160a01b03811681146101e157600080fd5b6000806000806000806000806000806000806109208d8f03121561022057600080fd5b61022a8e8e6101b5565b9b5061023a8e6104008f016101b5565b9a506108008d013599506108208d013598506108408d013597506102616108608e016101cd565b96506102706108808e016101e6565b955061027f6108a08e016101cd565b945061028e6108c08e016101e6565b93506108e08d013592506109008d013567ffffffffffffffff808211156102b457600080fd5b818f0191508f601f8301126102c857600080fd5b80823511156102d657600080fd5b508e6020823583010111156102ea57600080fd5b60208101925080359150509295989b509295989b509295989b565b6000610400808f8437808e828501375061080082018c905261082082018b905261084082018a905263ffffffff8981166108608401526001600160a01b038981166108808501529088166108a084015286166108c08301526108e08201859052610920610900830181905282018390526109408385828501376000838501820152601f909301601f19169091019091019c9b50505050505050505050505056fea26469706673582212202321216f86560e0f29df639adc8713b3ce119002b4def8923caee0576ed8380564736f6c63430008120033
\ No newline at end of file
+60a03461008557601f61063738819003918201601f19168301916001600160401b0383118484101761008a5780849260209460405283398101031261008557516001600160a01b03811681036100855760805260405161059690816100a1823960805181818160d4015281816103bc01528181610407015281816104b701526104f80152f35b600080fd5b634e487b7160e01b600052604160045260246000fdfe6080604052600436101561001257600080fd5b6000803560e01c90816301beea651461006a575080631cf865cf1461006557806327e358431461006057806383f5b0061461005b5763a51061701461005657600080fd5b610436565b6103f1565b61036a565b6102d0565b3461010b57806020610aac608083610081366101a0565b929b939a949995989697969594939291506101029050575b63f5efcd7960e01b8c5260a00135610124528a013561050452610884526108a4526108c4526108e452610904526109245261094452610964527f00000000000000000000000000000000000000000000000000000000000000006001600160a01b03165af15080f35b60019a50610099565b80fd5b6108a4359063ffffffff8216820361012257565b600080fd5b61088435906001600160a01b038216820361012257565b6108c435906001600160a01b038216820361012257565b9181601f840112156101225782359167ffffffffffffffff8311610122576020838186019501011161012257565b6109243590811515820361012257565b3590811515820361012257565b906109406003198301126101225761040490828211610122576004926108049281841161012257923591610824359161084435916108643563ffffffff8116810361012257916101ee610127565b916101f761010e565b9161020061013e565b916108e43591610904359067ffffffffffffffff821161012257610225918d01610155565b909161022f610183565b90565b634e487b7160e01b600052604160045260246000fd5b604051906040820182811067ffffffffffffffff82111761026857604052565b610232565b81601f820112156101225780359067ffffffffffffffff928383116102685760405193601f8401601f19908116603f0116850190811185821017610268576040528284526020838301011161012257816000926020809301838601378301015290565b346101225760803660031901126101225767ffffffffffffffff6004358181116101225761030290369060040161026d565b906024359081116101225761031b90369060040161026d565b9036606312156101225761032d610248565b9182916084368111610122576044945b81861061035257505061035093506104ec565b005b6020809161035f88610193565b81520195019461033d565b346101225760403660031901126101225760043567ffffffffffffffff81116101225761039b90369060040161026d565b602435801515810361012257610aac60209160009384916103e8575b8301907f00000000000000000000000000000000000000000000000000000000000000006001600160a01b03165af1005b600191506103b7565b34610122576000366003190112610122576040517f00000000000000000000000000000000000000000000000000000000000000006001600160a01b03168152602090f35b346101225760006020610aac61044b366101a0565b9a9150508d989198979297969396959495996104e3575b60405163ccaa2d1160e01b815260a09b909b013560a48c0152608001356104848b01526108048a01526108248901526108448801526108648701526108848601526108a48501526108c48401526108e48301527f00000000000000000000000000000000000000000000000000000000000000006001600160a01b03165af1005b60019950610462565b825160009360209384937f00000000000000000000000000000000000000000000000000000000000000006001600160a01b0316938793929190610557575b858893015161054e575b858891819495610aac9889920190885af15001915af150565b60019250610535565b6001935061052b56fea2646970667358221220bbde05c8a8245c4319ff8aa0ce8d95e6c5dd5c5828fe085ba1491ea451b390ba64736f6c63430008120033
\ No newline at end of file
diff --git a/test/contracts/claimmock/ClaimMock.sol b/test/contracts/claimmock/ClaimMock.sol
index 14d94eaeb..adcea4fee 100644
--- a/test/contracts/claimmock/ClaimMock.sol
+++ b/test/contracts/claimmock/ClaimMock.sol
@@ -25,7 +25,7 @@ contract ClaimMock {
         address destinationAddress,
         uint256 amount,
         bytes calldata metadata
-    ) external {
+    ) external payable {
         emit ClaimEvent(
             globalIndex,
             originNetwork,
@@ -33,6 +33,9 @@ contract ClaimMock {
             destinationAddress,
             amount
         );
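+        // A msg.value of exactly 1 wei forces a revert (rolling back the event above),
+        // giving callers a deterministic way to simulate a failed claim.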
+        if(msg.value == 1) {
+            revert();
+        }
     }
 
     function claimMessage(
@@ -47,7 +50,10 @@ contract ClaimMock {
         address destinationAddress,
         uint256 amount,
         bytes calldata metadata
-    ) external {
+    ) external payable {
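+        // As in claimAsset, a msg.value of exactly 1 wei simulates a failed claim.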
+        if(msg.value == 1) {
+            revert();
+        }
         emit ClaimEvent(
             globalIndex,
             originNetwork,
diff --git a/test/contracts/claimmock/claimmock.go b/test/contracts/claimmock/claimmock.go
index cc577908d..49a385465 100644
--- a/test/contracts/claimmock/claimmock.go
+++ b/test/contracts/claimmock/claimmock.go
@@ -31,8 +31,8 @@ var (
 
 // ClaimmockMetaData contains all meta data concerning the Claimmock contract.
 var ClaimmockMetaData = &bind.MetaData{
-	ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"globalIndex\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"originNetwork\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"originAddress\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"destinationAddress\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"ClaimEvent\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"bytes32[32]\",\"name\":\"smtProofLocalExitRoot\",\"type\":\"bytes32[32]\"},{\"internalType\":\"bytes32[32]\",\"name\":\"smtProofRollupExitRoot\",\"type\":\"bytes32[32]\"},{\"internalType\":\"uint256\",\"name\":\"globalIndex\",\"type\":\"uint256\"},{\"internalType\":\"bytes32\",\"name\":\"mainnetExitRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"rollupExitRoot\",\"type\":\"bytes32\"},{\"internalType\":\"uint32\",\"name\":\"originNetwork\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"originTokenAddress\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"destinationNetwork\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"destinationAddress\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"metadata\",\"type\":\"bytes\"}],\"name\":\"claimAsset\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32[32]\",\"name\":\"smtProofLocalExitRoot\",\"type\":\"bytes32[32]\"},{\"internalType\":\"bytes32[32]\",\"name\":\"smtProofRollupExitRoot\",\"type\":\"bytes32[32]\"},{\"internalType\":\"uint256\",\"name\":\"globalIndex\",\"type\":\"uint256\"},{\"internalType\":\"bytes32\",\"name\":\"mainnetExitRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"rollupExitRoot\",\"type\":\"bytes32\"},{\"internalType\":\"uint32\",\"name\":\"originNetwork\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"originAddress\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"destinationNetwork\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"destinationAddress\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"metadata\",\"type\":\"bytes\"}],\"name\":\"claimMessage\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]",
-	Bin: "0x608060405234801561001057600080fd5b50610240806100206000396000f3fe608060405234801561001057600080fd5b50600436106100365760003560e01c8063ccaa2d111461003b578063f5efcd791461003b575b600080fd5b61004e610049366004610102565b610050565b005b604080518b815263ffffffff891660208201526001600160a01b0388811682840152861660608201526080810185905290517f1df3f2a973a00d6635911755c260704e95e8a5876997546798770f76396fda4d9181900360a00190a1505050505050505050505050565b8061040081018310156100cc57600080fd5b92915050565b803563ffffffff811681146100e657600080fd5b919050565b80356001600160a01b03811681146100e657600080fd5b6000806000806000806000806000806000806109208d8f03121561012557600080fd5b61012f8e8e6100ba565b9b5061013f8e6104008f016100ba565b9a506108008d013599506108208d013598506108408d013597506101666108608e016100d2565b96506101756108808e016100eb565b95506101846108a08e016100d2565b94506101936108c08e016100eb565b93506108e08d013592506109008d013567ffffffffffffffff808211156101b957600080fd5b818f0191508f601f8301126101cd57600080fd5b80823511156101db57600080fd5b508e6020823583010111156101ef57600080fd5b60208101925080359150509295989b509295989b509295989b56fea2646970667358221220ea3ccb4fef38083776607b84bdd7b00012029d7d1fee9fa7c300663fe761dcac64736f6c63430008120033",
+	ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"globalIndex\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"originNetwork\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"originAddress\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"destinationAddress\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"ClaimEvent\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"bytes32[32]\",\"name\":\"smtProofLocalExitRoot\",\"type\":\"bytes32[32]\"},{\"internalType\":\"bytes32[32]\",\"name\":\"smtProofRollupExitRoot\",\"type\":\"bytes32[32]\"},{\"internalType\":\"uint256\",\"name\":\"globalIndex\",\"type\":\"uint256\"},{\"internalType\":\"bytes32\",\"name\":\"mainnetExitRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"rollupExitRoot\",\"type\":\"bytes32\"},{\"internalType\":\"uint32\",\"name\":\"originNetwork\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"originTokenAddress\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"destinationNetwork\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"destinationAddress\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"metadata\",\"type\":\"bytes\"}],\"name\":\"claimAsset\",\"outputs\":[],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32[32]\",\"name\":\"smtProofLocalExitRoot\",\"type\":\"bytes32[32]\"},{\"internalType\":\"bytes32[32]\",\"name\":\"smtProofRollupExitRoot\",\"type\":\"bytes32[32]\"},{\"internalType\":\"uint256\",\"name\":\"globalIndex\",\"type\":\"uint256\"},{\"internalType\":\"bytes32\",\"name\":\"mainnetExitRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"rollupExitRoot\",\"type\":\"bytes32\"},{\"internalType\":\"uint32\",\"name\":\"originNetwork\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"originAddress\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"destinationNetwork\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"destinationAddress\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"metadata\",\"type\":\"bytes\"}],\"name\":\"claimMessage\",\"outputs\":[],\"stateMutability\":\"payable\",\"type\":\"function\"}]",
+	Bin: "0x6080806040523461001657610227908161001c8239f35b600080fdfe608080604052600436101561001357600080fd5b600090813560e01c908163ccaa2d11146100b5575063f5efcd791461003757600080fd5b6100403661012f565b5050945097505092509350600134146100b1576040805193845263ffffffff9490941660208401526001600160a01b039182169383019390935292909216606083015260808201527f1df3f2a973a00d6635911755c260704e95e8a5876997546798770f76396fda4d9060a090a180f35b8580fd5b7f1df3f2a973a00d6635911755c260704e95e8a5876997546798770f76396fda4d91508061011e6100e53661012f565b5050968b5263ffffffff90931660208b0152506001600160a01b0390811660408a015216606088015250506080850152505060a0820190565b0390a16001341461012c5780f35b80fd5b906109206003198301126101ec57610404908282116101ec57600492610804928184116101ec579235916108243591610844359163ffffffff906108643582811681036101ec57926001600160a01b03916108843583811681036101ec57936108a43590811681036101ec57926108c43590811681036101ec57916108e435916109043567ffffffffffffffff928382116101ec57806023830112156101ec57818e01359384116101ec57602484830101116101ec576024019190565b600080fdfea2646970667358221220360ea7019315ab59618e13d469f48b1816436744772ab76ff89153af49fb746a64736f6c63430008120033",
 }
 
 // ClaimmockABI is the input ABI used to generate the binding from.
@@ -204,42 +204,42 @@ func (_Claimmock *ClaimmockTransactorRaw) Transact(opts *bind.TransactOpts, meth
 
 // ClaimAsset is a paid mutator transaction binding the contract method 0xccaa2d11.
 //
-// Solidity: function claimAsset(bytes32[32] smtProofLocalExitRoot, bytes32[32] smtProofRollupExitRoot, uint256 globalIndex, bytes32 mainnetExitRoot, bytes32 rollupExitRoot, uint32 originNetwork, address originTokenAddress, uint32 destinationNetwork, address destinationAddress, uint256 amount, bytes metadata) returns()
+// Solidity: function claimAsset(bytes32[32] smtProofLocalExitRoot, bytes32[32] smtProofRollupExitRoot, uint256 globalIndex, bytes32 mainnetExitRoot, bytes32 rollupExitRoot, uint32 originNetwork, address originTokenAddress, uint32 destinationNetwork, address destinationAddress, uint256 amount, bytes metadata) payable returns()
 func (_Claimmock *ClaimmockTransactor) ClaimAsset(opts *bind.TransactOpts, smtProofLocalExitRoot [32][32]byte, smtProofRollupExitRoot [32][32]byte, globalIndex *big.Int, mainnetExitRoot [32]byte, rollupExitRoot [32]byte, originNetwork uint32, originTokenAddress common.Address, destinationNetwork uint32, destinationAddress common.Address, amount *big.Int, metadata []byte) (*types.Transaction, error) {
 	return _Claimmock.contract.Transact(opts, "claimAsset", smtProofLocalExitRoot, smtProofRollupExitRoot, globalIndex, mainnetExitRoot, rollupExitRoot, originNetwork, originTokenAddress, destinationNetwork, destinationAddress, amount, metadata)
 }
 
 // ClaimAsset is a paid mutator transaction binding the contract method 0xccaa2d11.
 //
-// Solidity: function claimAsset(bytes32[32] smtProofLocalExitRoot, bytes32[32] smtProofRollupExitRoot, uint256 globalIndex, bytes32 mainnetExitRoot, bytes32 rollupExitRoot, uint32 originNetwork, address originTokenAddress, uint32 destinationNetwork, address destinationAddress, uint256 amount, bytes metadata) returns()
+// Solidity: function claimAsset(bytes32[32] smtProofLocalExitRoot, bytes32[32] smtProofRollupExitRoot, uint256 globalIndex, bytes32 mainnetExitRoot, bytes32 rollupExitRoot, uint32 originNetwork, address originTokenAddress, uint32 destinationNetwork, address destinationAddress, uint256 amount, bytes metadata) payable returns()
 func (_Claimmock *ClaimmockSession) ClaimAsset(smtProofLocalExitRoot [32][32]byte, smtProofRollupExitRoot [32][32]byte, globalIndex *big.Int, mainnetExitRoot [32]byte, rollupExitRoot [32]byte, originNetwork uint32, originTokenAddress common.Address, destinationNetwork uint32, destinationAddress common.Address, amount *big.Int, metadata []byte) (*types.Transaction, error) {
 	return _Claimmock.Contract.ClaimAsset(&_Claimmock.TransactOpts, smtProofLocalExitRoot, smtProofRollupExitRoot, globalIndex, mainnetExitRoot, rollupExitRoot, originNetwork, originTokenAddress, destinationNetwork, destinationAddress, amount, metadata)
 }
 
 // ClaimAsset is a paid mutator transaction binding the contract method 0xccaa2d11.
 //
-// Solidity: function claimAsset(bytes32[32] smtProofLocalExitRoot, bytes32[32] smtProofRollupExitRoot, uint256 globalIndex, bytes32 mainnetExitRoot, bytes32 rollupExitRoot, uint32 originNetwork, address originTokenAddress, uint32 destinationNetwork, address destinationAddress, uint256 amount, bytes metadata) returns()
+// Solidity: function claimAsset(bytes32[32] smtProofLocalExitRoot, bytes32[32] smtProofRollupExitRoot, uint256 globalIndex, bytes32 mainnetExitRoot, bytes32 rollupExitRoot, uint32 originNetwork, address originTokenAddress, uint32 destinationNetwork, address destinationAddress, uint256 amount, bytes metadata) payable returns()
 func (_Claimmock *ClaimmockTransactorSession) ClaimAsset(smtProofLocalExitRoot [32][32]byte, smtProofRollupExitRoot [32][32]byte, globalIndex *big.Int, mainnetExitRoot [32]byte, rollupExitRoot [32]byte, originNetwork uint32, originTokenAddress common.Address, destinationNetwork uint32, destinationAddress common.Address, amount *big.Int, metadata []byte) (*types.Transaction, error) {
 	return _Claimmock.Contract.ClaimAsset(&_Claimmock.TransactOpts, smtProofLocalExitRoot, smtProofRollupExitRoot, globalIndex, mainnetExitRoot, rollupExitRoot, originNetwork, originTokenAddress, destinationNetwork, destinationAddress, amount, metadata)
 }
 
 // ClaimMessage is a paid mutator transaction binding the contract method 0xf5efcd79.
 //
-// Solidity: function claimMessage(bytes32[32] smtProofLocalExitRoot, bytes32[32] smtProofRollupExitRoot, uint256 globalIndex, bytes32 mainnetExitRoot, bytes32 rollupExitRoot, uint32 originNetwork, address originAddress, uint32 destinationNetwork, address destinationAddress, uint256 amount, bytes metadata) returns()
+// Solidity: function claimMessage(bytes32[32] smtProofLocalExitRoot, bytes32[32] smtProofRollupExitRoot, uint256 globalIndex, bytes32 mainnetExitRoot, bytes32 rollupExitRoot, uint32 originNetwork, address originAddress, uint32 destinationNetwork, address destinationAddress, uint256 amount, bytes metadata) payable returns()
 func (_Claimmock *ClaimmockTransactor) ClaimMessage(opts *bind.TransactOpts, smtProofLocalExitRoot [32][32]byte, smtProofRollupExitRoot [32][32]byte, globalIndex *big.Int, mainnetExitRoot [32]byte, rollupExitRoot [32]byte, originNetwork uint32, originAddress common.Address, destinationNetwork uint32, destinationAddress common.Address, amount *big.Int, metadata []byte) (*types.Transaction, error) {
 	return _Claimmock.contract.Transact(opts, "claimMessage", smtProofLocalExitRoot, smtProofRollupExitRoot, globalIndex, mainnetExitRoot, rollupExitRoot, originNetwork, originAddress, destinationNetwork, destinationAddress, amount, metadata)
 }
 
 // ClaimMessage is a paid mutator transaction binding the contract method 0xf5efcd79.
 //
-// Solidity: function claimMessage(bytes32[32] smtProofLocalExitRoot, bytes32[32] smtProofRollupExitRoot, uint256 globalIndex, bytes32 mainnetExitRoot, bytes32 rollupExitRoot, uint32 originNetwork, address originAddress, uint32 destinationNetwork, address destinationAddress, uint256 amount, bytes metadata) returns()
+// Solidity: function claimMessage(bytes32[32] smtProofLocalExitRoot, bytes32[32] smtProofRollupExitRoot, uint256 globalIndex, bytes32 mainnetExitRoot, bytes32 rollupExitRoot, uint32 originNetwork, address originAddress, uint32 destinationNetwork, address destinationAddress, uint256 amount, bytes metadata) payable returns()
 func (_Claimmock *ClaimmockSession) ClaimMessage(smtProofLocalExitRoot [32][32]byte, smtProofRollupExitRoot [32][32]byte, globalIndex *big.Int, mainnetExitRoot [32]byte, rollupExitRoot [32]byte, originNetwork uint32, originAddress common.Address, destinationNetwork uint32, destinationAddress common.Address, amount *big.Int, metadata []byte) (*types.Transaction, error) {
 	return _Claimmock.Contract.ClaimMessage(&_Claimmock.TransactOpts, smtProofLocalExitRoot, smtProofRollupExitRoot, globalIndex, mainnetExitRoot, rollupExitRoot, originNetwork, originAddress, destinationNetwork, destinationAddress, amount, metadata)
 }
 
 // ClaimMessage is a paid mutator transaction binding the contract method 0xf5efcd79.
 //
-// Solidity: function claimMessage(bytes32[32] smtProofLocalExitRoot, bytes32[32] smtProofRollupExitRoot, uint256 globalIndex, bytes32 mainnetExitRoot, bytes32 rollupExitRoot, uint32 originNetwork, address originAddress, uint32 destinationNetwork, address destinationAddress, uint256 amount, bytes metadata) returns()
+// Solidity: function claimMessage(bytes32[32] smtProofLocalExitRoot, bytes32[32] smtProofRollupExitRoot, uint256 globalIndex, bytes32 mainnetExitRoot, bytes32 rollupExitRoot, uint32 originNetwork, address originAddress, uint32 destinationNetwork, address destinationAddress, uint256 amount, bytes metadata) payable returns()
 func (_Claimmock *ClaimmockTransactorSession) ClaimMessage(smtProofLocalExitRoot [32][32]byte, smtProofRollupExitRoot [32][32]byte, globalIndex *big.Int, mainnetExitRoot [32]byte, rollupExitRoot [32]byte, originNetwork uint32, originAddress common.Address, destinationNetwork uint32, destinationAddress common.Address, amount *big.Int, metadata []byte) (*types.Transaction, error) {
 	return _Claimmock.Contract.ClaimMessage(&_Claimmock.TransactOpts, smtProofLocalExitRoot, smtProofRollupExitRoot, globalIndex, mainnetExitRoot, rollupExitRoot, originNetwork, originAddress, destinationNetwork, destinationAddress, amount, metadata)
 }
diff --git a/test/contracts/claimmockcaller/ClaimMockCaller.sol b/test/contracts/claimmockcaller/ClaimMockCaller.sol
index 3ab2f286a..5f82003e6 100644
--- a/test/contracts/claimmockcaller/ClaimMockCaller.sol
+++ b/test/contracts/claimmockcaller/ClaimMockCaller.sol
@@ -29,21 +29,32 @@ contract ClaimMockCaller {
         uint32 destinationNetwork,
         address destinationAddress,
         uint256 amount,
-        bytes calldata metadata
+        bytes calldata metadata,
+        bool reverted
     ) external {
-        claimMock.claimAsset(
-            smtProofLocalExitRoot,
-            smtProofRollupExitRoot,
-            globalIndex,
-            mainnetExitRoot,
-            rollupExitRoot,
-            originNetwork,
-            originTokenAddress,
-            destinationNetwork,
-            destinationAddress,
-            amount,
-            metadata
-        );
+        address addr = address(claimMock);
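+        // A call value of exactly 1 wei triggers ClaimMock's forced-revert path.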
+        uint256 value = 0;
+        if(reverted) {
+            value = 1;
+        }
+        bytes4 argSig = bytes4(keccak256("claimAsset(bytes32[32],bytes32[32],uint256,bytes32,bytes32,uint32,address,uint32,address,uint256,bytes)"));
+        bytes32 value1 = smtProofLocalExitRoot[5];
+        bytes32 value2 = smtProofRollupExitRoot[4];
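+        // Hand-rolled calldata: the 4-byte selector sits at x, one sampled word of each
+        // 32-word proof array lands at 164 (= 4 + 5*32) and 1156 (= 4 + 1024 + 4*32), and
+        // the scalar arguments start at 2052 (= 4 + 2*1024); all other words are left as-is.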
+        assembly {
+            let x := mload(0x40) // load the free memory pointer (next unused memory slot)
+            mstore(x,argSig)
+            mstore(add(x,164),value1)
+            mstore(add(x,1156),value2)
+            mstore(add(x,2052),globalIndex)
+            mstore(add(x,2084),mainnetExitRoot)
+            mstore(add(x,2116),rollupExitRoot)
+            mstore(add(x,2148),originNetwork)
+            mstore(add(x,2180),originTokenAddress)
+            mstore(add(x,2212),destinationNetwork)
+            mstore(add(x,2244),destinationAddress)
+            mstore(add(x,2276),amount)
+            let success := call(gas(), addr, value, x, 0xaac, 0x20, 0)
+        }
     }
 
     function claimMessage(
@@ -57,20 +68,68 @@ contract ClaimMockCaller {
         uint32 destinationNetwork,
         address destinationAddress,
         uint256 amount,
-        bytes calldata metadata
+        bytes calldata metadata,
+        bool reverted
     ) external {
-        claimMock.claimMessage(
-            smtProofLocalExitRoot,
-            smtProofRollupExitRoot,
-            globalIndex,
-            mainnetExitRoot,
-            rollupExitRoot,
-            originNetwork,
-            originAddress,
-            destinationNetwork,
-            destinationAddress,
-            amount,
-            metadata
-        );  
+        address addr = address(claimMock);
+        uint256 value = 0;
+        if(reverted) {
+            value = 1;
+        }
+        bytes4 argSig = bytes4(keccak256("claimMessage(bytes32[32],bytes32[32],uint256,bytes32,bytes32,uint32,address,uint32,address,uint256,bytes)"));
+        bytes32 value1 = smtProofLocalExitRoot[5];
+        bytes32 value2 = smtProofRollupExitRoot[4];
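+        // Same hand-rolled calldata layout as in claimAsset, with the claimMessage selector.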
+        assembly {
+            let x := mload(0x40) // load the free memory pointer (next unused memory slot)
+            mstore(x,argSig)
+            mstore(add(x,164),value1)
+            mstore(add(x,1156),value2)
+            mstore(add(x,2052),globalIndex)
+            mstore(add(x,2084),mainnetExitRoot)
+            mstore(add(x,2116),rollupExitRoot)
+            mstore(add(x,2148),originNetwork)
+            mstore(add(x,2180),originAddress)
+            mstore(add(x,2212),destinationNetwork)
+            mstore(add(x,2244),destinationAddress)
+            mstore(add(x,2276),amount)
+            let success := call(gas(), addr, value, x, 0xaac, 0x20, 0)
+        }
     }
+
+    function claimBytes(
+        bytes memory claim,
+        bool reverted
+    ) external {
+        address addr = address(claimMock);
+        uint256 value = 0;
+        if(reverted) {
+            value = 1;
+        }
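+        // Forward the pre-encoded claim (add(claim, 32) skips the bytes length word) as
+        // raw calldata of fixed size 0xaac; the success flag is deliberately ignored so a
+        // forced revert in the mock does not bubble up.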
+        assembly {
+            let success := call(gas(), addr, value, add(claim, 32), 0xaac, 0x20, 0)
+        }
+    }
+
+    function claim2Bytes(
+        bytes memory claim1,
+        bytes memory claim2,
+        bool[2] memory reverted
+    ) external {
+        address addr = address(claimMock);
+        uint256 value1 = 0;
+        if(reverted[0]) {
+            value1 = 1;
+        }
+        uint256 value2 = 0;
+        if(reverted[1]) {
+            value2 = 1;
+        }
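+        // Fire both claims back to back, ignoring each success flag so one claim can
+        // revert without aborting the other.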
+        assembly {
+            let success1 := call(gas(), addr, value1, add(claim1, 32), 0xaac, 0x20, 0)
+        }
+        assembly {
+            let success2 := call(gas(), addr, value2, add(claim2, 32), 0xaac, 0x20, 0)
+        }
+    }
+
 }
\ No newline at end of file
diff --git a/test/contracts/claimmockcaller/claimmockcaller.go b/test/contracts/claimmockcaller/claimmockcaller.go
index 78acccadd..917ce4cc7 100644
--- a/test/contracts/claimmockcaller/claimmockcaller.go
+++ b/test/contracts/claimmockcaller/claimmockcaller.go
@@ -31,8 +31,8 @@ var (
 
 // ClaimmockcallerMetaData contains all meta data concerning the Claimmockcaller contract.
 var ClaimmockcallerMetaData = &bind.MetaData{
-	ABI: "[{\"inputs\":[{\"internalType\":\"contractIClaimMock\",\"name\":\"_claimMock\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[{\"internalType\":\"bytes32[32]\",\"name\":\"smtProofLocalExitRoot\",\"type\":\"bytes32[32]\"},{\"internalType\":\"bytes32[32]\",\"name\":\"smtProofRollupExitRoot\",\"type\":\"bytes32[32]\"},{\"internalType\":\"uint256\",\"name\":\"globalIndex\",\"type\":\"uint256\"},{\"internalType\":\"bytes32\",\"name\":\"mainnetExitRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"rollupExitRoot\",\"type\":\"bytes32\"},{\"internalType\":\"uint32\",\"name\":\"originNetwork\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"originTokenAddress\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"destinationNetwork\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"destinationAddress\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"metadata\",\"type\":\"bytes\"}],\"name\":\"claimAsset\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32[32]\",\"name\":\"smtProofLocalExitRoot\",\"type\":\"bytes32[32]\"},{\"internalType\":\"bytes32[32]\",\"name\":\"smtProofRollupExitRoot\",\"type\":\"bytes32[32]\"},{\"internalType\":\"uint256\",\"name\":\"globalIndex\",\"type\":\"uint256\"},{\"internalType\":\"bytes32\",\"name\":\"mainnetExitRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"rollupExitRoot\",\"type\":\"bytes32\"},{\"internalType\":\"uint32\",\"name\":\"originNetwork\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"originAddress\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"destinationNetwork\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"destinationAddress\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"metadata\",\"type\":\"bytes\"}],\"name\":\"claimMessage\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"claimMock\",\"outputs\":[{\"internalType\":\"contractIClaimMock\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]",
-	Bin: "0x60a060405234801561001057600080fd5b5060405161047238038061047283398101604081905261002f91610040565b6001600160a01b0316608052610070565b60006020828403121561005257600080fd5b81516001600160a01b038116811461006957600080fd5b9392505050565b6080516103db61009760003960008181604b0152818160c8015261016a01526103db6000f3fe608060405234801561001057600080fd5b50600436106100415760003560e01c806383f5b00614610046578063ccaa2d1114610089578063f5efcd791461009e575b600080fd5b61006d7f000000000000000000000000000000000000000000000000000000000000000081565b6040516001600160a01b03909116815260200160405180910390f35b61009c6100973660046101fd565b6100b1565b005b61009c6100ac3660046101fd565b610153565b60405163ccaa2d1160e01b81526001600160a01b037f0000000000000000000000000000000000000000000000000000000000000000169063ccaa2d1190610113908f908f908f908f908f908f908f908f908f908f908f908f90600401610305565b600060405180830381600087803b15801561012d57600080fd5b505af1158015610141573d6000803e3d6000fd5b50505050505050505050505050505050565b60405163f5efcd7960e01b81526001600160a01b037f0000000000000000000000000000000000000000000000000000000000000000169063f5efcd7990610113908f908f908f908f908f908f908f908f908f908f908f908f90600401610305565b8061040081018310156101c757600080fd5b92915050565b803563ffffffff811681146101e157600080fd5b919050565b80356001600160a01b03811681146101e157600080fd5b6000806000806000806000806000806000806109208d8f03121561022057600080fd5b61022a8e8e6101b5565b9b5061023a8e6104008f016101b5565b9a506108008d013599506108208d013598506108408d013597506102616108608e016101cd565b96506102706108808e016101e6565b955061027f6108a08e016101cd565b945061028e6108c08e016101e6565b93506108e08d013592506109008d013567ffffffffffffffff808211156102b457600080fd5b818f0191508f601f8301126102c857600080fd5b80823511156102d657600080fd5b508e6020823583010111156102ea57600080fd5b60208101925080359150509295989b509295989b509295989b565b6000610400808f8437808e828501375061080082018c905261082082018b905261084082018a905263ffffffff8981166108608401526001600160a01b038981166108808501529088166108a084015286166108c08301526108e08201859052610920610900830181905282018390526109408385828501376000838501820152601f909301601f19169091019091019c9b50505050505050505050505056fea26469706673582212202321216f86560e0f29df639adc8713b3ce119002b4def8923caee0576ed8380564736f6c63430008120033",
+	ABI: "[{\"inputs\":[{\"internalType\":\"contractIClaimMock\",\"name\":\"_claimMock\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"claim1\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"claim2\",\"type\":\"bytes\"},{\"internalType\":\"bool[2]\",\"name\":\"reverted\",\"type\":\"bool[2]\"}],\"name\":\"claim2Bytes\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32[32]\",\"name\":\"smtProofLocalExitRoot\",\"type\":\"bytes32[32]\"},{\"internalType\":\"bytes32[32]\",\"name\":\"smtProofRollupExitRoot\",\"type\":\"bytes32[32]\"},{\"internalType\":\"uint256\",\"name\":\"globalIndex\",\"type\":\"uint256\"},{\"internalType\":\"bytes32\",\"name\":\"mainnetExitRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"rollupExitRoot\",\"type\":\"bytes32\"},{\"internalType\":\"uint32\",\"name\":\"originNetwork\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"originTokenAddress\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"destinationNetwork\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"destinationAddress\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"metadata\",\"type\":\"bytes\"},{\"internalType\":\"bool\",\"name\":\"reverted\",\"type\":\"bool\"}],\"name\":\"claimAsset\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"claim\",\"type\":\"bytes\"},{\"internalType\":\"bool\",\"name\":\"reverted\",\"type\":\"bool\"}],\"name\":\"claimBytes\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32[32]\",\"name\":\"smtProofLocalExitRoot\",\"type\":\"bytes32[32]\"},{\"internalType\":\"bytes32[32]\",\"name\":\"smtProofRollupExitRoot\",\"type\":\"bytes32[32]\"},{\"internalType\":\"uint256\",\"name\":\"globalIndex\",\"type\":\"uint256\"},{\"internalType\":\"bytes32\",\"name\":\"mainnetExitRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"rollupExitRoot\",\"type\":\"bytes32\"},{\"internalType\":\"uint32\",\"name\":\"originNetwork\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"originAddress\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"destinationNetwork\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"destinationAddress\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"metadata\",\"type\":\"bytes\"},{\"internalType\":\"bool\",\"name\":\"reverted\",\"type\":\"bool\"}],\"name\":\"claimMessage\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"claimMock\",\"outputs\":[{\"internalType\":\"contractIClaimMock\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]",
+	Bin: "0x60a03461008557601f61063738819003918201601f19168301916001600160401b0383118484101761008a5780849260209460405283398101031261008557516001600160a01b03811681036100855760805260405161059690816100a1823960805181818160d4015281816103bc01528181610407015281816104b701526104f80152f35b600080fd5b634e487b7160e01b600052604160045260246000fdfe6080604052600436101561001257600080fd5b6000803560e01c90816301beea651461006a575080631cf865cf1461006557806327e358431461006057806383f5b0061461005b5763a51061701461005657600080fd5b610436565b6103f1565b61036a565b6102d0565b3461010b57806020610aac608083610081366101a0565b929b939a949995989697969594939291506101029050575b63f5efcd7960e01b8c5260a00135610124528a013561050452610884526108a4526108c4526108e452610904526109245261094452610964527f00000000000000000000000000000000000000000000000000000000000000006001600160a01b03165af15080f35b60019a50610099565b80fd5b6108a4359063ffffffff8216820361012257565b600080fd5b61088435906001600160a01b038216820361012257565b6108c435906001600160a01b038216820361012257565b9181601f840112156101225782359167ffffffffffffffff8311610122576020838186019501011161012257565b6109243590811515820361012257565b3590811515820361012257565b906109406003198301126101225761040490828211610122576004926108049281841161012257923591610824359161084435916108643563ffffffff8116810361012257916101ee610127565b916101f761010e565b9161020061013e565b916108e43591610904359067ffffffffffffffff821161012257610225918d01610155565b909161022f610183565b90565b634e487b7160e01b600052604160045260246000fd5b604051906040820182811067ffffffffffffffff82111761026857604052565b610232565b81601f820112156101225780359067ffffffffffffffff928383116102685760405193601f8401601f19908116603f0116850190811185821017610268576040528284526020838301011161012257816000926020809301838601378301015290565b346101225760803660031901126101225767ffffffffffffffff6004358181116101225761030290369060040161026d565b906024359081116101225761031b90369060040161026d565b9036606312156101225761032d610248565b9182916084368111610122576044945b81861061035257505061035093506104ec565b005b6020809161035f88610193565b81520195019461033d565b346101225760403660031901126101225760043567ffffffffffffffff81116101225761039b90369060040161026d565b602435801515810361012257610aac60209160009384916103e8575b8301907f00000000000000000000000000000000000000000000000000000000000000006001600160a01b03165af1005b600191506103b7565b34610122576000366003190112610122576040517f00000000000000000000000000000000000000000000000000000000000000006001600160a01b03168152602090f35b346101225760006020610aac61044b366101a0565b9a9150508d989198979297969396959495996104e3575b60405163ccaa2d1160e01b815260a09b909b013560a48c0152608001356104848b01526108048a01526108248901526108448801526108648701526108848601526108a48501526108c48401526108e48301527f00000000000000000000000000000000000000000000000000000000000000006001600160a01b03165af1005b60019950610462565b825160009360209384937f00000000000000000000000000000000000000000000000000000000000000006001600160a01b0316938793929190610557575b858893015161054e575b858891819495610aac9889920190885af15001915af150565b60019250610535565b6001935061052b56fea2646970667358221220bbde05c8a8245c4319ff8aa0ce8d95e6c5dd5c5828fe085ba1491ea451b390ba64736f6c63430008120033",
 }
 
 // ClaimmockcallerABI is the input ABI used to generate the binding from.
@@ -233,44 +233,86 @@ func (_Claimmockcaller *ClaimmockcallerCallerSession) ClaimMock() (common.Addres
 	return _Claimmockcaller.Contract.ClaimMock(&_Claimmockcaller.CallOpts)
 }
 
-// ClaimAsset is a paid mutator transaction binding the contract method 0xccaa2d11.
+// Claim2Bytes is a paid mutator transaction binding the contract method 0x1cf865cf.
 //
-// Solidity: function claimAsset(bytes32[32] smtProofLocalExitRoot, bytes32[32] smtProofRollupExitRoot, uint256 globalIndex, bytes32 mainnetExitRoot, bytes32 rollupExitRoot, uint32 originNetwork, address originTokenAddress, uint32 destinationNetwork, address destinationAddress, uint256 amount, bytes metadata) returns()
-func (_Claimmockcaller *ClaimmockcallerTransactor) ClaimAsset(opts *bind.TransactOpts, smtProofLocalExitRoot [32][32]byte, smtProofRollupExitRoot [32][32]byte, globalIndex *big.Int, mainnetExitRoot [32]byte, rollupExitRoot [32]byte, originNetwork uint32, originTokenAddress common.Address, destinationNetwork uint32, destinationAddress common.Address, amount *big.Int, metadata []byte) (*types.Transaction, error) {
-	return _Claimmockcaller.contract.Transact(opts, "claimAsset", smtProofLocalExitRoot, smtProofRollupExitRoot, globalIndex, mainnetExitRoot, rollupExitRoot, originNetwork, originTokenAddress, destinationNetwork, destinationAddress, amount, metadata)
+// Solidity: function claim2Bytes(bytes claim1, bytes claim2, bool[2] reverted) returns()
+func (_Claimmockcaller *ClaimmockcallerTransactor) Claim2Bytes(opts *bind.TransactOpts, claim1 []byte, claim2 []byte, reverted [2]bool) (*types.Transaction, error) {
+	return _Claimmockcaller.contract.Transact(opts, "claim2Bytes", claim1, claim2, reverted)
 }
 
-// ClaimAsset is a paid mutator transaction binding the contract method 0xccaa2d11.
+// Claim2Bytes is a paid mutator transaction binding the contract method 0x1cf865cf.
 //
-// Solidity: function claimAsset(bytes32[32] smtProofLocalExitRoot, bytes32[32] smtProofRollupExitRoot, uint256 globalIndex, bytes32 mainnetExitRoot, bytes32 rollupExitRoot, uint32 originNetwork, address originTokenAddress, uint32 destinationNetwork, address destinationAddress, uint256 amount, bytes metadata) returns()
-func (_Claimmockcaller *ClaimmockcallerSession) ClaimAsset(smtProofLocalExitRoot [32][32]byte, smtProofRollupExitRoot [32][32]byte, globalIndex *big.Int, mainnetExitRoot [32]byte, rollupExitRoot [32]byte, originNetwork uint32, originTokenAddress common.Address, destinationNetwork uint32, destinationAddress common.Address, amount *big.Int, metadata []byte) (*types.Transaction, error) {
-	return _Claimmockcaller.Contract.ClaimAsset(&_Claimmockcaller.TransactOpts, smtProofLocalExitRoot, smtProofRollupExitRoot, globalIndex, mainnetExitRoot, rollupExitRoot, originNetwork, originTokenAddress, destinationNetwork, destinationAddress, amount, metadata)
+// Solidity: function claim2Bytes(bytes claim1, bytes claim2, bool[2] reverted) returns()
+func (_Claimmockcaller *ClaimmockcallerSession) Claim2Bytes(claim1 []byte, claim2 []byte, reverted [2]bool) (*types.Transaction, error) {
+	return _Claimmockcaller.Contract.Claim2Bytes(&_Claimmockcaller.TransactOpts, claim1, claim2, reverted)
 }
 
-// ClaimAsset is a paid mutator transaction binding the contract method 0xccaa2d11.
+// Claim2Bytes is a paid mutator transaction binding the contract method 0x1cf865cf.
 //
-// Solidity: function claimAsset(bytes32[32] smtProofLocalExitRoot, bytes32[32] smtProofRollupExitRoot, uint256 globalIndex, bytes32 mainnetExitRoot, bytes32 rollupExitRoot, uint32 originNetwork, address originTokenAddress, uint32 destinationNetwork, address destinationAddress, uint256 amount, bytes metadata) returns()
-func (_Claimmockcaller *ClaimmockcallerTransactorSession) ClaimAsset(smtProofLocalExitRoot [32][32]byte, smtProofRollupExitRoot [32][32]byte, globalIndex *big.Int, mainnetExitRoot [32]byte, rollupExitRoot [32]byte, originNetwork uint32, originTokenAddress common.Address, destinationNetwork uint32, destinationAddress common.Address, amount *big.Int, metadata []byte) (*types.Transaction, error) {
-	return _Claimmockcaller.Contract.ClaimAsset(&_Claimmockcaller.TransactOpts, smtProofLocalExitRoot, smtProofRollupExitRoot, globalIndex, mainnetExitRoot, rollupExitRoot, originNetwork, originTokenAddress, destinationNetwork, destinationAddress, amount, metadata)
+// Solidity: function claim2Bytes(bytes claim1, bytes claim2, bool[2] reverted) returns()
+func (_Claimmockcaller *ClaimmockcallerTransactorSession) Claim2Bytes(claim1 []byte, claim2 []byte, reverted [2]bool) (*types.Transaction, error) {
+	return _Claimmockcaller.Contract.Claim2Bytes(&_Claimmockcaller.TransactOpts, claim1, claim2, reverted)
 }
 
-// ClaimMessage is a paid mutator transaction binding the contract method 0xf5efcd79.
+// ClaimAsset is a paid mutator transaction binding the contract method 0xa5106170.
 //
-// Solidity: function claimMessage(bytes32[32] smtProofLocalExitRoot, bytes32[32] smtProofRollupExitRoot, uint256 globalIndex, bytes32 mainnetExitRoot, bytes32 rollupExitRoot, uint32 originNetwork, address originAddress, uint32 destinationNetwork, address destinationAddress, uint256 amount, bytes metadata) returns()
-func (_Claimmockcaller *ClaimmockcallerTransactor) ClaimMessage(opts *bind.TransactOpts, smtProofLocalExitRoot [32][32]byte, smtProofRollupExitRoot [32][32]byte, globalIndex *big.Int, mainnetExitRoot [32]byte, rollupExitRoot [32]byte, originNetwork uint32, originAddress common.Address, destinationNetwork uint32, destinationAddress common.Address, amount *big.Int, metadata []byte) (*types.Transaction, error) {
-	return _Claimmockcaller.contract.Transact(opts, "claimMessage", smtProofLocalExitRoot, smtProofRollupExitRoot, globalIndex, mainnetExitRoot, rollupExitRoot, originNetwork, originAddress, destinationNetwork, destinationAddress, amount, metadata)
+// Solidity: function claimAsset(bytes32[32] smtProofLocalExitRoot, bytes32[32] smtProofRollupExitRoot, uint256 globalIndex, bytes32 mainnetExitRoot, bytes32 rollupExitRoot, uint32 originNetwork, address originTokenAddress, uint32 destinationNetwork, address destinationAddress, uint256 amount, bytes metadata, bool reverted) returns()
+func (_Claimmockcaller *ClaimmockcallerTransactor) ClaimAsset(opts *bind.TransactOpts, smtProofLocalExitRoot [32][32]byte, smtProofRollupExitRoot [32][32]byte, globalIndex *big.Int, mainnetExitRoot [32]byte, rollupExitRoot [32]byte, originNetwork uint32, originTokenAddress common.Address, destinationNetwork uint32, destinationAddress common.Address, amount *big.Int, metadata []byte, reverted bool) (*types.Transaction, error) {
+	return _Claimmockcaller.contract.Transact(opts, "claimAsset", smtProofLocalExitRoot, smtProofRollupExitRoot, globalIndex, mainnetExitRoot, rollupExitRoot, originNetwork, originTokenAddress, destinationNetwork, destinationAddress, amount, metadata, reverted)
 }
 
-// ClaimMessage is a paid mutator transaction binding the contract method 0xf5efcd79.
+// ClaimAsset is a paid mutator transaction binding the contract method 0xa5106170.
 //
-// Solidity: function claimMessage(bytes32[32] smtProofLocalExitRoot, bytes32[32] smtProofRollupExitRoot, uint256 globalIndex, bytes32 mainnetExitRoot, bytes32 rollupExitRoot, uint32 originNetwork, address originAddress, uint32 destinationNetwork, address destinationAddress, uint256 amount, bytes metadata) returns()
-func (_Claimmockcaller *ClaimmockcallerSession) ClaimMessage(smtProofLocalExitRoot [32][32]byte, smtProofRollupExitRoot [32][32]byte, globalIndex *big.Int, mainnetExitRoot [32]byte, rollupExitRoot [32]byte, originNetwork uint32, originAddress common.Address, destinationNetwork uint32, destinationAddress common.Address, amount *big.Int, metadata []byte) (*types.Transaction, error) {
-	return _Claimmockcaller.Contract.ClaimMessage(&_Claimmockcaller.TransactOpts, smtProofLocalExitRoot, smtProofRollupExitRoot, globalIndex, mainnetExitRoot, rollupExitRoot, originNetwork, originAddress, destinationNetwork, destinationAddress, amount, metadata)
+// Solidity: function claimAsset(bytes32[32] smtProofLocalExitRoot, bytes32[32] smtProofRollupExitRoot, uint256 globalIndex, bytes32 mainnetExitRoot, bytes32 rollupExitRoot, uint32 originNetwork, address originTokenAddress, uint32 destinationNetwork, address destinationAddress, uint256 amount, bytes metadata, bool reverted) returns()
+func (_Claimmockcaller *ClaimmockcallerSession) ClaimAsset(smtProofLocalExitRoot [32][32]byte, smtProofRollupExitRoot [32][32]byte, globalIndex *big.Int, mainnetExitRoot [32]byte, rollupExitRoot [32]byte, originNetwork uint32, originTokenAddress common.Address, destinationNetwork uint32, destinationAddress common.Address, amount *big.Int, metadata []byte, reverted bool) (*types.Transaction, error) {
+	return _Claimmockcaller.Contract.ClaimAsset(&_Claimmockcaller.TransactOpts, smtProofLocalExitRoot, smtProofRollupExitRoot, globalIndex, mainnetExitRoot, rollupExitRoot, originNetwork, originTokenAddress, destinationNetwork, destinationAddress, amount, metadata, reverted)
 }
 
-// ClaimMessage is a paid mutator transaction binding the contract method 0xf5efcd79.
+// ClaimAsset is a paid mutator transaction binding the contract method 0xa5106170.
 //
-// Solidity: function claimMessage(bytes32[32] smtProofLocalExitRoot, bytes32[32] smtProofRollupExitRoot, uint256 globalIndex, bytes32 mainnetExitRoot, bytes32 rollupExitRoot, uint32 originNetwork, address originAddress, uint32 destinationNetwork, address destinationAddress, uint256 amount, bytes metadata) returns()
-func (_Claimmockcaller *ClaimmockcallerTransactorSession) ClaimMessage(smtProofLocalExitRoot [32][32]byte, smtProofRollupExitRoot [32][32]byte, globalIndex *big.Int, mainnetExitRoot [32]byte, rollupExitRoot [32]byte, originNetwork uint32, originAddress common.Address, destinationNetwork uint32, destinationAddress common.Address, amount *big.Int, metadata []byte) (*types.Transaction, error) {
-	return _Claimmockcaller.Contract.ClaimMessage(&_Claimmockcaller.TransactOpts, smtProofLocalExitRoot, smtProofRollupExitRoot, globalIndex, mainnetExitRoot, rollupExitRoot, originNetwork, originAddress, destinationNetwork, destinationAddress, amount, metadata)
+// Solidity: function claimAsset(bytes32[32] smtProofLocalExitRoot, bytes32[32] smtProofRollupExitRoot, uint256 globalIndex, bytes32 mainnetExitRoot, bytes32 rollupExitRoot, uint32 originNetwork, address originTokenAddress, uint32 destinationNetwork, address destinationAddress, uint256 amount, bytes metadata, bool reverted) returns()
+func (_Claimmockcaller *ClaimmockcallerTransactorSession) ClaimAsset(smtProofLocalExitRoot [32][32]byte, smtProofRollupExitRoot [32][32]byte, globalIndex *big.Int, mainnetExitRoot [32]byte, rollupExitRoot [32]byte, originNetwork uint32, originTokenAddress common.Address, destinationNetwork uint32, destinationAddress common.Address, amount *big.Int, metadata []byte, reverted bool) (*types.Transaction, error) {
+	return _Claimmockcaller.Contract.ClaimAsset(&_Claimmockcaller.TransactOpts, smtProofLocalExitRoot, smtProofRollupExitRoot, globalIndex, mainnetExitRoot, rollupExitRoot, originNetwork, originTokenAddress, destinationNetwork, destinationAddress, amount, metadata, reverted)
+}
+
+// ClaimBytes is a paid mutator transaction binding the contract method 0x27e35843.
+//
+// Solidity: function claimBytes(bytes claim, bool reverted) returns()
+func (_Claimmockcaller *ClaimmockcallerTransactor) ClaimBytes(opts *bind.TransactOpts, claim []byte, reverted bool) (*types.Transaction, error) {
+	return _Claimmockcaller.contract.Transact(opts, "claimBytes", claim, reverted)
+}
+
+// ClaimBytes is a paid mutator transaction binding the contract method 0x27e35843.
+//
+// Solidity: function claimBytes(bytes claim, bool reverted) returns()
+func (_Claimmockcaller *ClaimmockcallerSession) ClaimBytes(claim []byte, reverted bool) (*types.Transaction, error) {
+	return _Claimmockcaller.Contract.ClaimBytes(&_Claimmockcaller.TransactOpts, claim, reverted)
+}
+
+// ClaimBytes is a paid mutator transaction binding the contract method 0x27e35843.
+//
+// Solidity: function claimBytes(bytes claim, bool reverted) returns()
+func (_Claimmockcaller *ClaimmockcallerTransactorSession) ClaimBytes(claim []byte, reverted bool) (*types.Transaction, error) {
+	return _Claimmockcaller.Contract.ClaimBytes(&_Claimmockcaller.TransactOpts, claim, reverted)
+}
+
+// ClaimMessage is a paid mutator transaction binding the contract method 0x01beea65.
+//
+// Solidity: function claimMessage(bytes32[32] smtProofLocalExitRoot, bytes32[32] smtProofRollupExitRoot, uint256 globalIndex, bytes32 mainnetExitRoot, bytes32 rollupExitRoot, uint32 originNetwork, address originAddress, uint32 destinationNetwork, address destinationAddress, uint256 amount, bytes metadata, bool reverted) returns()
+func (_Claimmockcaller *ClaimmockcallerTransactor) ClaimMessage(opts *bind.TransactOpts, smtProofLocalExitRoot [32][32]byte, smtProofRollupExitRoot [32][32]byte, globalIndex *big.Int, mainnetExitRoot [32]byte, rollupExitRoot [32]byte, originNetwork uint32, originAddress common.Address, destinationNetwork uint32, destinationAddress common.Address, amount *big.Int, metadata []byte, reverted bool) (*types.Transaction, error) {
+	return _Claimmockcaller.contract.Transact(opts, "claimMessage", smtProofLocalExitRoot, smtProofRollupExitRoot, globalIndex, mainnetExitRoot, rollupExitRoot, originNetwork, originAddress, destinationNetwork, destinationAddress, amount, metadata, reverted)
+}
+
+// ClaimMessage is a paid mutator transaction binding the contract method 0x01beea65.
+//
+// Solidity: function claimMessage(bytes32[32] smtProofLocalExitRoot, bytes32[32] smtProofRollupExitRoot, uint256 globalIndex, bytes32 mainnetExitRoot, bytes32 rollupExitRoot, uint32 originNetwork, address originAddress, uint32 destinationNetwork, address destinationAddress, uint256 amount, bytes metadata, bool reverted) returns()
+func (_Claimmockcaller *ClaimmockcallerSession) ClaimMessage(smtProofLocalExitRoot [32][32]byte, smtProofRollupExitRoot [32][32]byte, globalIndex *big.Int, mainnetExitRoot [32]byte, rollupExitRoot [32]byte, originNetwork uint32, originAddress common.Address, destinationNetwork uint32, destinationAddress common.Address, amount *big.Int, metadata []byte, reverted bool) (*types.Transaction, error) {
+	return _Claimmockcaller.Contract.ClaimMessage(&_Claimmockcaller.TransactOpts, smtProofLocalExitRoot, smtProofRollupExitRoot, globalIndex, mainnetExitRoot, rollupExitRoot, originNetwork, originAddress, destinationNetwork, destinationAddress, amount, metadata, reverted)
+}
+
+// ClaimMessage is a paid mutator transaction binding the contract method 0x01beea65.
+//
+// Solidity: function claimMessage(bytes32[32] smtProofLocalExitRoot, bytes32[32] smtProofRollupExitRoot, uint256 globalIndex, bytes32 mainnetExitRoot, bytes32 rollupExitRoot, uint32 originNetwork, address originAddress, uint32 destinationNetwork, address destinationAddress, uint256 amount, bytes metadata, bool reverted) returns()
+func (_Claimmockcaller *ClaimmockcallerTransactorSession) ClaimMessage(smtProofLocalExitRoot [32][32]byte, smtProofRollupExitRoot [32][32]byte, globalIndex *big.Int, mainnetExitRoot [32]byte, rollupExitRoot [32]byte, originNetwork uint32, originAddress common.Address, destinationNetwork uint32, destinationAddress common.Address, amount *big.Int, metadata []byte, reverted bool) (*types.Transaction, error) {
+	return _Claimmockcaller.Contract.ClaimMessage(&_Claimmockcaller.TransactOpts, smtProofLocalExitRoot, smtProofRollupExitRoot, globalIndex, mainnetExitRoot, rollupExitRoot, originNetwork, originAddress, destinationNetwork, destinationAddress, amount, metadata, reverted)
 }
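
For orientation, the regenerated bindings keep abigen's usual Transactor/Session layering, so the new bytes-oriented entry points are called like any other paid mutator. Below is a minimal sketch of driving claimBytes through the bindings; the import path, RPC endpoint, chain ID, and contract address are all assumptions for illustration, and the throwaway key would need funding in a real run.

package main

import (
	"log"
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethclient"

	// Hypothetical import path for the abigen output shown above.
	claimmockcaller "example.com/test/contracts/claimmockcaller"
)

func main() {
	// Assumed local devnet endpoint.
	client, err := ethclient.Dial("http://localhost:8545")
	if err != nil {
		log.Fatalf("dial: %v", err)
	}

	// Throwaway key for the sketch; a real transaction needs a funded account.
	key, err := crypto.GenerateKey()
	if err != nil {
		log.Fatalf("key: %v", err)
	}
	auth, err := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) // assumed chain ID
	if err != nil {
		log.Fatalf("transactor: %v", err)
	}

	// Assumed deployment address of the mock caller.
	contract, err := claimmockcaller.NewClaimmockcaller(
		common.HexToAddress("0x0000000000000000000000000000000000000000"), client)
	if err != nil {
		log.Fatalf("bind: %v", err)
	}

	// encodedClaim is the ABI-encoded inner claim call the mock forwards;
	// reverted=false means the forwarded call is expected to succeed.
	var encodedClaim []byte
	tx, err := contract.ClaimBytes(auth, encodedClaim, false)
	if err != nil {
		log.Fatalf("claimBytes: %v", err)
	}
	log.Printf("claimBytes tx sent: %s", tx.Hash().Hex())
}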
diff --git a/test/contracts/compile.sh b/test/contracts/compile.sh
index d4f423e8d..faeba1250 100755
--- a/test/contracts/compile.sh
+++ b/test/contracts/compile.sh
@@ -6,11 +6,11 @@ rm -f IBasePolygonZkEVMGlobalExitRoot.bin
 rm -f IPolygonZkEVMGlobalExitRootV2.abi
 rm -f IPolygonZkEVMGlobalExitRootV2.bin
 
-docker run --rm -v $(pwd):/contracts ethereum/solc:0.8.18-alpine - /contracts/claimmock/ClaimMock.sol -o /contracts --abi --bin --overwrite --optimize
+docker run --rm -v $(pwd):/contracts ethereum/solc:0.8.18-alpine - /contracts/claimmock/ClaimMock.sol -o /contracts --abi --bin --overwrite --optimize --via-ir
 mv -f ClaimMock.abi abi/claimmock.abi
 mv -f ClaimMock.bin bin/claimmock.bin
 
-docker run --rm -v $(pwd):/contracts ethereum/solc:0.8.18-alpine - /contracts/claimmockcaller/ClaimMockCaller.sol -o /contracts --abi --bin --overwrite --optimize
+docker run --rm -v $(pwd):/contracts ethereum/solc:0.8.18-alpine - /contracts/claimmockcaller/ClaimMockCaller.sol -o /contracts --abi --bin --overwrite --optimize --via-ir
 mv -f ClaimMockCaller.abi abi/claimmockcaller.abi
 mv -f ClaimMockCaller.bin bin/claimmockcaller.bin
 rm -f IClaimMock.abi
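
The new --via-ir switch routes solc 0.8.18 through its Yul IR pipeline, which commonly avoids stack-too-deep failures once functions gain extra parameters such as the reverted flags above; that this is the motivation here is an assumption, but it is the usual reason to enable it. As a quick sanity check after recompiling, a short Go program can confirm the regenerated ABI still exposes every mock method and print the selectors the binding comments cite (the ABI path is assumed relative to test/contracts):

package main

import (
	"log"
	"os"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	raw, err := os.ReadFile("abi/claimmockcaller.abi") // assumed working directory: test/contracts
	if err != nil {
		log.Fatalf("read abi: %v", err)
	}
	parsed, err := abi.JSON(strings.NewReader(string(raw)))
	if err != nil {
		log.Fatalf("parse abi: %v", err)
	}
	// The selectors printed here should match the generated binding
	// comments, e.g. claimBytes -> 0x27e35843.
	for _, name := range []string{"claimAsset", "claimMessage", "claimBytes", "claim2Bytes"} {
		method, ok := parsed.Methods[name]
		if !ok {
			log.Fatalf("method %q missing from regenerated ABI", name)
		}
		log.Printf("%s -> selector 0x%x", name, method.ID)
	}
}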

From bb3586a65015cb0049032b83f52d1770d72d286b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Stefan=20Negovanovi=C4=87?=
 <93934272+Stefan-Ethernal@users.noreply.github.com>
Date: Thu, 12 Sep 2024 10:28:44 +0200
Subject: [PATCH 5/6] fix: CodeQL Github workflow (#72)

* fix: codeql gh action

* fix: remove comments

* fix: remove bash as language
---
 .github/workflows/codeql.yml | 88 ++++++++++--------------------------
 1 file changed, 25 insertions(+), 63 deletions(-)

diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml
index 75c0ab874..f6205e617 100644
--- a/.github/workflows/codeql.yml
+++ b/.github/workflows/codeql.yml
@@ -1,35 +1,17 @@
-# For most projects, this workflow file will not need changing; you simply need
-# to commit it to your repository.
-#
-# You may wish to alter this file to override the set of languages analyzed,
-# or to provide custom queries or build logic.
-#
-# ******** NOTE ********
-# We have attempted to detect the languages in your repository. Please check
-# the `language` matrix defined below to confirm you have the correct set of
-# supported CodeQL languages.
-#
 name: "CodeQL"
 
 on:
   push:
-    branches: [ $default-branch, $protected-branches ]
+    branches:
+      - develop
+      - main
   pull_request:
-    # The branches below must be a subset of the branches above
-    branches: [ $default-branch ]
-  schedule:
-    - cron: $cron-weekly
 
 jobs:
   analyze:
     name: Analyze
-    # Runner size impacts CodeQL analysis time. To learn more, please see:
-    #   - https://gh.io/recommended-hardware-resources-for-running-codeql
-    #   - https://gh.io/supported-runners-and-hardware-resources
-    #   - https://gh.io/using-larger-runners
-    # Consider using larger runners for possible analysis time improvements.
-    runs-on: ${{ (matrix.language == 'swift' && 'macos-latest') || 'ubuntu-latest' }}
-    timeout-minutes: ${{ (matrix.language == 'swift' && 120) || 360 }}
+    runs-on: ${{ matrix.language == 'swift' && 'macos-latest' || 'ubuntu-latest' }}
+    timeout-minutes: ${{ matrix.language == 'swift' && 120 || 360 }}
     permissions:
       actions: read
       contents: read
@@ -38,45 +20,25 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        language: [ $detected-codeql-languages ]
-        # CodeQL supports [ $supported-codeql-languages ]
-        # Use only 'java-kotlin' to analyze code written in Java, Kotlin or both
-        # Use only 'javascript-typescript' to analyze code written in JavaScript, TypeScript or both
-        # Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support
+        language:
+          - go
 
     steps:
-    - name: Checkout repository
-      uses: actions/checkout@v4
-
-    # Initializes the CodeQL tools for scanning.
-    - name: Initialize CodeQL
-      uses: github/codeql-action/init@v2
-      with:
-        languages: ${{ matrix.language }}
-        # If you wish to specify custom queries, you can do so here or in a config file.
-        # By default, queries listed here will override any specified in a config file.
-        # Prefix the list here with "+" to use these queries and those in the config file.
-
-        # For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
-        # queries: security-extended,security-and-quality
-
-
-    # Autobuild attempts to build any compiled languages (C/C++, C#, Go, Java, or Swift).
-    # If this step fails, then you should remove it and run the build manually (see below)
-    - name: Autobuild
-      uses: github/codeql-action/autobuild@v2
-
-    # ℹ️ Command-line programs to run using the OS shell.
-    # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
-
-    #   If the Autobuild fails above, remove it and uncomment the following three lines.
-    #   modify them (or add more) to build your code if your project, please refer to the EXAMPLE below for guidance.
-
-    # - run: |
-    #     echo "Run, Build Application using script"
-    #     ./location_of_script_within_repo/buildscript.sh
-
-    - name: Perform CodeQL Analysis
-      uses: github/codeql-action/analyze@v2
-      with:
-        category: "/language:${{matrix.language}}"
+      - name: Checkout repository
+        uses: actions/checkout@v4
+
+      # Initializes the CodeQL tools for scanning.
+      - name: Initialize CodeQL
+        uses: github/codeql-action/init@v2
+        with:
+          languages: ${{ matrix.language }}
+
+      # Autobuild attempts to build any compiled languages (C/C++, C#, Go, Java, or Swift).
+      # If this step fails, then you should remove it and run the build manually (see below)
+      - name: Autobuild
+        uses: github/codeql-action/autobuild@v2
+
+      - name: Perform CodeQL Analysis
+        uses: github/codeql-action/analyze@v2
+        with:
+          category: "/language:${{ matrix.language }}"

From 0959afc7a9689f685e1aad2ca6f988193a510872 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Toni=20Ram=C3=ADrez?=
 <58293609+ToniRamirezM@users.noreply.github.com>
Date: Thu, 12 Sep 2024 12:29:30 +0200
Subject: [PATCH 6/6] feat: remove final proof sanity check (#71)

---
 aggregator/aggregator.go | 22 ----------------------
 aggregator/config.go     |  3 ---
 config/default.go        |  1 -
 3 files changed, 26 deletions(-)

diff --git a/aggregator/aggregator.go b/aggregator/aggregator.go
index f3e039f99..0ba92c25c 100644
--- a/aggregator/aggregator.go
+++ b/aggregator/aggregator.go
@@ -1045,28 +1045,6 @@ func (a *Aggregator) buildFinalProof(
 		finalProof.Public.NewLocalExitRoot = finalDBBatch.Batch.LocalExitRoot.Bytes()
 	}
 
-	// Sanity Check: state root from the proof must match the one from the final batch
-	if a.cfg.FinalProofSanityCheckEnabled {
-		finalDBBatch, err := a.state.GetBatch(ctx, proof.BatchNumberFinal, nil)
-		if err != nil {
-			return nil, fmt.Errorf("failed to retrieve batch with number [%d]", proof.BatchNumberFinal)
-		}
-
-		if common.BytesToHash(finalProof.Public.NewStateRoot).String() != finalDBBatch.Batch.StateRoot.String() {
-			for {
-				log.Errorf(
-					"State root from the final proof does not match the expected for batch %d: Proof = [%s] Expected = [%s]",
-					proof.BatchNumberFinal,
-					common.BytesToHash(finalProof.Public.NewStateRoot).String(),
-					finalDBBatch.Batch.StateRoot.String(),
-				)
-				time.Sleep(a.cfg.RetryTime.Duration)
-			}
-		} else {
-			log.Infof("State root sanity check from the final proof for batch %d passed", proof.BatchNumberFinal)
-		}
-	}
-
 	return finalProof, nil
 }
 
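Worth noting: the removed guard did not merely log a mismatch; its bare for loop had no exit, so a state-root mismatch left buildFinalProof spinning on log-and-sleep indefinitely. If an equivalent check were ever reintroduced, a bounded variant that surfaces an error is the safer shape. A minimal sketch under that assumption (checkFinalStateRoot is a hypothetical helper, not part of this patch):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

// checkFinalStateRoot compares the state root reported by the final proof
// against the batch stored in state, failing fast instead of looping forever.
func checkFinalStateRoot(proofStateRoot []byte, expected common.Hash, batchNumber uint64) error {
	got := common.BytesToHash(proofStateRoot)
	if got != expected {
		return fmt.Errorf(
			"state root from final proof for batch %d does not match: proof=%s expected=%s",
			batchNumber, got.String(), expected.String(),
		)
	}
	return nil
}

func main() {
	// Toy usage with zero values; real inputs come from the prover and the state DB.
	if err := checkFinalStateRoot(make([]byte, 32), common.Hash{}, 42); err != nil {
		fmt.Println(err)
	} else {
		fmt.Println("state root sanity check passed for batch 42")
	}
}
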
diff --git a/aggregator/config.go b/aggregator/config.go
index e17a24d1f..4550c6376 100644
--- a/aggregator/config.go
+++ b/aggregator/config.go
@@ -82,9 +82,6 @@ type Config struct {
 	// BatchProofSanityCheckEnabled is a flag to enable the sanity check of the batch proof
 	BatchProofSanityCheckEnabled bool `mapstructure:"BatchProofSanityCheckEnabled"`
 
-	// FinalProofSanityCheckEnabled is a flag to enable the sanity check of the final proof
-	FinalProofSanityCheckEnabled bool `mapstructure:"FinalProofSanityCheckEnabled"`
-
 	// ChainID is the L2 ChainID provided by the Network Config
 	ChainID uint64
 
diff --git a/config/default.go b/config/default.go
index 0cd33d798..ce76abc43 100644
--- a/config/default.go
+++ b/config/default.go
@@ -61,7 +61,6 @@ SenderAddress = ""
 CleanupLockedProofsInterval = "2m"
 GeneratingProofCleanupThreshold = "10m"
 BatchProofSanityCheckEnabled = true
-FinalProofSanityCheckEnabled = true
 ForkId = 9
 GasOffset = 0
 WitnessURL = "localhost:8123"