diff --git a/aggoracle/config.go b/aggoracle/config.go
index 2dd394032..e60977070 100644
--- a/aggoracle/config.go
+++ b/aggoracle/config.go
@@ -18,8 +18,8 @@ var (
 type Config struct {
 	TargetChainType TargetChainType `mapstructure:"TargetChainType"`
 	URLRPCL1        string          `mapstructure:"URLRPCL1"`
-	// TODO: BlockFinality doesnt work as per the jsonschema
-	BlockFinality     string                   `jsonschema:"enum=latest,enum=safe, enum=pending, enum=finalized" mapstructure:"BlockFinality"`
+	// BlockFinality indicates the status of the blocks that will be queried in order to sync
+	BlockFinality     string                   `jsonschema:"enum=LatestBlock, enum=SafeBlock, enum=PendingBlock, enum=FinalizedBlock, enum=EarliestBlock" mapstructure:"BlockFinality"`
 	WaitPeriodNextGER types.Duration           `mapstructure:"WaitPeriodNextGER"`
 	EVMSender         chaingersender.EVMConfig `mapstructure:"EVMSender"`
 }
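
The new enum names match the values accepted by etherman's block-finality type, which is what the syncers ultimately receive. A minimal sketch of that mapping, assuming the same string-to-type conversion that cmd/run.go applies for the other syncers in this diff:

```go
package main

import (
	"fmt"

	"github.com/0xPolygon/cdk/aggoracle"
	"github.com/0xPolygon/cdk/etherman"
)

func main() {
	// One of the values listed in the jsonschema enum above.
	cfg := aggoracle.Config{BlockFinality: "FinalizedBlock"}
	// Same conversion cmd/run.go uses before handing the value to a syncer.
	finality := etherman.BlockNumberFinality(cfg.BlockFinality)
	fmt.Println(finality)
}
```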
diff --git a/aggoracle/e2e_test.go b/aggoracle/e2e_test.go
index 600021e07..1eae07279 100644
--- a/aggoracle/e2e_test.go
+++ b/aggoracle/e2e_test.go
@@ -1,196 +1,23 @@
 package aggoracle_test
 
 import (
-	"context"
-	"errors"
 	"fmt"
-	"math/big"
 	"strconv"
 	"testing"
 	"time"
 
 	gerContractL1 "github.com/0xPolygon/cdk-contracts-tooling/contracts/manual/globalexitrootnopush0"
-	gerContractEVMChain "github.com/0xPolygon/cdk-contracts-tooling/contracts/manual/pessimisticglobalexitrootnopush0"
 	"github.com/0xPolygon/cdk/aggoracle"
-	"github.com/0xPolygon/cdk/aggoracle/chaingersender"
-	"github.com/0xPolygon/cdk/etherman"
-	"github.com/0xPolygon/cdk/l1infotreesync"
-	"github.com/0xPolygon/cdk/log"
-	"github.com/0xPolygon/cdk/reorgdetector"
-	ethtxmanager "github.com/0xPolygonHermez/zkevm-ethtx-manager/ethtxmanager"
-	"github.com/ethereum/go-ethereum"
+	"github.com/0xPolygon/cdk/test/helpers"
 	"github.com/ethereum/go-ethereum/accounts/abi/bind"
 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/core/types"
-	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/ethclient/simulated"
-	mock "github.com/stretchr/testify/mock"
 	"github.com/stretchr/testify/require"
 )
 
 func TestEVM(t *testing.T) {
-	ctx := context.Background()
-	l1Client, syncer, gerL1Contract, authL1 := commonSetup(t)
-	sender := evmSetup(t)
-	oracle, err := aggoracle.New(sender, l1Client.Client(), syncer, etherman.LatestBlock, time.Millisecond)
-	require.NoError(t, err)
-	go oracle.Start(ctx)
-
-	runTest(t, gerL1Contract, sender, l1Client, authL1)
-}
-
-func commonSetup(t *testing.T) (
-	*simulated.Backend,
-	*l1infotreesync.L1InfoTreeSync,
-	*gerContractL1.Globalexitrootnopush0,
-	*bind.TransactOpts,
-) {
-	// Config and spin up
-	ctx := context.Background()
-	// Simulated L1
-	privateKeyL1, err := crypto.GenerateKey()
-	require.NoError(t, err)
-	authL1, err := bind.NewKeyedTransactorWithChainID(privateKeyL1, big.NewInt(1337))
-	require.NoError(t, err)
-	l1Client, gerL1Addr, gerL1Contract, err := newSimulatedL1(authL1)
-	require.NoError(t, err)
-	// Reorg detector
-	dbPathReorgDetector := t.TempDir()
-	reorg, err := reorgdetector.New(ctx, l1Client.Client(), dbPathReorgDetector)
-	require.NoError(t, err)
-	// Syncer
-	dbPathSyncer := t.TempDir()
-	syncer, err := l1infotreesync.New(ctx, dbPathSyncer, gerL1Addr, common.Address{}, 10, etherman.LatestBlock, reorg, l1Client.Client(), time.Millisecond, 0, 100*time.Millisecond, 3)
-	require.NoError(t, err)
-	go syncer.Start(ctx)
-
-	return l1Client, syncer, gerL1Contract, authL1
-}
-
-func evmSetup(t *testing.T) aggoracle.ChainSender {
-	privateKeyL2, err := crypto.GenerateKey()
-	require.NoError(t, err)
-	authL2, err := bind.NewKeyedTransactorWithChainID(privateKeyL2, big.NewInt(1337))
-	require.NoError(t, err)
-	l2Client, gerL2Addr, _, err := newSimulatedEVMAggSovereignChain(authL2)
-	require.NoError(t, err)
-	ethTxManMock := aggoracle.NewEthTxManagerMock(t)
-	// id, err := c.ethTxMan.Add(ctx, &c.gerAddr, nil, big.NewInt(0), tx.Data(), c.gasOffset, nil)
-	ethTxManMock.On("Add", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).
-		Run(func(args mock.Arguments) {
-			ctx := context.Background()
-			nonce, err := l2Client.Client().PendingNonceAt(ctx, authL2.From)
-			if err != nil {
-				log.Error(err)
-				return
-			}
-			gas, err := l2Client.Client().EstimateGas(ctx, ethereum.CallMsg{
-				From:  authL2.From,
-				To:    args.Get(1).(*common.Address),
-				Value: big.NewInt(0),
-				Data:  args.Get(4).([]byte),
-			})
-			if err != nil {
-				log.Error(err)
-				res, err := l2Client.Client().CallContract(ctx, ethereum.CallMsg{
-					From:  authL2.From,
-					To:    args.Get(1).(*common.Address),
-					Value: big.NewInt(0),
-					Data:  args.Get(4).([]byte),
-				}, nil)
-				log.Debugf("contract call: %s", res)
-				if err != nil {
-					log.Error(err)
-				}
-				return
-			}
-			price, err := l2Client.Client().SuggestGasPrice(ctx)
-			if err != nil {
-				log.Error(err)
-			}
-			tx := types.NewTx(&types.LegacyTx{
-				To:       args.Get(1).(*common.Address),
-				Nonce:    nonce,
-				Value:    big.NewInt(0),
-				Data:     args.Get(4).([]byte),
-				Gas:      gas,
-				GasPrice: price,
-			})
-			tx.Gas()
-			signedTx, err := authL2.Signer(authL2.From, tx)
-			if err != nil {
-				log.Error(err)
-				return
-			}
-			err = l2Client.Client().SendTransaction(ctx, signedTx)
-			if err != nil {
-				log.Error(err)
-				return
-			}
-			l2Client.Commit()
-		}).
-		Return(common.Hash{}, nil)
-	// res, err := c.ethTxMan.Result(ctx, id)
-	ethTxManMock.On("Result", mock.Anything, mock.Anything).
-		Return(ethtxmanager.MonitoredTxResult{Status: ethtxmanager.MonitoredTxStatusMined}, nil)
-	sender, err := chaingersender.NewEVMChainGERSender(gerL2Addr, authL2.From, l2Client.Client(), ethTxManMock, 0, time.Millisecond*50)
-	require.NoError(t, err)
-
-	return sender
-}
-
-func newSimulatedL1(auth *bind.TransactOpts) (
-	client *simulated.Backend,
-	gerAddr common.Address,
-	gerContract *gerContractL1.Globalexitrootnopush0,
-	err error,
-) {
-	balance, _ := new(big.Int).SetString("10000000000000000000000000", 10) //nolint:gomnd
-	address := auth.From
-	genesisAlloc := map[common.Address]types.Account{
-		address: {
-			Balance: balance,
-		},
-	}
-	blockGasLimit := uint64(999999999999999999) //nolint:gomnd
-	client = simulated.NewBackend(genesisAlloc, simulated.WithBlockGasLimit(blockGasLimit))
-
-	gerAddr, _, gerContract, err = gerContractL1.DeployGlobalexitrootnopush0(auth, client.Client(), auth.From, auth.From)
-
-	client.Commit()
-	return
-}
-
-func newSimulatedEVMAggSovereignChain(auth *bind.TransactOpts) (
-	client *simulated.Backend,
-	gerAddr common.Address,
-	gerContract *gerContractEVMChain.Pessimisticglobalexitrootnopush0,
-	err error,
-) {
-	balance, _ := new(big.Int).SetString("10000000000000000000000000", 10) //nolint:gomnd
-	address := auth.From
-	genesisAlloc := map[common.Address]types.Account{
-		address: {
-			Balance: balance,
-		},
-	}
-	blockGasLimit := uint64(999999999999999999) //nolint:gomnd
-	client = simulated.NewBackend(genesisAlloc, simulated.WithBlockGasLimit(blockGasLimit))
-
-	gerAddr, _, gerContract, err = gerContractEVMChain.DeployPessimisticglobalexitrootnopush0(auth, client.Client(), auth.From)
-	if err != nil {
-		return
-	}
-	client.Commit()
-
-	_GLOBAL_EXIT_ROOT_SETTER_ROLE := common.HexToHash("0x7b95520991dfda409891be0afa2635b63540f92ee996fda0bf695a166e5c5176")
-	_, err = gerContract.GrantRole(auth, _GLOBAL_EXIT_ROOT_SETTER_ROLE, auth.From)
-	client.Commit()
-	hasRole, _ := gerContract.HasRole(&bind.CallOpts{Pending: false}, _GLOBAL_EXIT_ROOT_SETTER_ROLE, auth.From)
-	if !hasRole {
-		err = errors.New("failed to set role")
-	}
-	return
+	env := helpers.SetupAggoracleWithEVMChain(t)
+	runTest(t, env.GERL1Contract, env.AggOracleSender, env.L1Client, env.AuthL1)
 }
 
 func runTest(
diff --git a/bridgesync/bridgesync.go b/bridgesync/bridgesync.go
index 063d06f31..e417abc37 100644
--- a/bridgesync/bridgesync.go
+++ b/bridgesync/bridgesync.go
@@ -15,12 +15,7 @@ const (
 	downloadBufferSize = 1000
 )
 
-var (
-	retryAfterErrorPeriod      = time.Second * 10
-	maxRetryAttemptsAfterError = 5
-)
-
-type LocalBridgeSync struct {
+type BridgeSync struct {
 	processor *processor
 	driver    *sync.EVMDriver
 }
@@ -35,8 +30,11 @@ func NewL1(
 	rd sync.ReorgDetector,
 	ethClient EthClienter,
 	initialBlock uint64,
-) (*LocalBridgeSync, error) {
-	return new(
+	waitForNewBlocksPeriod time.Duration,
+	retryAfterErrorPeriod time.Duration,
+	maxRetryAttemptsAfterError int,
+) (*BridgeSync, error) {
+	return newBridgeSync(
 		ctx,
 		dbPath,
 		bridge,
@@ -46,6 +44,9 @@ func NewL1(
 		ethClient,
 		initialBlock,
 		bridgeSyncL1,
+		waitForNewBlocksPeriod,
+		retryAfterErrorPeriod,
+		maxRetryAttemptsAfterError,
 	)
 }
 
@@ -59,8 +60,11 @@ func NewL2(
 	rd sync.ReorgDetector,
 	ethClient EthClienter,
 	initialBlock uint64,
-) (*LocalBridgeSync, error) {
-	return new(
+	waitForNewBlocksPeriod time.Duration,
+	retryAfterErrorPeriod time.Duration,
+	maxRetryAttemptsAfterError int,
+) (*BridgeSync, error) {
+	return newBridgeSync(
 		ctx,
 		dbPath,
 		bridge,
@@ -70,10 +74,13 @@ func NewL2(
 		ethClient,
 		initialBlock,
 		bridgeSyncL2,
+		waitForNewBlocksPeriod,
+		retryAfterErrorPeriod,
+		maxRetryAttemptsAfterError,
 	)
 }
 
-func new(
+func newBridgeSync(
 	ctx context.Context,
 	dbPath string,
 	bridge common.Address,
@@ -83,7 +90,10 @@ func new(
 	ethClient EthClienter,
 	initialBlock uint64,
 	l1OrL2ID string,
-) (*LocalBridgeSync, error) {
+	waitForNewBlocksPeriod time.Duration,
+	retryAfterErrorPeriod time.Duration,
+	maxRetryAttemptsAfterError int,
+) (*BridgeSync, error) {
 	processor, err := newProcessor(ctx, dbPath, l1OrL2ID)
 	if err != nil {
 		return nil, err
@@ -110,6 +120,7 @@ func new(
 		return nil, err
 	}
 	downloader, err := sync.NewEVMDownloader(
+		l1OrL2ID,
 		ethClient,
 		syncBlockChunkSize,
 		blockFinalityType,
@@ -126,13 +137,29 @@ func new(
 	if err != nil {
 		return nil, err
 	}
-	return &LocalBridgeSync{
+	return &BridgeSync{
 		processor: processor,
 		driver:    driver,
 	}, nil
 }
 
 // Start starts the synchronization process
-func (s *LocalBridgeSync) Start(ctx context.Context) {
+func (s *BridgeSync) Start(ctx context.Context) {
 	s.driver.Sync(ctx)
 }
+
+func (s *BridgeSync) GetLastProcessedBlock(ctx context.Context) (uint64, error) {
+	return s.processor.GetLastProcessedBlock(ctx)
+}
+
+func (s *BridgeSync) GetBridgeIndexByRoot(ctx context.Context, root common.Hash) (uint32, error) {
+	return s.processor.exitTree.GetIndexByRoot(ctx, root)
+}
+
+func (s *BridgeSync) GetClaimsAndBridges(ctx context.Context, fromBlock, toBlock uint64) ([]Event, error) {
+	return s.processor.GetClaimsAndBridges(ctx, fromBlock, toBlock)
+}
+
+func (s *BridgeSync) GetProof(ctx context.Context, depositCount uint32, localExitRoot common.Hash) ([32]common.Hash, error) {
+	return s.processor.exitTree.GetProof(ctx, depositCount, localExitRoot)
+}
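
With the new constructor signature, the timing and retry knobs come from configuration instead of package-level vars. A minimal wiring sketch for the L1 syncer, mirroring the parameter order of the calls in the e2e test and cmd/run.go below (endpoint, address and durations are illustrative, not recommendations):

```go
package main

import (
	"context"
	"time"

	"github.com/0xPolygon/cdk/bridgesync"
	"github.com/0xPolygon/cdk/etherman"
	"github.com/0xPolygon/cdk/log"
	"github.com/0xPolygon/cdk/reorgdetector"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethclient"
)

func main() {
	ctx := context.Background()
	// Example endpoint and bridge address; replace with real values.
	l1Client, err := ethclient.Dial("http://localhost:8545")
	if err != nil {
		log.Fatal(err)
	}
	rd, err := reorgdetector.New(ctx, l1Client, "/tmp/reorgdetectorl1")
	if err != nil {
		log.Fatal(err)
	}
	go rd.Start(ctx)

	syncer, err := bridgesync.NewL1(
		ctx,
		"/tmp/bridgel1sync",
		common.HexToAddress("0xB7098a13a48EcE087d3DA15b2D28eCE0f89819B8"),
		100,                  // SyncBlockChunkSize
		etherman.LatestBlock, // BlockFinality
		rd,
		l1Client,
		0,             // InitialBlockNum
		3*time.Second, // WaitForNewBlocksPeriod
		time.Second,   // RetryAfterErrorPeriod
		-1,            // MaxRetryAttemptsAfterError (< 0 means unlimited retries)
	)
	if err != nil {
		log.Fatal(err)
	}
	go syncer.Start(ctx)
	select {} // keep the process alive while the syncer runs
}
```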
diff --git a/bridgesync/config.go b/bridgesync/config.go
new file mode 100644
index 000000000..9aa849e2c
--- /dev/null
+++ b/bridgesync/config.go
@@ -0,0 +1,27 @@
+package bridgesync
+
+import (
+	"github.com/0xPolygon/cdk/config/types"
+	"github.com/ethereum/go-ethereum/common"
+)
+
+type Config struct {
+	// DBPath path of the DB
+	DBPath string `mapstructure:"DBPath"`
+	// BlockFinality indicates the status of the blocks that will be queried in order to sync
+	BlockFinality string `jsonschema:"enum=LatestBlock, enum=SafeBlock, enum=PendingBlock, enum=FinalizedBlock, enum=EarliestBlock" mapstructure:"BlockFinality"`
+	// InitialBlockNum is the first block that will be queried when starting the synchronization from scratch.
+	// It should be a block number equal to or below the block at which the bridge contract was deployed
+	InitialBlockNum uint64 `mapstructure:"InitialBlockNum"`
+	// BridgeAddr is the address of the bridge smart contract
+	BridgeAddr common.Address `mapstructure:"BridgeAddr"`
+	// SyncBlockChunkSize is the number of blocks that will be queried from the client on each request
+	SyncBlockChunkSize uint64 `mapstructure:"SyncBlockChunkSize"`
+	// RetryAfterErrorPeriod is the time to wait before retrying after an unexpected error
+	RetryAfterErrorPeriod types.Duration `mapstructure:"RetryAfterErrorPeriod"`
+	// MaxRetryAttemptsAfterError is the maximum number of consecutive attempts that will happen before panicking.
+	// Any number smaller than zero will be considered as unlimited retries
+	MaxRetryAttemptsAfterError int `mapstructure:"MaxRetryAttemptsAfterError"`
+	// WaitForNewBlocksPeriod is the time to wait once the synchronizer has reached the latest block
+	WaitForNewBlocksPeriod types.Duration `mapstructure:"WaitForNewBlocksPeriod"`
+}
diff --git a/bridgesync/downloader.go b/bridgesync/downloader.go
index d2ef4e7c5..9ed031b59 100644
--- a/bridgesync/downloader.go
+++ b/bridgesync/downloader.go
@@ -3,7 +3,6 @@ package bridgesync
 import (
 	"fmt"
 	"math/big"
-	"time"
 
 	"github.com/0xPolygon/cdk-contracts-tooling/contracts/etrog/polygonzkevmbridge"
 	"github.com/0xPolygon/cdk-contracts-tooling/contracts/etrog/polygonzkevmbridgev2"
@@ -15,10 +14,6 @@ import (
 	"github.com/ethereum/go-ethereum/crypto"
 )
 
-const (
-	waitForNewBlocksPeriod = time.Millisecond * 100
-)
-
 var (
 	bridgeEventSignature        = crypto.Keccak256Hash([]byte("BridgeEvent(uint8,uint32,address,uint32,address,uint256,bytes,uint32)"))
 	claimEventSignature         = crypto.Keccak256Hash([]byte("ClaimEvent(uint256,uint32,address,address,uint256)"))
@@ -82,7 +77,7 @@ func buildAppender(client EthClienter, bridge common.Address) (sync.LogAppenderM
 		return nil
 	}
 
-	appender[claimEventSignature] = func(b *sync.EVMBlock, l types.Log) error {
+	appender[claimEventSignaturePreEtrog] = func(b *sync.EVMBlock, l types.Log) error {
 		claim, err := bridgeContractV1.ParseClaimEvent(l)
 		if err != nil {
 			return fmt.Errorf(
diff --git a/bridgesync/e2e_test.go b/bridgesync/e2e_test.go
index b7834fdfd..d733a53eb 100644
--- a/bridgesync/e2e_test.go
+++ b/bridgesync/e2e_test.go
@@ -1,3 +1,121 @@
-package bridgesync
+package bridgesync_test
 
-// TODO: add E2E test, prolly need a mock contract
+import (
+	"context"
+	"fmt"
+	"math/big"
+	"testing"
+	"time"
+
+	"github.com/0xPolygon/cdk-contracts-tooling/contracts/elderberry-paris/polygonzkevmbridgev2"
+	"github.com/0xPolygon/cdk/bridgesync"
+	"github.com/0xPolygon/cdk/etherman"
+	"github.com/0xPolygon/cdk/reorgdetector"
+	"github.com/ethereum/go-ethereum/accounts/abi/bind"
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/ethereum/go-ethereum/ethclient/simulated"
+	"github.com/stretchr/testify/require"
+)
+
+func newSimulatedClient(t *testing.T, auth *bind.TransactOpts) (
+	client *simulated.Backend,
+	bridgeAddr common.Address,
+	bridgeContract *polygonzkevmbridgev2.Polygonzkevmbridgev2,
+) {
+	t.Helper()
+	var err error
+	balance, _ := big.NewInt(0).SetString("10000000000000000000000000", 10) //nolint:gomnd
+	address := auth.From
+	genesisAlloc := map[common.Address]types.Account{
+		address: {
+			Balance: balance,
+		},
+	}
+	blockGasLimit := uint64(999999999999999999) //nolint:gomnd
+	client = simulated.NewBackend(genesisAlloc, simulated.WithBlockGasLimit(blockGasLimit))
+
+	bridgeAddr, _, bridgeContract, err = polygonzkevmbridgev2.DeployPolygonzkevmbridgev2(auth, client.Client())
+	require.NoError(t, err)
+	client.Commit()
+	return
+}
+
+func TestBridgeEventE2E(t *testing.T) {
+	ctx := context.Background()
+	dbPathSyncer := t.TempDir()
+	dbPathReorg := t.TempDir()
+	privateKey, err := crypto.GenerateKey()
+	require.NoError(t, err)
+	auth, err := bind.NewKeyedTransactorWithChainID(privateKey, big.NewInt(1337))
+	require.NoError(t, err)
+	client, bridgeAddr, bridgeSc := newSimulatedClient(t, auth)
+	rd, err := reorgdetector.New(ctx, client.Client(), dbPathReorg)
+	require.NoError(t, err)
+	go rd.Start(ctx)
+
+	syncer, err := bridgesync.NewL1(ctx, dbPathSyncer, bridgeAddr, 10, etherman.LatestBlock, rd, client.Client(), 0, time.Millisecond*10, 0, 0)
+	require.NoError(t, err)
+	go syncer.Start(ctx)
+
+	// Send bridge txs
+	expectedBridges := []bridgesync.Bridge{}
+	for i := 0; i < 100; i++ {
+		bridge := bridgesync.Bridge{
+			Amount:             big.NewInt(0),
+			DepositCount:       uint32(i),
+			DestinationNetwork: 3,
+			DestinationAddress: common.HexToAddress("f00"),
+			Metadata:           []byte{},
+		}
+		tx, err := bridgeSc.BridgeAsset(
+			auth,
+			bridge.DestinationNetwork,
+			bridge.DestinationAddress,
+			bridge.Amount,
+			bridge.OriginAddress,
+			false, nil,
+		)
+		require.NoError(t, err)
+		client.Commit()
+		receipt, err := client.Client().TransactionReceipt(ctx, tx.Hash())
+		require.NoError(t, err)
+		require.Equal(t, receipt.Status, types.ReceiptStatusSuccessful)
+		expectedBridges = append(expectedBridges, bridge)
+	}
+
+	// Wait for syncer to catch up
+	syncerUpToDate := false
+	var errMsg string
+	lb, err := client.Client().BlockNumber(ctx)
+	require.NoError(t, err)
+	for i := 0; i < 10; i++ {
+		lpb, err := syncer.GetLastProcessedBlock(ctx)
+		require.NoError(t, err)
+		if lpb == lb {
+			syncerUpToDate = true
+			break
+		}
+		time.Sleep(time.Millisecond * 100)
+		errMsg = fmt.Sprintf("last block from client: %d, last block from syncer: %d", lb, lpb)
+	}
+	require.True(t, syncerUpToDate, errMsg)
+
+	// Get bridges
+	lastBlock, err := client.Client().BlockNumber(ctx)
+	require.NoError(t, err)
+	events, err := syncer.GetClaimsAndBridges(ctx, 0, lastBlock)
+	require.NoError(t, err)
+	actualBridges := []bridgesync.Bridge{}
+	for _, event := range events {
+		if event.Bridge != nil {
+			actualBridges = append(actualBridges, *event.Bridge)
+		}
+	}
+
+	// Assert bridges
+	require.Equal(t, expectedBridges, actualBridges)
+}
+
+// TODO: test claims and claims + bridges combined
diff --git a/bridgesync/processor.go b/bridgesync/processor.go
index 8bdb1167d..b8e15e521 100644
--- a/bridgesync/processor.go
+++ b/bridgesync/processor.go
@@ -8,6 +8,7 @@ import (
 	"math/big"
 
 	dbCommon "github.com/0xPolygon/cdk/common"
+	"github.com/0xPolygon/cdk/log"
 	"github.com/0xPolygon/cdk/sync"
 	"github.com/0xPolygon/cdk/tree"
 	"github.com/ethereum/go-ethereum/common"
@@ -46,9 +47,9 @@ func (b *Bridge) Hash() common.Hash {
 		bigIntSize     = 32
 	)
 	origNet := make([]byte, uint32ByteSize)
-	binary.BigEndian.PutUint32(origNet, uint32(b.OriginNetwork))
+	binary.BigEndian.PutUint32(origNet, b.OriginNetwork)
 	destNet := make([]byte, uint32ByteSize)
-	binary.BigEndian.PutUint32(destNet, uint32(b.DestinationNetwork))
+	binary.BigEndian.PutUint32(destNet, b.DestinationNetwork)
 
 	metaHash := keccak256.Hash(b.Metadata)
 	var buf [bigIntSize]byte
@@ -86,11 +87,13 @@ type processor struct {
 	eventsTable    string
 	lastBlockTable string
 	exitTree       *tree.AppendOnlyTree
+	log            *log.Logger
 }
 
 func newProcessor(ctx context.Context, dbPath, dbPrefix string) (*processor, error) {
 	eventsTable := dbPrefix + eventsTableSufix
 	lastBlockTable := dbPrefix + lastBlockTableSufix
+	logger := log.WithFields("bridge-syncer", dbPrefix)
 	tableCfgFunc := func(defaultBuckets kv.TableCfg) kv.TableCfg {
 		cfg := kv.TableCfg{
 			eventsTable:    {},
@@ -115,6 +118,7 @@ func newProcessor(ctx context.Context, dbPath, dbPrefix string) (*processor, err
 		eventsTable:    eventsTable,
 		lastBlockTable: lastBlockTable,
 		exitTree:       exitTree,
+		log:            logger,
 	}, nil
 }
 
@@ -189,6 +193,7 @@ func (p *processor) Reorg(ctx context.Context, firstReorgedBlock uint64) error {
 	if err != nil {
 		return err
 	}
+	defer tx.Rollback()
 	c, err := tx.Cursor(p.eventsTable)
 	if err != nil {
 		return err
@@ -284,6 +289,7 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error {
 		exitTreeRollback()
 		return err
 	}
+	p.log.Debugf("processed %d events until block %d", len(block.Events), block.Num)
 	return nil
 }
 
@@ -291,3 +297,21 @@ func (p *processor) updateLastProcessedBlock(tx kv.RwTx, blockNum uint64) error
 	blockNumBytes := dbCommon.Uint64ToBytes(blockNum)
 	return tx.Put(p.lastBlockTable, lastBlockKey, blockNumBytes)
 }
+
+func GenerateGlobalIndex(mainnetFlag bool, rollupIndex uint32, localExitRootIndex uint32) *big.Int {
+	var (
+		globalIndexBytes []byte
+		buf              [4]byte
+	)
+	if mainnetFlag {
+		globalIndexBytes = append(globalIndexBytes, big.NewInt(1).Bytes()...)
+		ri := big.NewInt(0).FillBytes(buf[:])
+		globalIndexBytes = append(globalIndexBytes, ri...)
+	} else {
+		ri := big.NewInt(0).SetUint64(uint64(rollupIndex)).FillBytes(buf[:])
+		globalIndexBytes = append(globalIndexBytes, ri...)
+	}
+	leri := big.NewInt(0).SetUint64(uint64(localExitRootIndex)).FillBytes(buf[:])
+	globalIndexBytes = append(globalIndexBytes, leri...)
+	return big.NewInt(0).SetBytes(globalIndexBytes)
+}
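
GenerateGlobalIndex packs the mainnet flag, rollup index and local exit root index into a single big.Int, i.e. mainnetFlag·2^64 + rollupIndex·2^32 + localExitRootIndex (the rollup index bytes are zeroed when the mainnet flag is set), which is the globalIndex format the claim methods take. A small sketch of the resulting values:

```go
package main

import (
	"fmt"

	"github.com/0xPolygon/cdk/bridgesync"
)

func main() {
	// Bridge originated on mainnet (L1), leaf index 5 of the local exit tree:
	// 1*2^64 + 0*2^32 + 5
	fmt.Println(bridgesync.GenerateGlobalIndex(true, 0, 5)) // 18446744073709551621

	// Bridge originated on rollup 2, leaf index 5:
	// 2*2^32 + 5
	fmt.Println(bridgesync.GenerateGlobalIndex(false, 2, 5)) // 8589934597
}
```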
diff --git a/claimsponsor/claimsponsor.go b/claimsponsor/claimsponsor.go
new file mode 100644
index 000000000..e0d8e7b83
--- /dev/null
+++ b/claimsponsor/claimsponsor.go
@@ -0,0 +1,356 @@
+package claimsponsor
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"math"
+	"math/big"
+	"time"
+
+	dbCommon "github.com/0xPolygon/cdk/common"
+	"github.com/0xPolygon/cdk/log"
+	"github.com/0xPolygon/cdk/sync"
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ledgerwatch/erigon-lib/kv"
+	"github.com/ledgerwatch/erigon-lib/kv/iter"
+	"github.com/ledgerwatch/erigon-lib/kv/mdbx"
+)
+
+type ClaimStatus string
+
+const (
+	PendingClaimStatus = "pending"
+	WIPStatus          = "work in progress"
+	SuccessClaimStatus = "success"
+	FailedClaimStatus  = "failed"
+
+	claimTable = "claimsponsor-tx"
+	queueTable = "claimsponsor-queue"
+)
+
+var (
+	ErrInvalidClaim = errors.New("invalid claim")
+	ErrNotFound     = errors.New("not found")
+)
+
+// Claim representation of a claim event
+type Claim struct {
+	LeafType            uint8
+	ProofLocalExitRoot  [32]common.Hash
+	ProofRollupExitRoot [32]common.Hash
+	GlobalIndex         *big.Int
+	MainnetExitRoot     common.Hash
+	RollupExitRoot      common.Hash
+	OriginNetwork       uint32
+	OriginTokenAddress  common.Address
+	DestinationNetwork  uint32
+	DestinationAddress  common.Address
+	Amount              *big.Int
+	Metadata            []byte
+
+	Status ClaimStatus
+	TxID   string
+}
+
+func (c *Claim) Key() []byte {
+	return c.GlobalIndex.Bytes()
+}
+
+type ClaimSender interface {
+	checkClaim(ctx context.Context, claim *Claim) error
+	sendClaim(ctx context.Context, claim *Claim) (string, error)
+	claimStatus(ctx context.Context, id string) (ClaimStatus, error)
+}
+
+type ClaimSponsor struct {
+	db                    kv.RwDB
+	sender                ClaimSender
+	rh                    *sync.RetryHandler
+	waitTxToBeMinedPeriod time.Duration
+	waitOnEmptyQueue      time.Duration
+}
+
+func newClaimSponsor(
+	dbPath string,
+	sender ClaimSender,
+	retryAfterErrorPeriod time.Duration,
+	maxRetryAttemptsAfterError int,
+	waitTxToBeMinedPeriod time.Duration,
+	waitOnEmptyQueue time.Duration,
+) (*ClaimSponsor, error) {
+	tableCfgFunc := func(defaultBuckets kv.TableCfg) kv.TableCfg {
+		cfg := kv.TableCfg{
+			claimTable: {},
+			queueTable: {},
+		}
+		return cfg
+	}
+	db, err := mdbx.NewMDBX(nil).
+		Path(dbPath).
+		WithTableCfg(tableCfgFunc).
+		Open()
+	if err != nil {
+		return nil, err
+	}
+	rh := &sync.RetryHandler{
+		MaxRetryAttemptsAfterError: maxRetryAttemptsAfterError,
+		RetryAfterErrorPeriod:      retryAfterErrorPeriod,
+	}
+	return &ClaimSponsor{
+		db:                    db,
+		sender:                sender,
+		rh:                    rh,
+		waitTxToBeMinedPeriod: waitTxToBeMinedPeriod,
+		waitOnEmptyQueue:      waitOnEmptyQueue,
+	}, nil
+}
+
+func (c *ClaimSponsor) Start(ctx context.Context) {
+	var (
+		attempts int
+		err      error
+	)
+	for {
+		if err != nil {
+			attempts++
+			c.rh.Handle("claimsponsor main loop", attempts)
+		}
+		tx, err2 := c.db.BeginRw(ctx)
+		if err2 != nil {
+			err = err2
+			log.Errorf("error calling BeginRw: %v", err)
+			continue
+		}
+		queueIndex, globalIndex, err2 := getFirstQueueIndex(tx)
+		if err2 != nil {
+			err = err2
+			tx.Rollback()
+			if err == ErrNotFound {
+				log.Debugf("queue is empty")
+				err = nil
+				time.Sleep(c.waitOnEmptyQueue)
+				continue
+			}
+			log.Errorf("error calling getFirstQueueIndex: %v", err)
+			continue
+		}
+		claim, err2 := getClaim(tx, globalIndex)
+		if err2 != nil {
+			err = err2
+			tx.Rollback()
+			log.Errorf("error calling getClaim with globalIndex %s: %v", globalIndex.String(), err)
+			continue
+		}
+		if claim.TxID == "" {
+			txID, err2 := c.sender.sendClaim(ctx, claim)
+			if err2 != nil {
+				err = err2
+				tx.Rollback()
+				log.Errorf("error calling sendClaim with globalIndex %s: %v", globalIndex.String(), err)
+				continue
+			}
+			claim.TxID = txID
+			claim.Status = WIPStatus
+			err2 = putClaim(tx, claim)
+			if err2 != nil {
+				err = err2
+				tx.Rollback()
+				log.Errorf("error calling putClaim with globalIndex %s: %v", globalIndex.String(), err)
+				continue
+			}
+		}
+		err2 = tx.Commit()
+		if err2 != nil {
+			err = err2
+			log.Errorf("error calling tx.Commit after putting claim: %v", err)
+			continue
+		}
+
+		log.Infof("waiting for tx %s with global index %s to succeed or fail", claim.TxID, globalIndex.String())
+		status, err2 := c.waitTxToBeSuccessOrFail(ctx, claim.TxID)
+		if err2 != nil {
+			err = err2
+			log.Errorf("error calling waitTxToBeSuccessOrFail for tx %s: %v", claim.TxID, err)
+			continue
+		}
+		log.Infof("tx %s with global index %s concluded with status: %s", claim.TxID, globalIndex.String(), status)
+		tx, err2 = c.db.BeginRw(ctx)
+		if err2 != nil {
+			err = err2
+			log.Errorf("error calling BeginRw: %v", err)
+			continue
+		}
+		claim.Status = status
+		err2 = putClaim(tx, claim)
+		if err2 != nil {
+			err = err2
+			tx.Rollback()
+			log.Errorf("error calling putClaim with globalIndex %s: %v", globalIndex.String(), err)
+			continue
+		}
+		err2 = tx.Delete(queueTable, dbCommon.Uint64ToBytes(queueIndex))
+		if err2 != nil {
+			err = err2
+			tx.Rollback()
+			log.Errorf("error calling delete on the queue table with index %d: %v", queueIndex, err)
+			continue
+		}
+		err2 = tx.Commit()
+		if err2 != nil {
+			err = err2
+			log.Errorf("error calling tx.Commit after putting claim: %v", err)
+			continue
+		}
+
+		attempts = 0
+	}
+}
+
+func (c *ClaimSponsor) waitTxToBeSuccessOrFail(ctx context.Context, txID string) (ClaimStatus, error) {
+	t := time.NewTicker(c.waitTxToBeMinedPeriod)
+	for {
+		select {
+		case <-ctx.Done():
+			return "", errors.New("context cancelled")
+		case <-t.C:
+			status, err := c.sender.claimStatus(ctx, txID)
+			if err != nil {
+				return "", err
+			}
+			if status == FailedClaimStatus || status == SuccessClaimStatus {
+				return status, nil
+			}
+		}
+	}
+}
+
+func (c *ClaimSponsor) AddClaimToQueue(ctx context.Context, claim *Claim) error {
+	if claim.GlobalIndex == nil {
+		return ErrInvalidClaim
+	}
+	claim.Status = PendingClaimStatus
+	tx, err := c.db.BeginRw(ctx)
+	if err != nil {
+		return err
+	}
+
+	_, err = getClaim(tx, claim.GlobalIndex)
+	if err != ErrNotFound {
+		if err != nil {
+			tx.Rollback()
+			return err
+		} else {
+			tx.Rollback()
+			return errors.New("claim already added")
+		}
+	}
+
+	err = putClaim(tx, claim)
+	if err != nil {
+		tx.Rollback()
+		return err
+	}
+
+	var queuePosition uint64
+	lastQueuePosition, _, err := getLastQueueIndex(tx)
+	if err == ErrNotFound {
+		queuePosition = 0
+	} else if err != nil {
+		tx.Rollback()
+		return err
+	} else {
+		queuePosition = lastQueuePosition + 1
+	}
+	err = tx.Put(queueTable, dbCommon.Uint64ToBytes(queuePosition), claim.Key())
+	if err != nil {
+		tx.Rollback()
+		return err
+	}
+
+	return tx.Commit()
+}
+
+func putClaim(tx kv.RwTx, claim *Claim) error {
+	value, err := json.Marshal(claim)
+	if err != nil {
+		return err
+	}
+	return tx.Put(claimTable, claim.Key(), value)
+}
+
+func (c *ClaimSponsor) getClaimByQueueIndex(ctx context.Context, queueIndex uint64) (*Claim, error) {
+	tx, err := c.db.BeginRo(ctx)
+	if err != nil {
+		return nil, err
+	}
+	defer tx.Rollback()
+
+	globalIndexBytes, err := tx.GetOne(queueTable, dbCommon.Uint64ToBytes(queueIndex))
+	if err != nil {
+		return nil, err
+	}
+	if globalIndexBytes == nil {
+		return nil, ErrNotFound
+	}
+
+	return getClaim(tx, new(big.Int).SetBytes(globalIndexBytes))
+}
+
+func getLastQueueIndex(tx kv.Tx) (uint64, *big.Int, error) {
+	iter, err := tx.RangeDescend(
+		queueTable,
+		dbCommon.Uint64ToBytes(math.MaxUint64),
+		dbCommon.Uint64ToBytes(0), 1,
+	)
+	if err != nil {
+		return 0, nil, err
+	}
+	return getIndex(iter)
+}
+
+func getFirstQueueIndex(tx kv.Tx) (uint64, *big.Int, error) {
+	iter, err := tx.RangeAscend(
+		queueTable,
+		dbCommon.Uint64ToBytes(0),
+		nil, 1,
+	)
+	if err != nil {
+		return 0, nil, err
+	}
+	return getIndex(iter)
+}
+
+func getIndex(iter iter.KV) (uint64, *big.Int, error) {
+	k, v, err := iter.Next()
+	if err != nil {
+		return 0, nil, err
+	}
+	if k == nil {
+		return 0, nil, ErrNotFound
+	}
+	globalIndex := new(big.Int).SetBytes(v)
+	return dbCommon.BytesToUint64(k), globalIndex, nil
+}
+
+func (c *ClaimSponsor) GetClaim(ctx context.Context, globalIndex *big.Int) (*Claim, error) {
+	tx, err := c.db.BeginRo(ctx)
+	if err != nil {
+		return nil, err
+	}
+	defer tx.Rollback()
+	return getClaim(tx, globalIndex)
+}
+
+func getClaim(tx kv.Tx, globalIndex *big.Int) (*Claim, error) {
+	claimBytes, err := tx.GetOne(claimTable, globalIndex.Bytes())
+	if err != nil {
+		return nil, err
+	}
+	if claimBytes == nil {
+		return nil, ErrNotFound
+	}
+	claim := &Claim{}
+	err = json.Unmarshal(claimBytes, claim)
+	return claim, err
+}
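
The sponsor is a FIFO queue persisted in MDBX: AddClaimToQueue stores the claim as pending, and the Start loop sends it through the ClaimSender and tracks it to a terminal status (success or failed). A minimal consumer sketch, assuming a sponsor that is already constructed and started (see the EVM constructor and the e2e test below); the package and helper names are hypothetical:

```go
package sponsorclient

import (
	"context"
	"time"

	"github.com/0xPolygon/cdk/claimsponsor"
	"github.com/0xPolygon/cdk/log"
)

// waitForClaim queues a claim and polls its status until it reaches a terminal state.
func waitForClaim(ctx context.Context, sponsor *claimsponsor.ClaimSponsor, claim *claimsponsor.Claim) {
	if err := sponsor.AddClaimToQueue(ctx, claim); err != nil {
		log.Fatal(err)
	}
	for {
		stored, err := sponsor.GetClaim(ctx, claim.GlobalIndex)
		if err != nil {
			log.Fatal(err)
		}
		switch stored.Status {
		case claimsponsor.SuccessClaimStatus:
			log.Infof("claim %s succeeded (tx %s)", claim.GlobalIndex.String(), stored.TxID)
			return
		case claimsponsor.FailedClaimStatus:
			log.Fatalf("claim %s failed", claim.GlobalIndex.String())
		default:
			time.Sleep(100 * time.Millisecond)
		}
	}
}
```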
diff --git a/claimsponsor/e2e_test.go b/claimsponsor/e2e_test.go
new file mode 100644
index 000000000..533de1137
--- /dev/null
+++ b/claimsponsor/e2e_test.go
@@ -0,0 +1,105 @@
+package claimsponsor_test
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"math/big"
+	"testing"
+	"time"
+
+	"github.com/0xPolygon/cdk/bridgesync"
+	"github.com/0xPolygon/cdk/claimsponsor"
+	"github.com/0xPolygon/cdk/etherman"
+	"github.com/0xPolygon/cdk/test/helpers"
+	"github.com/ethereum/go-ethereum/accounts/abi/bind"
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/stretchr/testify/require"
+)
+
+func TestE2EL1toEVML2(t *testing.T) {
+	// start other needed components
+	ctx := context.Background()
+	env := helpers.SetupAggoracleWithEVMChain(t)
+	dbPathBridgeSyncL1 := t.TempDir()
+	bridgeSyncL1, err := bridgesync.NewL1(ctx, dbPathBridgeSyncL1, env.BridgeL1Addr, 10, etherman.LatestBlock, env.ReorgDetector, env.L1Client.Client(), 0, time.Millisecond*10, 0, 0)
+	require.NoError(t, err)
+	go bridgeSyncL1.Start(ctx)
+
+	// start claim sponsor
+	dbPathClaimSponsor := t.TempDir()
+	claimer, err := claimsponsor.NewEVMClaimSponsor(
+		dbPathClaimSponsor,
+		env.L2Client.Client(),
+		env.BridgeL2Addr,
+		env.AuthL2.From,
+		200_000,
+		0,
+		env.EthTxManMockL2,
+		0, 0, time.Millisecond*10, time.Millisecond*10,
+	)
+	require.NoError(t, err)
+	go claimer.Start(ctx)
+
+	// test
+	for i := 0; i < 3; i++ {
+		// Send bridges to L2, wait for GER to be injected on L2
+		amount := big.NewInt(int64(i) + 1)
+		env.AuthL1.Value = amount
+		_, err := env.BridgeL1Contract.BridgeAsset(env.AuthL1, env.NetworkIDL2, env.AuthL2.From, amount, common.Address{}, true, nil)
+		require.NoError(t, err)
+		env.L1Client.Commit()
+		time.Sleep(time.Millisecond * 300)
+		expectedGER, err := env.GERL1Contract.GetLastGlobalExitRoot(&bind.CallOpts{Pending: false})
+		require.NoError(t, err)
+		isInjected, err := env.AggOracleSender.IsGERAlreadyInjected(expectedGER)
+		require.NoError(t, err)
+		require.True(t, isInjected, fmt.Sprintf("iteration %d, GER: %s", i, common.Bytes2Hex(expectedGER[:])))
+
+		// Build MP using bridgeSyncL1 & env.L1InfoTreeSync
+		info, err := env.L1InfoTreeSync.GetInfoByIndex(ctx, uint32(i))
+		require.NoError(t, err)
+		localProof, err := bridgeSyncL1.GetProof(ctx, uint32(i), info.MainnetExitRoot)
+		require.NoError(t, err)
+		rollupProof, err := env.L1InfoTreeSync.GetRollupExitTreeMerkleProof(ctx, 0, common.Hash{})
+		require.NoError(t, err)
+
+		// Request to sponsor claim
+		globalIndex := bridgesync.GenerateGlobalIndex(true, 0, uint32(i))
+		err = claimer.AddClaimToQueue(ctx, &claimsponsor.Claim{
+			LeafType:            0,
+			ProofLocalExitRoot:  localProof,
+			ProofRollupExitRoot: rollupProof,
+			GlobalIndex:         globalIndex,
+			MainnetExitRoot:     info.MainnetExitRoot,
+			RollupExitRoot:      info.RollupExitRoot,
+			OriginNetwork:       0,
+			OriginTokenAddress:  common.Address{},
+			DestinationNetwork:  env.NetworkIDL2,
+			DestinationAddress:  env.AuthL2.From,
+			Amount:              amount,
+			Metadata:            nil,
+		})
+		require.NoError(t, err)
+
+		// Wait until success
+		succeed := false
+		for i := 0; i < 10; i++ {
+			claim, err := claimer.GetClaim(ctx, globalIndex)
+			require.NoError(t, err)
+			if claim.Status == claimsponsor.FailedClaimStatus {
+				require.NoError(t, errors.New("claim failed"))
+			} else if claim.Status == claimsponsor.SuccessClaimStatus {
+				succeed = true
+				break
+			}
+			time.Sleep(100 * time.Millisecond)
+		}
+		require.True(t, succeed)
+
+		// Check on contract that is claimed
+		isClaimed, err := env.BridgeL2Contract.IsClaimed(&bind.CallOpts{Pending: false}, uint32(i), 0)
+		require.NoError(t, err)
+		require.True(t, isClaimed)
+	}
+}
diff --git a/claimsponsor/evmclaimsponsor.go b/claimsponsor/evmclaimsponsor.go
new file mode 100644
index 000000000..e7b94b200
--- /dev/null
+++ b/claimsponsor/evmclaimsponsor.go
@@ -0,0 +1,209 @@
+package claimsponsor
+
+import (
+	"context"
+	"fmt"
+	"math/big"
+	"time"
+
+	"github.com/0xPolygon/cdk-contracts-tooling/contracts/etrog/polygonzkevmbridgev2"
+	configTypes "github.com/0xPolygon/cdk/config/types"
+	"github.com/0xPolygonHermez/zkevm-ethtx-manager/ethtxmanager"
+	"github.com/ethereum/go-ethereum"
+	"github.com/ethereum/go-ethereum/accounts/abi"
+	"github.com/ethereum/go-ethereum/accounts/abi/bind"
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/types"
+)
+
+const (
+	// LeafTypeAsset represents a bridge asset
+	LeafTypeAsset uint8 = 0
+	// LeafTypeMessage represents a bridge message
+	LeafTypeMessage       uint8 = 1
+	gasTooHighErrTemplate       = "Claim tx estimated to consume more gas than the maximum allowed by the service. Estimated %d, maximum allowed: %d"
+)
+
+type EthClienter interface {
+	ethereum.GasEstimator
+	bind.ContractBackend
+}
+
+type EthTxManager interface {
+	Remove(ctx context.Context, id common.Hash) error
+	ResultsByStatus(ctx context.Context, statuses []ethtxmanager.MonitoredTxStatus) ([]ethtxmanager.MonitoredTxResult, error)
+	Result(ctx context.Context, id common.Hash) (ethtxmanager.MonitoredTxResult, error)
+	Add(ctx context.Context, to *common.Address, forcedNonce *uint64, value *big.Int, data []byte, gasOffset uint64, sidecar *types.BlobTxSidecar) (common.Hash, error)
+}
+
+type EVMClaimSponsor struct {
+	*ClaimSponsor
+	l2Client       EthClienter
+	bridgeABI      *abi.ABI
+	bridgeAddr     common.Address
+	bridgeContract *polygonzkevmbridgev2.Polygonzkevmbridgev2
+	ethTxManager   EthTxManager
+	sender         common.Address
+	gasOffset      uint64
+	maxGas         uint64
+}
+
+type EVMClaimSponsorConfig struct {
+	// DBPath path of the DB
+	DBPath string `mapstructure:"DBPath"`
+	// Enabled indicates if the sponsor should be run or not
+	Enabled bool `mapstructure:"Enabled"`
+	// SenderAddr is the address that will be used to send the claim txs
+	SenderAddr common.Address `mapstructure:"SenderAddr"`
+	// BridgeAddrL2 is the address of the bridge smart contract on L2
+	BridgeAddrL2 common.Address `mapstructure:"BridgeAddrL2"`
+	// MaxGas is the max gas (limit) allowed for a claim to be sponsored
+	MaxGas uint64 `mapstructure:"MaxGas"`
+	// RetryAfterErrorPeriod is the time to wait before retrying after an unexpected error
+	RetryAfterErrorPeriod configTypes.Duration `mapstructure:"RetryAfterErrorPeriod"`
+	// MaxRetryAttemptsAfterError is the maximum number of consecutive attempts that will happen before panicking.
+	// Any number smaller than zero will be considered as unlimited retries
+	MaxRetryAttemptsAfterError int `mapstructure:"MaxRetryAttemptsAfterError"`
+	// WaitTxToBeMinedPeriod is the interval between checks on whether a given tx has been mined (or failed)
+	WaitTxToBeMinedPeriod configTypes.Duration `mapstructure:"WaitTxToBeMinedPeriod"`
+	// WaitOnEmptyQueue is the time to wait before checking for the next claim in the queue
+	// when the queue is empty
+	WaitOnEmptyQueue configTypes.Duration `mapstructure:"WaitOnEmptyQueue"`
+	// EthTxManager is the configuration of the EthTxManager to be used by the claim sponsor
+	EthTxManager ethtxmanager.Config `mapstructure:"EthTxManager"`
+	// GasOffset is the gas to add on top of the estimated gas when sending the claim txs
+	GasOffset uint64 `mapstructure:"GasOffset"`
+}
+
+func NewEVMClaimSponsor(
+	dbPath string,
+	l2Client EthClienter,
+	bridge common.Address,
+	sender common.Address,
+	maxGas, gasOffset uint64,
+	ethTxManager EthTxManager,
+	retryAfterErrorPeriod time.Duration,
+	maxRetryAttemptsAfterError int,
+	waitTxToBeMinedPeriod time.Duration,
+	waitOnEmptyQueue time.Duration,
+) (*ClaimSponsor, error) {
+	contract, err := polygonzkevmbridgev2.NewPolygonzkevmbridgev2(bridge, l2Client)
+	if err != nil {
+		return nil, err
+	}
+	abi, err := polygonzkevmbridgev2.Polygonzkevmbridgev2MetaData.GetAbi()
+	if err != nil {
+		return nil, err
+	}
+	evmSponsor := &EVMClaimSponsor{
+		l2Client:       l2Client,
+		bridgeABI:      abi,
+		bridgeAddr:     bridge,
+		bridgeContract: contract,
+		sender:         sender,
+		gasOffset:      gasOffset,
+		maxGas:         maxGas,
+		ethTxManager:   ethTxManager,
+	}
+	baseSponsor, err := newClaimSponsor(
+		dbPath,
+		evmSponsor,
+		retryAfterErrorPeriod,
+		maxRetryAttemptsAfterError,
+		waitTxToBeMinedPeriod,
+		waitOnEmptyQueue,
+	)
+	if err != nil {
+		return nil, err
+	}
+	evmSponsor.ClaimSponsor = baseSponsor
+	return baseSponsor, nil
+}
+
+func (c *EVMClaimSponsor) checkClaim(ctx context.Context, claim *Claim) error {
+	data, err := c.buildClaimTxData(claim)
+	if err != nil {
+		return err
+	}
+	gas, err := c.l2Client.EstimateGas(ctx, ethereum.CallMsg{
+		From: c.sender,
+		To:   &c.bridgeAddr,
+		Data: data,
+	})
+	if err != nil {
+		return err
+	}
+	if gas > c.maxGas {
+		return fmt.Errorf(gasTooHighErrTemplate, gas, c.maxGas)
+	}
+	return nil
+}
+
+func (c *EVMClaimSponsor) sendClaim(ctx context.Context, claim *Claim) (string, error) {
+	data, err := c.buildClaimTxData(claim)
+	if err != nil {
+		return "", err
+	}
+	id, err := c.ethTxManager.Add(ctx, &c.bridgeAddr, nil, big.NewInt(0), data, c.gasOffset, nil)
+	if err != nil {
+		return "", err
+	}
+	return id.Hex(), nil
+}
+
+func (c *EVMClaimSponsor) claimStatus(ctx context.Context, id string) (ClaimStatus, error) {
+	res, err := c.ethTxManager.Result(ctx, common.HexToHash(id))
+	if err != nil {
+		return "", err
+	}
+	switch res.Status {
+	case ethtxmanager.MonitoredTxStatusCreated,
+		ethtxmanager.MonitoredTxStatusSent:
+		return WIPStatus, nil
+	case ethtxmanager.MonitoredTxStatusFailed:
+		return FailedClaimStatus, nil
+	case ethtxmanager.MonitoredTxStatusMined,
+		ethtxmanager.MonitoredTxStatusSafe,
+		ethtxmanager.MonitoredTxStatusFinalized:
+		return SuccessClaimStatus, nil
+	default:
+		return "", fmt.Errorf("unexpected tx status: %v", res.Status)
+	}
+}
+
+func (c *EVMClaimSponsor) buildClaimTxData(claim *Claim) ([]byte, error) {
+	switch claim.LeafType {
+	case LeafTypeAsset:
+		return c.bridgeABI.Pack(
+			"claimAsset",
+			claim.ProofLocalExitRoot,  // bytes32[32] smtProofLocalExitRoot
+			claim.ProofRollupExitRoot, // bytes32[32] smtProofRollupExitRoot
+			claim.GlobalIndex,         // uint256 globalIndex
+			claim.MainnetExitRoot,     // bytes32 mainnetExitRoot
+			claim.RollupExitRoot,      // bytes32 rollupExitRoot
+			claim.OriginNetwork,       // uint32 originNetwork
+			claim.OriginTokenAddress,  // address originTokenAddress,
+			claim.DestinationNetwork,  // uint32 destinationNetwork
+			claim.DestinationAddress,  // address destinationAddress
+			claim.Amount,              // uint256 amount
+			claim.Metadata,            // bytes metadata
+		)
+	case LeafTypeMessage:
+		return c.bridgeABI.Pack(
+			"claimMessage",
+			claim.ProofLocalExitRoot,  // bytes32[32] smtProofLocalExitRoot
+			claim.ProofRollupExitRoot, // bytes32[32] smtProofRollupExitRoot
+			claim.GlobalIndex,         // uint256 globalIndex
+			claim.MainnetExitRoot,     // bytes32 mainnetExitRoot
+			claim.RollupExitRoot,      // bytes32 rollupExitRoot
+			claim.OriginNetwork,       // uint32 originNetwork
+			claim.OriginTokenAddress,  // address originTokenAddress,
+			claim.DestinationNetwork,  // uint32 destinationNetwork
+			claim.DestinationAddress,  // address destinationAddress
+			claim.Amount,              // uint256 amount
+			claim.Metadata,            // bytes metadata
+		)
+	default:
+		return nil, fmt.Errorf("unexpected leaf type %d", claim.LeafType)
+	}
+}
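
NewEVMClaimSponsor returns the embedded *ClaimSponsor, so callers interact with the generic queue API (Start, AddClaimToQueue, GetClaim) while the EVM-specific sending stays internal. A wiring sketch under the same assumptions as the e2e test and cmd/run.go (a dialed L2 client and an ethtxmanager instance; addresses and values are examples taken from the default config below):

```go
package main

import (
	"context"
	"time"

	"github.com/0xPolygon/cdk/claimsponsor"
	"github.com/0xPolygon/cdk/log"
	"github.com/0xPolygonHermez/zkevm-ethtx-manager/ethtxmanager"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethclient"
)

func main() {
	ctx := context.Background()
	l2Client, err := ethclient.Dial("http://localhost:8546") // example L2 endpoint
	if err != nil {
		log.Fatal(err)
	}
	ethTxMan, err := ethtxmanager.New(ethtxmanager.Config{}) // real deployments need a populated config
	if err != nil {
		log.Fatal(err)
	}
	go ethTxMan.Start()

	sponsor, err := claimsponsor.NewEVMClaimSponsor(
		"/tmp/claimsponsor",
		l2Client,
		common.HexToAddress("0xB7098a13a48EcE087d3DA15b2D28eCE0f89819B8"), // bridge on L2 (example)
		common.HexToAddress("0xfa3b44587990f97ba8b6ba7e230a5f0e95d14b3d"), // sender (example)
		200_000, // MaxGas
		0,       // GasOffset
		ethTxMan,
		time.Second,   // RetryAfterErrorPeriod
		-1,            // MaxRetryAttemptsAfterError
		3*time.Second, // WaitTxToBeMinedPeriod
		3*time.Second, // WaitOnEmptyQueue
	)
	if err != nil {
		log.Fatal(err)
	}
	go sponsor.Start(ctx)
	select {} // keep the process alive while the sponsor runs
}
```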
diff --git a/cmd/main.go b/cmd/main.go
index a13f43e18..4686902f9 100644
--- a/cmd/main.go
+++ b/cmd/main.go
@@ -18,6 +18,8 @@ const (
 	AGGREGATOR = "aggregator"
 	// AGGORACLE name to identify the aggoracle component
 	AGGORACLE = "aggoracle"
+	// RPC name to identify the rpc component
+	RPC = "rpc"
 )
 
 const (
@@ -49,7 +51,7 @@ var (
 		Aliases:  []string{"co"},
 		Usage:    "List of components to run",
 		Required: false,
-		Value:    cli.NewStringSlice(SEQUENCE_SENDER, AGGREGATOR, AGGORACLE),
+		Value:    cli.NewStringSlice(SEQUENCE_SENDER, AGGREGATOR, AGGORACLE, RPC),
 	}
 )
 
diff --git a/cmd/run.go b/cmd/run.go
index 0f2021f80..c17c46767 100644
--- a/cmd/run.go
+++ b/cmd/run.go
@@ -11,19 +11,25 @@ import (
 
 	zkevm "github.com/0xPolygon/cdk"
 	dataCommitteeClient "github.com/0xPolygon/cdk-data-availability/client"
+	jRPC "github.com/0xPolygon/cdk-rpc/rpc"
 	"github.com/0xPolygon/cdk/aggoracle"
 	"github.com/0xPolygon/cdk/aggoracle/chaingersender"
 	"github.com/0xPolygon/cdk/aggregator"
 	"github.com/0xPolygon/cdk/aggregator/db"
+	"github.com/0xPolygon/cdk/bridgesync"
+	"github.com/0xPolygon/cdk/claimsponsor"
 	"github.com/0xPolygon/cdk/config"
 	"github.com/0xPolygon/cdk/dataavailability"
 	"github.com/0xPolygon/cdk/dataavailability/datacommittee"
 	"github.com/0xPolygon/cdk/etherman"
 	ethermanconfig "github.com/0xPolygon/cdk/etherman/config"
 	"github.com/0xPolygon/cdk/etherman/contracts"
+	"github.com/0xPolygon/cdk/l1bridge2infoindexsync"
 	"github.com/0xPolygon/cdk/l1infotreesync"
+	"github.com/0xPolygon/cdk/lastgersync"
 	"github.com/0xPolygon/cdk/log"
 	"github.com/0xPolygon/cdk/reorgdetector"
+	"github.com/0xPolygon/cdk/rpc"
 	"github.com/0xPolygon/cdk/sequencesender"
 	"github.com/0xPolygon/cdk/sequencesender/txbuilder"
 	"github.com/0xPolygon/cdk/state"
@@ -54,9 +60,17 @@ func start(cliCtx *cli.Context) error {
 	}
 
 	components := cliCtx.StringSlice(config.FlagComponents)
-	l1Client := runL1ClientIfNeeded(components, c.SequenceSender.EthTxManager.Etherman.URL)
+	l1Client := runL1ClientIfNeeded(components, c.Etherman.URL)
+	l2Client := runL2ClientIfNeeded(components, c.AggOracle.EVMSender.URLRPCL2)
 	reorgDetectorL1 := runReorgDetectorL1IfNeeded(cliCtx.Context, components, l1Client, c.ReorgDetectorL1.DBPath)
+	reorgDetectorL2 := runReorgDetectorL2IfNeeded(cliCtx.Context, components, l2Client, c.ReorgDetectorL2.DBPath)
 	l1InfoTreeSync := runL1InfoTreeSyncerIfNeeded(cliCtx.Context, components, *c, l1Client, reorgDetectorL1)
+	claimSponsor := runClaimSponsorIfNeeded(cliCtx.Context, components, l2Client, c.ClaimSponsor)
+	l1BridgeSync := runBridgeSyncL1IfNeeded(cliCtx.Context, components, c.BridgeL1Sync, reorgDetectorL1, l1Client)
+	l2BridgeSync := runBridgeSyncL2IfNeeded(cliCtx.Context, components, c.BridgeL2Sync, reorgDetectorL2, l2Client)
+	l1Bridge2InfoIndexSync := runL1Bridge2InfoIndexSyncIfNeeded(cliCtx.Context, components, c.L1Bridge2InfoIndexSync, l1BridgeSync, l1InfoTreeSync, l1Client)
+	lastGERSync := runLastGERSyncIfNeeded(cliCtx.Context, components, c.LastGERSync, reorgDetectorL2, l2Client, l1InfoTreeSync)
+
 	for _, component := range components {
 		switch component {
 		case SEQUENCE_SENDER:
@@ -74,8 +88,24 @@ func start(cliCtx *cli.Context) error {
 				}
 			}()
 		case AGGORACLE:
-			aggOracle := createAggoracle(*c, l1Client, l1InfoTreeSync)
+			aggOracle := createAggoracle(*c, l1Client, l2Client, l1InfoTreeSync)
 			go aggOracle.Start(cliCtx.Context)
+		case RPC:
+			server := createRPC(
+				c.RPC,
+				c.Common.NetworkID,
+				claimSponsor,
+				l1InfoTreeSync,
+				l1Bridge2InfoIndexSync,
+				lastGERSync,
+				l1BridgeSync,
+				l2BridgeSync,
+			)
+			go func() {
+				if err := server.Start(); err != nil {
+					log.Fatal(err)
+				}
+			}()
 		}
 	}
 
@@ -222,7 +252,7 @@ func newTxBuilder(
 	return txBuilder, err
 }
 
-func createAggoracle(cfg config.Config, l1Client *ethclient.Client, syncer *l1infotreesync.L1InfoTreeSync) *aggoracle.AggOracle {
+func createAggoracle(cfg config.Config, l1Client, l2Client *ethclient.Client, syncer *l1infotreesync.L1InfoTreeSync) *aggoracle.AggOracle {
 	var sender aggoracle.ChainSender
 	switch cfg.AggOracle.TargetChainType {
 	case aggoracle.EVMChain:
@@ -236,14 +266,10 @@ func createAggoracle(cfg config.Config, l1Client *ethclient.Client, syncer *l1in
 			log.Fatal(err)
 		}
 		go ethTxManager.Start()
-		l2CLient, err := ethclient.Dial(cfg.AggOracle.EVMSender.URLRPCL2)
-		if err != nil {
-			log.Fatal(err)
-		}
 		sender, err = chaingersender.NewEVMChainGERSender(
 			cfg.AggOracle.EVMSender.GlobalExitRootL2Addr,
 			cfg.AggOracle.EVMSender.SenderAddr,
-			l2CLient,
+			l2Client,
 			ethTxManager,
 			cfg.AggOracle.EVMSender.GasOffset,
 			cfg.AggOracle.EVMSender.WaitPeriodMonitorTx.Duration,
@@ -381,44 +407,18 @@ func newState(c *config.Config, l2ChainID uint64, sqlDB *pgxpool.Pool) *state.St
 	return st
 }
 
-func newReorgDetectorL1(
+func newReorgDetector(
 	ctx context.Context,
-	cfg config.Config,
-	l1Client *ethclient.Client,
+	dbPath string,
+	client *ethclient.Client,
 ) *reorgdetector.ReorgDetector {
-	rd, err := reorgdetector.New(ctx, l1Client, cfg.ReorgDetectorL1.DBPath)
+	rd, err := reorgdetector.New(ctx, client, dbPath)
 	if err != nil {
 		log.Fatal(err)
 	}
 	return rd
 }
 
-func newL1InfoTreeSyncer(
-	ctx context.Context,
-	cfg config.Config,
-	l1Client *ethclient.Client,
-	reorgDetector *reorgdetector.ReorgDetector,
-) *l1infotreesync.L1InfoTreeSync {
-	syncer, err := l1infotreesync.New(
-		ctx,
-		cfg.L1InfoTreeSync.DBPath,
-		cfg.L1InfoTreeSync.GlobalExitRootAddr,
-		cfg.L1InfoTreeSync.RollupManagerAddr,
-		cfg.L1InfoTreeSync.SyncBlockChunkSize,
-		etherman.BlockNumberFinality(cfg.L1InfoTreeSync.BlockFinality),
-		reorgDetector,
-		l1Client,
-		cfg.L1InfoTreeSync.WaitForNewBlocksPeriod.Duration,
-		cfg.L1InfoTreeSync.InitialBlock,
-		cfg.L1InfoTreeSync.RetryAfterErrorPeriod.Duration,
-		cfg.L1InfoTreeSync.MaxRetryAttemptsAfterError,
-	)
-	if err != nil {
-		log.Fatal(err)
-	}
-	return syncer
-}
-
 func isNeeded(casesWhereNeeded, actualCases []string) bool {
 	for _, actaulCase := range actualCases {
 		for _, caseWhereNeeded := range casesWhereNeeded {
@@ -437,7 +437,7 @@ func runL1InfoTreeSyncerIfNeeded(
 	l1Client *ethclient.Client,
 	reorgDetector *reorgdetector.ReorgDetector,
 ) *l1infotreesync.L1InfoTreeSync {
-	if !isNeeded([]string{AGGORACLE, SEQUENCE_SENDER}, components) {
+	if !isNeeded([]string{AGGORACLE, RPC}, components) {
 		return nil
 	}
 	l1InfoTreeSync, err := l1infotreesync.New(
@@ -462,7 +462,7 @@ func runL1InfoTreeSyncerIfNeeded(
 }
 
 func runL1ClientIfNeeded(components []string, urlRPCL1 string) *ethclient.Client {
-	if !isNeeded([]string{SEQUENCE_SENDER, AGGREGATOR, AGGORACLE}, components) {
+	if !isNeeded([]string{SEQUENCE_SENDER, AGGREGATOR, AGGORACLE, RPC}, components) {
 		return nil
 	}
 	log.Debugf("dialing L1 client at: %s", urlRPCL1)
@@ -473,8 +473,20 @@ func runL1ClientIfNeeded(components []string, urlRPCL1 string) *ethclient.Client
 	return l1CLient
 }
 
+func runL2ClientIfNeeded(components []string, urlRPCL2 string) *ethclient.Client {
+	if !isNeeded([]string{AGGORACLE, RPC}, components) {
+		return nil
+	}
+	log.Debugf("dialing L2 client at: %s", urlRPCL2)
+	l2CLient, err := ethclient.Dial(urlRPCL2)
+	if err != nil {
+		log.Fatal(err)
+	}
+	return l2CLient
+}
+
 func runReorgDetectorL1IfNeeded(ctx context.Context, components []string, l1Client *ethclient.Client, dbPath string) *reorgdetector.ReorgDetector {
-	if !isNeeded([]string{SEQUENCE_SENDER, AGGREGATOR, AGGORACLE}, components) {
+	if !isNeeded([]string{SEQUENCE_SENDER, AGGREGATOR, AGGORACLE, RPC}, components) {
 		return nil
 	}
 	rd := newReorgDetector(ctx, dbPath, l1Client)
@@ -482,14 +494,194 @@ func runReorgDetectorL1IfNeeded(ctx context.Context, components []string, l1Clie
 	return rd
 }
 
-func newReorgDetector(
+func runReorgDetectorL2IfNeeded(ctx context.Context, components []string, l2Client *ethclient.Client, dbPath string) *reorgdetector.ReorgDetector {
+	if !isNeeded([]string{AGGORACLE, RPC}, components) {
+		return nil
+	}
+	rd := newReorgDetector(ctx, dbPath, l2Client)
+	go rd.Start(ctx)
+	return rd
+}
+
+func runClaimSponsorIfNeeded(
 	ctx context.Context,
-	dbPath string,
-	client *ethclient.Client,
-) *reorgdetector.ReorgDetector {
-	rd, err := reorgdetector.New(ctx, client, dbPath)
+	components []string,
+	l2Client *ethclient.Client,
+	cfg claimsponsor.EVMClaimSponsorConfig,
+) *claimsponsor.ClaimSponsor {
+	if !isNeeded([]string{RPC}, components) || !cfg.Enabled {
+		return nil
+	}
+	// In the future, backends other than EVM may be supported, which will require a different config.
+	// But today only EVM is supported.
+	ethTxManagerL2, err := ethtxmanager.New(cfg.EthTxManager)
 	if err != nil {
 		log.Fatal(err)
 	}
-	return rd
+	go ethTxManagerL2.Start()
+	cs, err := claimsponsor.NewEVMClaimSponsor(
+		cfg.DBPath,
+		l2Client,
+		cfg.BridgeAddrL2,
+		cfg.SenderAddr,
+		cfg.MaxGas,
+		cfg.GasOffset,
+		ethTxManagerL2,
+		cfg.RetryAfterErrorPeriod.Duration,
+		cfg.MaxRetryAttemptsAfterError,
+		cfg.WaitTxToBeMinedPeriod.Duration,
+		cfg.WaitOnEmptyQueue.Duration,
+	)
+	if err != nil {
+		log.Fatalf("error creating claim sponsor: %s", err)
+	}
+	go cs.Start(ctx)
+	return cs
+}
+
+func runL1Bridge2InfoIndexSyncIfNeeded(
+	ctx context.Context,
+	components []string,
+	cfg l1bridge2infoindexsync.Config,
+	l1BridgeSync *bridgesync.BridgeSync,
+	l1InfoTreeSync *l1infotreesync.L1InfoTreeSync,
+	l1Client *ethclient.Client,
+) *l1bridge2infoindexsync.L1Bridge2InfoIndexSync {
+	if !isNeeded([]string{RPC}, components) {
+		return nil
+	}
+	l1Bridge2InfoIndexSync, err := l1bridge2infoindexsync.New(
+		cfg.DBPath,
+		l1BridgeSync,
+		l1InfoTreeSync,
+		l1Client,
+		cfg.RetryAfterErrorPeriod.Duration,
+		cfg.MaxRetryAttemptsAfterError,
+		cfg.WaitForSyncersPeriod.Duration,
+	)
+	if err != nil {
+		log.Fatalf("error creating l1Bridge2InfoIndexSync: %s", err)
+	}
+	go l1Bridge2InfoIndexSync.Start(ctx)
+	return l1Bridge2InfoIndexSync
+}
+
+func runLastGERSyncIfNeeded(
+	ctx context.Context,
+	components []string,
+	cfg lastgersync.Config,
+	reorgDetectorL2 *reorgdetector.ReorgDetector,
+	l2Client *ethclient.Client,
+	l1InfoTreeSync *l1infotreesync.L1InfoTreeSync,
+) *lastgersync.LastGERSync {
+	if !isNeeded([]string{RPC}, components) {
+		return nil
+	}
+	lastGERSync, err := lastgersync.New(
+		ctx,
+		cfg.DBPath,
+		reorgDetectorL2,
+		l2Client,
+		cfg.GlobalExitRootL2Addr,
+		l1InfoTreeSync,
+		cfg.RetryAfterErrorPeriod.Duration,
+		cfg.MaxRetryAttemptsAfterError,
+		etherman.BlockNumberFinality(cfg.BlockFinality),
+		cfg.WaitForNewBlocksPeriod.Duration,
+		cfg.DownloadBufferSize,
+	)
+	if err != nil {
+		log.Fatalf("error creating lastGERSync: %s", err)
+	}
+	go lastGERSync.Start(ctx)
+	return lastGERSync
+}
+
+func runBridgeSyncL1IfNeeded(
+	ctx context.Context,
+	components []string,
+	cfg bridgesync.Config,
+	reorgDetectorL1 *reorgdetector.ReorgDetector,
+	l1Client *ethclient.Client,
+) *bridgesync.BridgeSync {
+	if !isNeeded([]string{RPC}, components) {
+		return nil
+	}
+	bridgeSyncL1, err := bridgesync.NewL1(
+		ctx,
+		cfg.DBPath,
+		cfg.BridgeAddr,
+		cfg.SyncBlockChunkSize,
+		etherman.BlockNumberFinality(cfg.BlockFinality),
+		reorgDetectorL1,
+		l1Client,
+		cfg.InitialBlockNum,
+		cfg.WaitForNewBlocksPeriod.Duration,
+		cfg.RetryAfterErrorPeriod.Duration,
+		cfg.MaxRetryAttemptsAfterError,
+	)
+	if err != nil {
+		log.Fatalf("error creating bridgeSyncL1: %s", err)
+	}
+	go bridgeSyncL1.Start(ctx)
+	return bridgeSyncL1
+}
+
+func runBridgeSyncL2IfNeeded(
+	ctx context.Context,
+	components []string,
+	cfg bridgesync.Config,
+	reorgDetectorL2 *reorgdetector.ReorgDetector,
+	l2Client *ethclient.Client,
+) *bridgesync.BridgeSync {
+	// TODO: will be needed by AGGSENDER
+	if !isNeeded([]string{RPC}, components) {
+		return nil
+	}
+	bridgeSyncL2, err := bridgesync.NewL2(
+		ctx,
+		cfg.DBPath,
+		cfg.BridgeAddr,
+		cfg.SyncBlockChunkSize,
+		etherman.BlockNumberFinality(cfg.BlockFinality),
+		reorgDetectorL2,
+		l2Client,
+		cfg.InitialBlockNum,
+		cfg.WaitForNewBlocksPeriod.Duration,
+		cfg.RetryAfterErrorPeriod.Duration,
+		cfg.MaxRetryAttemptsAfterError,
+	)
+	if err != nil {
+		log.Fatalf("error creating bridgeSyncL2: %s", err)
+	}
+	go bridgeSyncL2.Start(ctx)
+	return bridgeSyncL2
+}
+
+func createRPC(
+	cfg jRPC.Config,
+	cdkNetworkID uint32,
+	sponsor *claimsponsor.ClaimSponsor,
+	l1InfoTree *l1infotreesync.L1InfoTreeSync,
+	l1Bridge2Index *l1bridge2infoindexsync.L1Bridge2InfoIndexSync,
+	injectedGERs *lastgersync.LastGERSync,
+	bridgeL1 *bridgesync.BridgeSync,
+	bridgeL2 *bridgesync.BridgeSync,
+) *jRPC.Server {
+	return jRPC.NewServer(cfg, []jRPC.Service{
+		{
+			Name: rpc.BRIDGE,
+			Service: rpc.NewBridgeEndpoints(
+				cfg.WriteTimeout.Duration,
+				cfg.ReadTimeout.Duration,
+				cdkNetworkID,
+				sponsor,
+				l1InfoTree,
+				l1Bridge2Index,
+				injectedGERs,
+				bridgeL1,
+				bridgeL2,
+			),
+		},
+	})
 }
diff --git a/common/config.go b/common/config.go
index d8f2d1cef..62670c6f5 100644
--- a/common/config.go
+++ b/common/config.go
@@ -5,6 +5,8 @@ import "github.com/0xPolygon/cdk/translator"
 type Config struct {
 	// IsValidiumMode has the value true if the sequence sender is running in validium mode.
 	IsValidiumMode bool `mapstructure:"IsValidiumMode"`
+	// NetworkID is the networkID of the CDK being run
+	NetworkID uint32 `mapstructure:"NetworkID"`
 	// Contract Versions: elderberry, banana
 	ContractVersions string            `mapstructure:"ContractVersions"`
 	Translator       translator.Config `mapstructure:"Translator"`
diff --git a/config/config.go b/config/config.go
index 5759e7a25..76abbf203 100644
--- a/config/config.go
+++ b/config/config.go
@@ -6,11 +6,16 @@ import (
 	"path/filepath"
 	"strings"
 
+	jRPC "github.com/0xPolygon/cdk-rpc/rpc"
 	"github.com/0xPolygon/cdk/aggoracle"
 	"github.com/0xPolygon/cdk/aggregator"
+	"github.com/0xPolygon/cdk/bridgesync"
+	"github.com/0xPolygon/cdk/claimsponsor"
 	"github.com/0xPolygon/cdk/common"
 	ethermanconfig "github.com/0xPolygon/cdk/etherman/config"
+	"github.com/0xPolygon/cdk/l1bridge2infoindexsync"
 	"github.com/0xPolygon/cdk/l1infotreesync"
+	"github.com/0xPolygon/cdk/lastgersync"
 	"github.com/0xPolygon/cdk/log"
 	"github.com/0xPolygon/cdk/reorgdetector"
 	"github.com/0xPolygon/cdk/sequencesender"
@@ -76,10 +81,32 @@ type Config struct {
 	Common common.Config
 	// Configuration of the reorg detector service to be used for the L1
 	ReorgDetectorL1 reorgdetector.Config
+	// Configuration of the reorg detector service to be used for the L2
+	ReorgDetectorL2 reorgdetector.Config
 	// Configuration of the aggOracle service
 	AggOracle aggoracle.Config
 	// Configuration of the L1 Info Treee Sync service
 	L1InfoTreeSync l1infotreesync.Config
+
+	// RPC is the config for the RPC server
+	RPC jRPC.Config
+
+	// ClaimSponsor is the config for the claim sponsor
+	ClaimSponsor claimsponsor.EVMClaimSponsorConfig
+
+	// L1Bridge2InfoIndexSync is the config for the synchronizer that maintains the relation
+	// between bridges originated on L1 and the L1 info tree index. Needed for the bridge service (RPC)
+	L1Bridge2InfoIndexSync l1bridge2infoindexsync.Config
+
+	// BridgeL1Sync is the configuration for the L1 bridge synchronizer
+	BridgeL1Sync bridgesync.Config
+
+	// BridgeL2Sync is the configuration for the L2 bridge synchronizer
+	BridgeL2Sync bridgesync.Config
+
+	// LastGERSync is the config for the synchronizer in charge of syncing the last GER injected on L2.
+	// Needed for the bridge service (RPC)
+	LastGERSync lastgersync.Config
 }
 
 // Default parses the default configuration values.
diff --git a/config/default.go b/config/default.go
index c32e43d95..c193491f8 100644
--- a/config/default.go
+++ b/config/default.go
@@ -6,6 +6,7 @@ ForkUpgradeBatchNumber = 0
 ForkUpgradeNewForkId = 0
 
 [Common]
+NetworkID = 1
 IsValidiumMode = false
 ContractVersions = "banana"
 
@@ -125,13 +126,16 @@ SequencerPrivateKey = {}
 				Enabled = false
 
 [ReorgDetectorL1]
-DBPath = "/tmp/reorgdetector"
+DBPath = "/tmp/reorgdetectorl1"
+
+[ReorgDetectorL2]
+DBPath = "/tmp/reorgdetectorl2"
 
 [L1InfoTreeSync]
 DBPath = "/tmp/L1InfoTreeSync"
 GlobalExitRootAddr="0x8464135c8F25Da09e49BC8782676a84730C318bC"
 SyncBlockChunkSize=10
-BlockFinality="latest"
+BlockFinality="LatestBlock"
 URLRPCL1="http://test-aggoracle-l1:8545"
 WaitForNewBlocksPeriod="100ms"
 InitialBlock=0
@@ -139,7 +143,7 @@ InitialBlock=0
 [AggOracle]
 TargetChainType="EVM"
 URLRPCL1="http://test-aggoracle-l1:8545"
-BlockFinality="latest"
+BlockFinality="FinalizedBlock"
 WaitPeriodNextGER="100ms"
 	[AggOracle.EVMSender]
 		GlobalExitRootL2="0x8464135c8F25Da09e49BC8782676a84730C318bC"
@@ -159,7 +163,7 @@ WaitPeriodNextGER="100ms"
 				ForcedGas = 0
 				GasPriceMarginFactor = 1
 				MaxGasPriceLimit = 0
-				PersistenceFilename = "/tmp/ethtxmanager.json"
+				PersistenceFilename = "/tmp/ethtxmanager-sequencesender.json"
 				ReadPendingL1Txs = false
 				SafeStatusL1NumberOfBlocks = 5
 				FinalizedStatusL1NumberOfBlocks = 10
@@ -168,4 +172,79 @@ WaitPeriodNextGER="100ms"
 						MultiGasProvider = false
 						L1ChainID = 1337
 						HTTPHeaders = []
+
+[RPC]
+Host = "0.0.0.0"
+Port = 5576
+ReadTimeout = "2s"
+WriteTimeout = "2s"
+MaxRequestsPerIPAndSecond = 10
+
+[ClaimSponsor]
+DBPath = "/tmp/claimsopnsor"
+Enabled = true
+SenderAddr = "0xfa3b44587990f97ba8b6ba7e230a5f0e95d14b3d"
+BridgeAddrL2 = "0xB7098a13a48EcE087d3DA15b2D28eCE0f89819B8"
+MaxGas = 200000
+RetryAfterErrorPeriod = "1s"
+MaxRetryAttemptsAfterError = -1
+WaitTxToBeMinedPeriod = "3s"
+WaitOnEmptyQueue = "3s"
+GasOffset = 0
+	[ClaimSponsor.EthTxManager]
+		FrequencyToMonitorTxs = "1s"
+		WaitTxToBeMined = "2s"
+		GetReceiptMaxTime = "250ms"
+		GetReceiptWaitInterval = "1s"
+		PrivateKeys = [
+			{Path = "/app/keystore/claimsopnsor.keystore", Password = "testonly"},
+		]
+		ForcedGas = 0
+		GasPriceMarginFactor = 1
+		MaxGasPriceLimit = 0
+		PersistenceFilename = "/tmp/ethtxmanager-claimsponsor.json"
+		ReadPendingL1Txs = false
+		SafeStatusL1NumberOfBlocks = 5
+		FinalizedStatusL1NumberOfBlocks = 10
+			[ClaimSponsor.EthTxManager.Etherman]
+				URL = "http://test-aggoracle-l2"
+				MultiGasProvider = false
+				L1ChainID = 1337
+				HTTPHeaders = []
+
+[L1Bridge2InfoIndexSync]
+DBPath = "/tmp/l1bridge2infoindexsync"
+RetryAfterErrorPeriod = "1s"
+MaxRetryAttemptsAfterError = -1
+WaitForSyncersPeriod = "3s"
+
+[BridgeL1Sync]
+DBPath = "/tmp/bridgel1sync"
+BlockFinality = "LatestBlock"
+InitialBlockNum = 0
+BridgeAddr = "0xB7098a13a48EcE087d3DA15b2D28eCE0f89819B8"
+SyncBlockChunkSize = 100
+RetryAfterErrorPeriod = "1s"
+MaxRetryAttemptsAfterError = -1
+WaitForNewBlocksPeriod = "3s"
+
+[BridgeL2Sync]
+DBPath = "/tmp/bridgel2sync"
+BlockFinality = "LatestBlock"
+InitialBlockNum = 0
+BridgeAddr = "0xB7098a13a48EcE087d3DA15b2D28eCE0f89819B8"
+SyncBlockChunkSize = 100
+RetryAfterErrorPeriod = "1s"
+MaxRetryAttemptsAfterError = -1
+WaitForNewBlocksPeriod = "3s"
+
+[LastGERSync]
+DBPath = "/tmp/lastgersync"
+BlockFinality = "LatestBlock"
+InitialBlockNum = 0
+GlobalExitRootL2Addr = "0xa40d5f56745a118d0906a34e69aec8c0db1cb8fa"
+RetryAfterErrorPeriod = "1s"
+MaxRetryAttemptsAfterError = -1
+WaitForNewBlocksPeriod = "1s"
+DownloadBufferSize = 100
 `
diff --git a/go.mod b/go.mod
index db589d912..559d39853 100644
--- a/go.mod
+++ b/go.mod
@@ -21,6 +21,8 @@ require (
 	github.com/spf13/viper v1.19.0
 	github.com/stretchr/testify v1.9.0
 	github.com/urfave/cli/v2 v2.27.2
+	go.opentelemetry.io/otel v1.24.0
+	go.opentelemetry.io/otel/metric v1.24.0
 	go.uber.org/zap v1.27.0
 	golang.org/x/crypto v0.24.0
 	golang.org/x/net v0.26.0
@@ -63,6 +65,8 @@ require (
 	github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 // indirect
 	github.com/getsentry/sentry-go v0.18.0 // indirect
 	github.com/go-gorp/gorp/v3 v3.1.0 // indirect
+	github.com/go-logr/logr v1.4.1 // indirect
+	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/go-ole/go-ole v1.3.0 // indirect
 	github.com/go-pkgz/expirable-cache v0.0.3 // indirect
 	github.com/go-stack/stack v1.8.1 // indirect
@@ -133,6 +137,7 @@ require (
 	github.com/valyala/histogram v1.2.0 // indirect
 	github.com/wk8/go-ordered-map/v2 v2.1.8 // indirect
 	github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 // indirect
+	go.opentelemetry.io/otel/trace v1.24.0 // indirect
 	go.uber.org/multierr v1.10.0 // indirect
 	golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa // indirect
 	golang.org/x/sync v0.7.0 // indirect
diff --git a/go.sum b/go.sum
index 6f3298bd0..1857b5416 100644
--- a/go.sum
+++ b/go.sum
@@ -20,10 +20,6 @@ github.com/0xPolygonHermez/zkevm-data-streamer v0.2.3 h1:zJ06KCGLMDOap4slop/QmiM
 github.com/0xPolygonHermez/zkevm-data-streamer v0.2.3/go.mod h1:bv7DjATsczN2WvFt26jv34TWv6rfvYM1SqegrgrFwfI=
 github.com/0xPolygonHermez/zkevm-ethtx-manager v0.1.10-0.20240716105056-c051c96d0234 h1:QElCysO7f2xaknY/RDjxcs7IVmcgORfsCX2g+YD0Ko4=
 github.com/0xPolygonHermez/zkevm-ethtx-manager v0.1.10-0.20240716105056-c051c96d0234/go.mod h1:zBZWxwOHKlw+ghd9roQLgIkDZWA7e7qO3EsfQQT/+oQ=
-github.com/0xPolygonHermez/zkevm-synchronizer-l1 v0.6.3-0.20240712085301-0310358abb59 h1:Qwh92vFEXnpmDggQaZA3648viEQfLdMnAw/WFSY+2i8=
-github.com/0xPolygonHermez/zkevm-synchronizer-l1 v0.6.3-0.20240712085301-0310358abb59/go.mod h1:/LHf8jPQeBYKABM1xUmN1dKaFVIJc9jMQDSGBDJ7CS0=
-github.com/0xPolygonHermez/zkevm-synchronizer-l1 v0.6.3 h1:C+jNYr/CDMMn8wn3HqZqLTPU0luNYIB35pnxVf9O8TM=
-github.com/0xPolygonHermez/zkevm-synchronizer-l1 v0.6.3/go.mod h1:/LHf8jPQeBYKABM1xUmN1dKaFVIJc9jMQDSGBDJ7CS0=
 github.com/0xPolygonHermez/zkevm-synchronizer-l1 v0.6.4 h1:6hk1NCKyR+JycmRFG7Uy7Ko3GghZ3DXYMf5muo3F29Q=
 github.com/0xPolygonHermez/zkevm-synchronizer-l1 v0.6.4/go.mod h1:/LHf8jPQeBYKABM1xUmN1dKaFVIJc9jMQDSGBDJ7CS0=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
@@ -132,6 +128,11 @@ github.com/go-gorp/gorp/v3 v3.1.0 h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs
 github.com/go-gorp/gorp/v3 v3.1.0/go.mod h1:dLEjIyyRNiXvNZ8PSmzpt1GsWAUK8kjVhEpjH8TixEw=
 github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
 github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
+github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
 github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
 github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
 github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
@@ -440,6 +441,12 @@ github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913/go.mod h1:4aEEwZQut
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
+go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo=
+go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo=
+go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI=
+go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco=
+go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI=
+go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU=
 go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
 go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
 go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
diff --git a/l1bridge2infoindexsync/config.go b/l1bridge2infoindexsync/config.go
new file mode 100644
index 000000000..ef37f7382
--- /dev/null
+++ b/l1bridge2infoindexsync/config.go
@@ -0,0 +1,15 @@
+package l1bridge2infoindexsync
+
+import "github.com/0xPolygon/cdk/config/types"
+
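+// Config holds the configuration of the L1 bridge to L1 info tree index synchronizer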
+type Config struct {
+	// DBPath path of the DB
+	DBPath string `mapstructure:"DBPath"`
+	// RetryAfterErrorPeriod is the time that will be waited when an unexpected error happens before retry
+	RetryAfterErrorPeriod types.Duration `mapstructure:"RetryAfterErrorPeriod"`
+	// MaxRetryAttemptsAfterError is the maximum number of consecutive attempts that will happen before panicking.
+	// Any number smaller than zero will be considered unlimited retries
+	MaxRetryAttemptsAfterError int `mapstructure:"MaxRetryAttemptsAfterError"`
+	// WaitForSyncersPeriod time that will be waited when the synchronizer has reached the latest state
+	WaitForSyncersPeriod types.Duration `mapstructure:"WaitForSyncersPeriod"`
+}
diff --git a/l1bridge2infoindexsync/downloader.go b/l1bridge2infoindexsync/downloader.go
new file mode 100644
index 000000000..f14fcf8e4
--- /dev/null
+++ b/l1bridge2infoindexsync/downloader.go
@@ -0,0 +1,66 @@
+package l1bridge2infoindexsync
+
+import (
+	"context"
+	"math/big"
+
+	"github.com/0xPolygon/cdk/bridgesync"
+	"github.com/0xPolygon/cdk/l1infotreesync"
+	"github.com/ethereum/go-ethereum"
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/rpc"
+)
+
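+// downloader wraps the read operations needed by the driver: it queries the L1 bridge syncer,
+// the L1 info tree syncer and the L1 client.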
+type downloader struct {
+	l1Bridge *bridgesync.BridgeSync
+	l1Info   *l1infotreesync.L1InfoTreeSync
+	l1Client ethereum.ChainReader
+}
+
+func newDownloader(
+	l1Bridge *bridgesync.BridgeSync,
+	l1Info *l1infotreesync.L1InfoTreeSync,
+	l1Client ethereum.ChainReader,
+) *downloader {
+	return &downloader{
+		l1Bridge: l1Bridge,
+		l1Info:   l1Info,
+		l1Client: l1Client,
+	}
+}
+
+func (d *downloader) getLastFinalizedL1Block(ctx context.Context) (uint64, error) {
+	b, err := d.l1Client.BlockByNumber(ctx, big.NewInt(int64(rpc.FinalizedBlockNumber)))
+	if err != nil {
+		return 0, err
+	}
+	return b.NumberU64(), nil
+}
+
+func (d *downloader) getLastProcessedBlockBridge(ctx context.Context) (uint64, error) {
+	return d.l1Bridge.GetLastProcessedBlock(ctx)
+}
+
+func (d *downloader) getLastProcessedBlockL1InfoTree(ctx context.Context) (uint64, error) {
+	return d.l1Info.GetLastProcessedBlock(ctx)
+}
+
+func (d *downloader) getLastL1InfoIndexUntilBlock(ctx context.Context, blockNum uint64) (uint32, error) {
+	info, err := d.l1Info.GetLatestInfoUntilBlock(ctx, blockNum)
+	if err != nil {
+		return 0, err
+	}
+	return info.L1InfoTreeIndex, nil
+}
+
+func (d *downloader) getMainnetExitRootAtL1InfoTreeIndex(ctx context.Context, index uint32) (common.Hash, error) {
+	leaf, err := d.l1Info.GetInfoByIndex(ctx, index)
+	if err != nil {
+		return common.Hash{}, err
+	}
+	return leaf.MainnetExitRoot, nil
+}
+
+func (d *downloader) getBridgeIndex(ctx context.Context, mainnetExitRoot common.Hash) (uint32, error) {
+	return d.l1Bridge.GetBridgeIndexByRoot(ctx, mainnetExitRoot)
+}
diff --git a/l1bridge2infoindexsync/driver.go b/l1bridge2infoindexsync/driver.go
new file mode 100644
index 000000000..ce681bf08
--- /dev/null
+++ b/l1bridge2infoindexsync/driver.go
@@ -0,0 +1,201 @@
+package l1bridge2infoindexsync
+
+import (
+	"context"
+	"time"
+
+	"github.com/0xPolygon/cdk/l1infotreesync"
+	"github.com/0xPolygon/cdk/log"
+	"github.com/0xPolygon/cdk/sync"
+)
+
+type driver struct {
+	downloader           *downloader
+	processor            *processor
+	rh                   *sync.RetryHandler
+	waitForSyncersPeriod time.Duration
+}
+
+func newDriver(
+	downloader *downloader,
+	processor *processor,
+	rh *sync.RetryHandler,
+	waitForSyncersPeriod time.Duration,
+) *driver {
+	return &driver{
+		downloader:           downloader,
+		processor:            processor,
+		rh:                   rh,
+		waitForSyncersPeriod: waitForSyncersPeriod,
+	}
+}
+
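+// sync runs the main loop: it waits for the underlying syncers to catch up with the last
+// finalized L1 block, builds the bridge index -> L1 info tree index relations for the new
+// range and stores them through the processor. Errors are retried via the retry handler.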
+func (d *driver) sync(ctx context.Context) {
+	var (
+		attempts                 int
+		lpbProcessor             uint64
+		lastProcessedL1InfoIndex uint32
+		err                      error
+	)
+	for {
+		lpbProcessor, lastProcessedL1InfoIndex, err = d.processor.GetLastProcessedBlockAndL1InfoTreeIndex(ctx)
+		if err != nil {
+			attempts++
+			log.Errorf("error getting last processed block and index: %v", err)
+			d.rh.Handle("GetLastProcessedBlockAndL1InfoTreeIndex", attempts)
+			continue
+		}
+		break
+	}
+	for {
+		attempts = 0
+		var (
+			syncUntilBlock uint64
+			shouldWait     bool
+		)
+		for {
+			syncUntilBlock, shouldWait, err = d.getTargetSynchronizationBlock(ctx, lpbProcessor)
+			if err != nil {
+				attempts++
+				log.Errorf("error getting target sync block: %v", err)
+				d.rh.Handle("getTargetSynchronizationBlock", attempts)
+				continue
+			}
+			break
+		}
+		if shouldWait {
+			log.Debugf("waiting for syncers to catch up")
+			time.Sleep(d.waitForSyncersPeriod)
+			continue
+		}
+
+		attempts = 0
+		var lastL1InfoTreeIndex uint32
+		found := false
+		for {
+			lastL1InfoTreeIndex, err = d.downloader.getLastL1InfoIndexUntilBlock(ctx, syncUntilBlock)
+			if err != nil {
+				if err == l1infotreesync.ErrNotFound || err == l1infotreesync.ErrBlockNotProcessed {
+					log.Debugf("l1 info tree index not ready, querying until block %d: %s", syncUntilBlock, err)
+					break
+				}
+				attempts++
+				log.Errorf("error getting last l1 info tree index: %v", err)
+				d.rh.Handle("getLastL1InfoIndexUntilBlock", attempts)
+				continue
+			}
+			found = true
+			break
+		}
+		if !found {
+			time.Sleep(d.waitForSyncersPeriod)
+			continue
+		}
+
+		relations := []bridge2L1InfoRelation{}
+		var init uint32
+		if lastProcessedL1InfoIndex > 0 {
+			init = lastProcessedL1InfoIndex + 1
+		}
+		if init <= lastL1InfoTreeIndex {
+			log.Debugf("getting relations from index %d to %d", init, lastL1InfoTreeIndex)
+		}
+		for i := init; i <= lastL1InfoTreeIndex; i++ {
+			attempts = 0
+			for {
+				relation, err := d.getRelation(ctx, i)
+				if err != nil {
+					attempts++
+					log.Errorf("error getting relation: %v", err)
+					d.rh.Handle("getRelation", attempts)
+					continue
+				}
+				relations = append(relations, relation)
+				break
+			}
+		}
+
+		attempts = 0
+		log.Debugf("processing until block %d: %+v", syncUntilBlock, relations)
+		for {
+			if err := d.processor.processUntilBlock(ctx, syncUntilBlock, relations); err != nil {
+				attempts++
+				log.Errorf("error processing block: %v", err)
+				d.rh.Handle("processUntilBlock", attempts)
+				continue
+			}
+			break
+		}
+
+		lpbProcessor = syncUntilBlock
+		if len(relations) > 0 {
+			lastProcessedL1InfoIndex = relations[len(relations)-1].l1InfoTreeIndex
+			log.Debugf("last processed index %d", lastProcessedL1InfoIndex)
+		}
+	}
+}
+
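+// getTargetSynchronizationBlock returns the block to sync up to: the smallest of the last
+// finalized L1 block, the last block processed by the L1 info tree syncer and the last block
+// processed by the bridge syncer. shouldWait is true when the processor has already reached
+// any of those blocks.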
+func (d *driver) getTargetSynchronizationBlock(ctx context.Context, lpbProcessor uint64) (syncUntilBlock uint64, shouldWait bool, err error) {
+	lastFinalised, err := d.downloader.getLastFinalizedL1Block(ctx) // NOTE: if this had configurable finality, it would be needed to deal with reorgs
+	if err != nil {
+		return
+	}
+	checkProcessedBlockFn := func(blockToCheck, lastProcessed uint64, blockType string) bool {
+		if blockToCheck >= lastProcessed {
+			log.Debugf(
+				"should wait because the last processed block (%d) is greater than or equal to the %s (%d)",
+				blockToCheck, blockType, lastProcessed)
+			shouldWait = true
+			return true
+		}
+		return false
+	}
+	if checkProcessedBlockFn(lpbProcessor, lastFinalised, "last finalised") {
+		return
+	}
+	lpbInfo, err := d.downloader.getLastProcessedBlockL1InfoTree(ctx)
+	if err != nil {
+		return
+	}
+	if checkProcessedBlockFn(lpbProcessor, lpbInfo, "last block from L1 Info tree sync") {
+		return
+	}
+	lpbBridge, err := d.downloader.getLastProcessedBlockBridge(ctx)
+	if err != nil {
+		return
+	}
+	if checkProcessedBlockFn(lpbProcessor, lpbBridge, "last block from l1 bridge sync") {
+		return
+	}
+
+	// Bridge, L1Info and L1 are ahead of the processor. Pick the smallest block num as target
+	if lastFinalised <= lpbInfo {
+		log.Debugf("target sync block is the last finalised block (%d)", lastFinalised)
+		syncUntilBlock = lastFinalised
+	} else {
+		log.Debugf("target sync block is the last processed block from L1 info tree (%d)", lpbInfo)
+		syncUntilBlock = lpbInfo
+	}
+	if lpbBridge < syncUntilBlock {
+		log.Debugf("target sync block is the last processed block from bridge (%d)", lpbBridge)
+		syncUntilBlock = lpbBridge
+	}
+	return
+}
+
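+// getRelation resolves the bridge (deposit count) index associated to an L1 info tree index
+// by looking up the mainnet exit root of the leaf and asking the bridge syncer for its index.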
+func (d *driver) getRelation(ctx context.Context, l1InfoIndex uint32) (bridge2L1InfoRelation, error) {
+	mer, err := d.downloader.getMainnetExitRootAtL1InfoTreeIndex(ctx, l1InfoIndex)
+	if err != nil {
+		return bridge2L1InfoRelation{}, err
+	}
+
+	bridgeIndex, err := d.downloader.getBridgeIndex(ctx, mer)
+	if err != nil {
+		return bridge2L1InfoRelation{}, err
+	}
+
+	return bridge2L1InfoRelation{
+		bridgeIndex:     bridgeIndex,
+		l1InfoTreeIndex: l1InfoIndex,
+	}, nil
+}
diff --git a/l1bridge2infoindexsync/e2e_test.go b/l1bridge2infoindexsync/e2e_test.go
new file mode 100644
index 000000000..deb613f32
--- /dev/null
+++ b/l1bridge2infoindexsync/e2e_test.go
@@ -0,0 +1,224 @@
+package l1bridge2infoindexsync_test
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"math/big"
+	"strconv"
+	"testing"
+	"time"
+
+	"github.com/0xPolygon/cdk-contracts-tooling/contracts/elderberry-paris/polygonzkevmbridgev2"
+	"github.com/0xPolygon/cdk-contracts-tooling/contracts/elderberry-paris/polygonzkevmglobalexitrootv2"
+	"github.com/0xPolygon/cdk/bridgesync"
+	"github.com/0xPolygon/cdk/etherman"
+	"github.com/0xPolygon/cdk/l1bridge2infoindexsync"
+	"github.com/0xPolygon/cdk/l1infotreesync"
+	"github.com/0xPolygon/cdk/reorgdetector"
+	"github.com/0xPolygon/cdk/test/contracts/transparentupgradableproxy"
+	"github.com/ethereum/go-ethereum/accounts/abi/bind"
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/ethereum/go-ethereum/ethclient/simulated"
+	"github.com/ethereum/go-ethereum/rpc"
+	"github.com/stretchr/testify/require"
+)
+
+func newSimulatedClient(authDeployer, authCaller *bind.TransactOpts) (
+	client *simulated.Backend,
+	gerAddr common.Address,
+	bridgeAddr common.Address,
+	gerContract *polygonzkevmglobalexitrootv2.Polygonzkevmglobalexitrootv2,
+	bridgeContract *polygonzkevmbridgev2.Polygonzkevmbridgev2,
+	err error,
+) {
+	ctx := context.Background()
+	balance, _ := new(big.Int).SetString("10000000000000000000000000", 10) //nolint:gomnd
+	genesisAlloc := map[common.Address]types.Account{
+		authDeployer.From: {
+			Balance: balance,
+		},
+		authCaller.From: {
+			Balance: balance,
+		},
+	}
+	blockGasLimit := uint64(999999999999999999) //nolint:gomnd
+	client = simulated.NewBackend(genesisAlloc, simulated.WithBlockGasLimit(blockGasLimit))
+
+	bridgeImplementationAddr, _, _, err := polygonzkevmbridgev2.DeployPolygonzkevmbridgev2(authDeployer, client.Client())
+	if err != nil {
+		return
+	}
+	client.Commit()
+
+	nonce, err := client.Client().PendingNonceAt(ctx, authDeployer.From)
+	if err != nil {
+		return
+	}
+	precalculatedAddr := crypto.CreateAddress(authDeployer.From, nonce+1)
+	bridgeABI, err := polygonzkevmbridgev2.Polygonzkevmbridgev2MetaData.GetAbi()
+	if err != nil {
+		return
+	}
+	if bridgeABI == nil {
+		err = errors.New("GetABI returned nil")
+		return
+	}
+	dataCallProxy, err := bridgeABI.Pack("initialize",
+		uint32(0),        // networkIDMainnet
+		common.Address{}, // gasTokenAddressMainnet
+		uint32(0),        // gasTokenNetworkMainnet
+		precalculatedAddr,
+		common.Address{},
+		[]byte{}, // gasTokenMetadata
+	)
+	if err != nil {
+		return
+	}
+	bridgeAddr, _, _, err = transparentupgradableproxy.DeployTransparentupgradableproxy(
+		authDeployer,
+		client.Client(),
+		bridgeImplementationAddr,
+		authDeployer.From,
+		dataCallProxy,
+	)
+	if err != nil {
+		return
+	}
+	client.Commit()
+	bridgeContract, err = polygonzkevmbridgev2.NewPolygonzkevmbridgev2(bridgeAddr, client.Client())
+	if err != nil {
+		return
+	}
+	checkGERAddr, err := bridgeContract.GlobalExitRootManager(&bind.CallOpts{})
+	if err != nil {
+		return
+	}
+	if precalculatedAddr != checkGERAddr {
+		err = errors.New("error deploying bridge")
+		return
+	}
+
+	gerAddr, _, gerContract, err = polygonzkevmglobalexitrootv2.DeployPolygonzkevmglobalexitrootv2(
+		authDeployer, client.Client(), authCaller.From, bridgeAddr,
+	)
+	if err != nil {
+		return
+	}
+	client.Commit()
+
+	if precalculatedAddr != gerAddr {
+		err = errors.New("error calculating addr")
+	}
+	return
+}
+
+func TestE2E(t *testing.T) {
+	ctx := context.Background()
+	dbPathBridgeSync := t.TempDir()
+	dbPathL1Sync := t.TempDir()
+	dbPathReorg := t.TempDir()
+	dbPathL12InfoSync := t.TempDir()
+
+	privateKey, err := crypto.GenerateKey()
+	require.NoError(t, err)
+	authDeployer, err := bind.NewKeyedTransactorWithChainID(privateKey, big.NewInt(1337))
+	require.NoError(t, err)
+	privateKey, err = crypto.GenerateKey()
+	require.NoError(t, err)
+	auth, err := bind.NewKeyedTransactorWithChainID(privateKey, big.NewInt(1337))
+	require.NoError(t, err)
+	require.NotEqual(t, authDeployer.From, auth.From)
+	client, gerAddr, bridgeAddr, gerSc, bridgeSc, err := newSimulatedClient(authDeployer, auth)
+	require.NoError(t, err)
+	rd, err := reorgdetector.New(ctx, client.Client(), dbPathReorg)
+	require.NoError(t, err)
+	go rd.Start(ctx)
+
+	bridgeSync, err := bridgesync.NewL1(ctx, dbPathBridgeSync, bridgeAddr, 10, etherman.LatestBlock, rd, client.Client(), 0, time.Millisecond*10, 0, 0)
+	require.NoError(t, err)
+	go bridgeSync.Start(ctx)
+
+	l1Sync, err := l1infotreesync.New(
+		ctx,
+		dbPathL1Sync,
+		gerAddr,
+		common.Address{},
+		10,
+		etherman.SafeBlock,
+		rd,
+		client.Client(),
+		time.Millisecond,
+		0,
+		time.Millisecond,
+		3,
+	)
+	require.NoError(t, err)
+	go l1Sync.Start(ctx)
+
+	bridge2InfoSync, err := l1bridge2infoindexsync.New(dbPathL12InfoSync, bridgeSync, l1Sync, client.Client(), 0, 0, time.Millisecond)
+	require.NoError(t, err)
+	go bridge2InfoSync.Start(ctx)
+
+	// Send bridge txs
+	expectedIndex := -1
+	for i := 0; i < 10; i++ {
+		bridge := bridgesync.Bridge{
+			Amount:             big.NewInt(0),
+			DestinationNetwork: 3,
+			DestinationAddress: common.HexToAddress("f00"),
+		}
+		_, err := bridgeSc.BridgeAsset(
+			auth,
+			bridge.DestinationNetwork,
+			bridge.DestinationAddress,
+			bridge.Amount,
+			bridge.OriginAddress,
+			true, nil,
+		)
+		require.NoError(t, err)
+		expectedIndex++
+		client.Commit()
+
+		// Wait for block to be finalised
+		updateAtBlock, err := client.Client().BlockNumber(ctx)
+		require.NoError(t, err)
+		for {
+			lastFinalisedBlock, err := client.Client().BlockByNumber(ctx, big.NewInt(int64(rpc.FinalizedBlockNumber)))
+			require.NoError(t, err)
+			if lastFinalisedBlock.NumberU64() >= updateAtBlock {
+				break
+			}
+			client.Commit()
+			time.Sleep(time.Microsecond)
+		}
+
+		// Wait for syncer to catch up
+		syncerUpToDate := false
+		var errMsg string
+		for i := 0; i < 10; i++ {
+			lpb, err := bridge2InfoSync.GetLastProcessedBlock(ctx)
+			require.NoError(t, err)
+			lb, err := client.Client().BlockByNumber(ctx, big.NewInt(int64(rpc.FinalizedBlockNumber)))
+			require.NoError(t, err)
+			if lpb == lb.NumberU64() {
+				syncerUpToDate = true
+				break
+			}
+			time.Sleep(time.Millisecond * 10)
+			errMsg = fmt.Sprintf("last block from client: %d, last block from syncer: %d", lb.NumberU64(), lpb)
+		}
+		require.True(t, syncerUpToDate, errMsg)
+
+		actualIndex, err := bridge2InfoSync.GetL1InfoTreeIndexByDepositCount(ctx, uint32(i))
+		require.NoError(t, err)
+		require.Equal(t, uint32(expectedIndex), actualIndex)
+
+		if i%2 == 1 {
+			// Update L1 info tree without a bridge on L1
+			_, err = gerSc.UpdateExitRoot(auth, common.HexToHash(strconv.Itoa(i)))
+			require.NoError(t, err)
+			expectedIndex++
+			client.Commit()
+		}
+	}
+}
diff --git a/l1bridge2infoindexsync/l1bridge2infoindexsync.go b/l1bridge2infoindexsync/l1bridge2infoindexsync.go
new file mode 100644
index 000000000..b1c8fc551
--- /dev/null
+++ b/l1bridge2infoindexsync/l1bridge2infoindexsync.go
@@ -0,0 +1,57 @@
+package l1bridge2infoindexsync
+
+import (
+	"context"
+	"time"
+
+	"github.com/0xPolygon/cdk/bridgesync"
+	"github.com/0xPolygon/cdk/l1infotreesync"
+	"github.com/0xPolygon/cdk/sync"
+	"github.com/ethereum/go-ethereum"
+)
+
+type L1Bridge2InfoIndexSync struct {
+	processor *processor
+	driver    *driver
+}
+
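+// New creates an L1Bridge2InfoIndexSync that relates L1 bridge deposits to L1 info tree indexes,
+// using the given bridge syncer, L1 info tree syncer and L1 client.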
+func New(
+	dbPath string,
+	l1Bridge *bridgesync.BridgeSync,
+	l1Info *l1infotreesync.L1InfoTreeSync,
+	l1Client ethereum.ChainReader,
+	retryAfterErrorPeriod time.Duration,
+	maxRetryAttemptsAfterError int,
+	waitForSyncersPeriod time.Duration,
+) (*L1Bridge2InfoIndexSync, error) {
+	dwn := newDownloader(l1Bridge, l1Info, l1Client)
+
+	prc, err := newProcessor(dbPath)
+	if err != nil {
+		return nil, err
+	}
+
+	rh := &sync.RetryHandler{
+		RetryAfterErrorPeriod:      retryAfterErrorPeriod,
+		MaxRetryAttemptsAfterError: maxRetryAttemptsAfterError,
+	}
+	drv := newDriver(dwn, prc, rh, waitForSyncersPeriod)
+
+	return &L1Bridge2InfoIndexSync{
+		driver:    drv,
+		processor: prc,
+	}, nil
+}
+
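+// Start runs the synchronization loop (blocking).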
+func (s *L1Bridge2InfoIndexSync) Start(ctx context.Context) {
+	s.driver.sync(ctx)
+}
+
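+// GetLastProcessedBlock returns the last L1 block processed by the synchronizer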
+func (s *L1Bridge2InfoIndexSync) GetLastProcessedBlock(ctx context.Context) (uint64, error) {
+	lpb, _, err := s.processor.GetLastProcessedBlockAndL1InfoTreeIndex(ctx)
+	return lpb, err
+}
+
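+// GetL1InfoTreeIndexByDepositCount returns the L1 info tree index associated to the given deposit count (bridge index)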
+func (s *L1Bridge2InfoIndexSync) GetL1InfoTreeIndexByDepositCount(ctx context.Context, depositCount uint32) (uint32, error) {
+	return s.processor.getL1InfoTreeIndexByBridgeIndex(ctx, depositCount)
+}
diff --git a/l1bridge2infoindexsync/processor.go b/l1bridge2infoindexsync/processor.go
new file mode 100644
index 000000000..9b86ad9b8
--- /dev/null
+++ b/l1bridge2infoindexsync/processor.go
@@ -0,0 +1,188 @@
+package l1bridge2infoindexsync
+
+import (
+	"context"
+	"errors"
+	"fmt"
+
+	"github.com/0xPolygon/cdk/common"
+	"github.com/ledgerwatch/erigon-lib/kv"
+	"github.com/ledgerwatch/erigon-lib/kv/mdbx"
+)
+
+const (
+	lastProcessedTable = "l1bridge2infoindexsync-lastProcessed"
+	relationTable      = "l1bridge2infoindexsync-relation"
+)
+
+var (
+	lastProcessedKey = []byte("lp")
+	ErrNotFound      = errors.New("not found")
+)
+
+type processor struct {
+	db kv.RwDB
+}
+
+type bridge2L1InfoRelation struct {
+	bridgeIndex     uint32
+	l1InfoTreeIndex uint32
+}
+
+type lastProcessed struct {
+	block uint64
+	index uint32
+}
+
+func (lp *lastProcessed) MarshalBinary() ([]byte, error) {
+	return append(common.Uint64ToBytes(lp.block), common.Uint32ToBytes(lp.index)...), nil
+}
+
+func (lp *lastProcessed) UnmarshalBinary(data []byte) error {
+	if len(data) != 12 {
+		return fmt.Errorf("expected len %d, actual len %d", 12, len(data))
+	}
+	lp.block = common.BytesToUint64(data[:8])
+	lp.index = common.BytesToUint32(data[8:])
+	return nil
+}
+
+func newProcessor(dbPath string) (*processor, error) {
+	tableCfgFunc := func(defaultBuckets kv.TableCfg) kv.TableCfg {
+		return kv.TableCfg{
+			lastProcessedTable: {},
+			relationTable:      {},
+		}
+	}
+	db, err := mdbx.NewMDBX(nil).
+		Path(dbPath).
+		WithTableCfg(tableCfgFunc).
+		Open()
+	if err != nil {
+		return nil, err
+	}
+	return &processor{
+		db: db,
+	}, nil
+}
+
+// GetLastProcessedBlockAndL1InfoTreeIndex returns the last block processed by the processor, including blocks
+// that don't have events
+func (p *processor) GetLastProcessedBlockAndL1InfoTreeIndex(ctx context.Context) (uint64, uint32, error) {
+	tx, err := p.db.BeginRo(ctx)
+	if err != nil {
+		return 0, 0, err
+	}
+	defer tx.Rollback()
+	return p.getLastProcessedBlockAndL1InfoTreeIndexWithTx(tx)
+}
+
+func (p *processor) getLastProcessedBlockAndL1InfoTreeIndexWithTx(tx kv.Tx) (uint64, uint32, error) {
+	if lastProcessedBytes, err := tx.GetOne(lastProcessedTable, lastProcessedKey); err != nil {
+		return 0, 0, err
+	} else if lastProcessedBytes == nil {
+		return 0, 0, nil
+	} else {
+		lp := &lastProcessed{}
+		if err := lp.UnmarshalBinary(lastProcessedBytes); err != nil {
+			return 0, 0, err
+		}
+		return lp.block, lp.index, nil
+	}
+}
+
+func (p *processor) updateLastProcessedBlockAndL1InfoTreeIndex(ctx context.Context, blockNum uint64, index uint32) error {
+	tx, err := p.db.BeginRw(ctx)
+	if err != nil {
+		return err
+	}
+	if err := p.updateLastProcessedBlockAndL1InfoTreeIndexWithTx(tx, blockNum, index); err != nil {
+		tx.Rollback()
+		return err
+	}
+	return tx.Commit()
+}
+
+func (p *processor) updateLastProcessedBlockAndL1InfoTreeIndexWithTx(tx kv.RwTx, blockNum uint64, index uint32) error {
+	lp := &lastProcessed{
+		block: blockNum,
+		index: index,
+	}
+	value, err := lp.MarshalBinary()
+	if err != nil {
+		return err
+	}
+	return tx.Put(lastProcessedTable, lastProcessedKey, value)
+}
+
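+// processUntilBlock stores the given bridge index -> L1 info tree index relations and updates
+// the last processed block. Existing relations are kept, so the lowest index wins for each bridge index.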
+func (p *processor) processUntilBlock(ctx context.Context, lastProcessedBlock uint64, relations []bridge2L1InfoRelation) error {
+	tx, err := p.db.BeginRw(ctx)
+	if err != nil {
+		return err
+	}
+
+	if len(relations) == 0 {
+		_, lastIndex, err := p.getLastProcessedBlockAndL1InfoTreeIndexWithTx(tx)
+		if err != nil {
+			tx.Rollback()
+			return err
+		}
+		if err := p.updateLastProcessedBlockAndL1InfoTreeIndexWithTx(
+			tx,
+			lastProcessedBlock,
+			lastIndex,
+		); err != nil {
+			tx.Rollback()
+			return err
+		}
+		return tx.Commit()
+	}
+
+	for _, relation := range relations {
+		if _, err := p.getL1InfoTreeIndexByBridgeIndexWithTx(tx, relation.bridgeIndex); err != ErrNotFound {
+			// Note that indexes could be repeated, as the L1 Info tree update can be produced by a rollup and not mainnet.
+			// Hence, if the index already exists, do not update it: it's better to keep the lowest possible index for the relation
+			continue
+		}
+		if err := tx.Put(
+			relationTable,
+			common.Uint32ToBytes(relation.bridgeIndex),
+			common.Uint32ToBytes(relation.l1InfoTreeIndex),
+		); err != nil {
+			tx.Rollback()
+			return err
+		}
+	}
+
+	if err := p.updateLastProcessedBlockAndL1InfoTreeIndexWithTx(
+		tx,
+		lastProcessedBlock,
+		relations[len(relations)-1].l1InfoTreeIndex,
+	); err != nil {
+		tx.Rollback()
+		return err
+	}
+
+	return tx.Commit()
+}
+
+func (p *processor) getL1InfoTreeIndexByBridgeIndex(ctx context.Context, depositCount uint32) (uint32, error) {
+	tx, err := p.db.BeginRo(ctx)
+	if err != nil {
+		return 0, err
+	}
+	defer tx.Rollback()
+
+	return p.getL1InfoTreeIndexByBridgeIndexWithTx(tx, depositCount)
+}
+
+func (p *processor) getL1InfoTreeIndexByBridgeIndexWithTx(tx kv.Tx, depositCount uint32) (uint32, error) {
+	indexBytes, err := tx.GetOne(relationTable, common.Uint32ToBytes(depositCount))
+	if err != nil {
+		return 0, err
+	}
+	if indexBytes == nil {
+		return 0, ErrNotFound
+	}
+	return common.BytesToUint32(indexBytes), nil
+}
diff --git a/l1bridge2infoindexsync/processor_test.go b/l1bridge2infoindexsync/processor_test.go
new file mode 100644
index 000000000..9305dd9bc
--- /dev/null
+++ b/l1bridge2infoindexsync/processor_test.go
@@ -0,0 +1,22 @@
+package l1bridge2infoindexsync
+
+import (
+	"context"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestDuplicatedKey(t *testing.T) {
+	dbPath := t.TempDir()
+	p, err := newProcessor(dbPath)
+	require.NoError(t, err)
+	ctx := context.Background()
+	err = p.processUntilBlock(ctx, 5, []bridge2L1InfoRelation{{bridgeIndex: 2, l1InfoTreeIndex: 2}})
+	require.NoError(t, err)
+	err = p.processUntilBlock(ctx, 7, []bridge2L1InfoRelation{{bridgeIndex: 2, l1InfoTreeIndex: 3}})
+	require.NoError(t, err)
+	l1InfoTreeIndex, err := p.getL1InfoTreeIndexByBridgeIndex(ctx, 2)
+	require.NoError(t, err)
+	require.Equal(t, uint32(2), l1InfoTreeIndex)
+}
diff --git a/l1infotreesync/config.go b/l1infotreesync/config.go
new file mode 100644
index 000000000..1b1d80143
--- /dev/null
+++ b/l1infotreesync/config.go
@@ -0,0 +1,20 @@
+package l1infotreesync
+
+import (
+	"github.com/0xPolygon/cdk/config/types"
+	"github.com/ethereum/go-ethereum/common"
+)
+
+type Config struct {
+	DBPath             string         `mapstructure:"DBPath"`
+	GlobalExitRootAddr common.Address `mapstructure:"GlobalExitRootAddr"`
+	RollupManagerAddr  common.Address `mapstructure:"RollupManagerAddr"`
+	SyncBlockChunkSize uint64         `mapstructure:"SyncBlockChunkSize"`
+	// BlockFinality indicates the status of the blocks that will be queried in order to sync
+	BlockFinality              string         `jsonschema:"enum=LatestBlock, enum=SafeBlock, enum=PendingBlock, enum=FinalizedBlock, enum=EarliestBlock" mapstructure:"BlockFinality"`
+	URLRPCL1                   string         `mapstructure:"URLRPCL1"`
+	WaitForNewBlocksPeriod     types.Duration `mapstructure:"WaitForNewBlocksPeriod"`
+	InitialBlock               uint64         `mapstructure:"InitialBlock"`
+	RetryAfterErrorPeriod      types.Duration `mapstructure:"RetryAfterErrorPeriod"`
+	MaxRetryAttemptsAfterError int            `mapstructure:"MaxRetryAttemptsAfterError"`
+}
diff --git a/l1infotreesync/l1infotreesync.go b/l1infotreesync/l1infotreesync.go
index d83570d2d..8cd3ee70c 100644
--- a/l1infotreesync/l1infotreesync.go
+++ b/l1infotreesync/l1infotreesync.go
@@ -2,11 +2,12 @@ package l1infotreesync
 
 import (
 	"context"
+	"errors"
 	"time"
 
-	"github.com/0xPolygon/cdk/config/types"
 	"github.com/0xPolygon/cdk/etherman"
 	"github.com/0xPolygon/cdk/sync"
+	"github.com/0xPolygon/cdk/tree"
 	"github.com/ethereum/go-ethereum/common"
 )
 
@@ -15,20 +16,6 @@ const (
 	downloadBufferSize = 1000
 )
 
-type Config struct {
-	DBPath             string         `mapstructure:"DBPath"`
-	GlobalExitRootAddr common.Address `mapstructure:"GlobalExitRootAddr"`
-	RollupManagerAddr  common.Address `mapstructure:"RollupManagerAddr"`
-	SyncBlockChunkSize uint64         `mapstructure:"SyncBlockChunkSize"`
-	// TODO: BlockFinality doesnt work as per the jsonschema
-	BlockFinality              string         `jsonschema:"enum=latest,enum=safe, enum=pending, enum=finalized" mapstructure:"BlockFinality"`
-	URLRPCL1                   string         `mapstructure:"URLRPCL1"`
-	WaitForNewBlocksPeriod     types.Duration `mapstructure:"WaitForNewBlocksPeriod"`
-	InitialBlock               uint64         `mapstructure:"InitialBlock"`
-	RetryAfterErrorPeriod      types.Duration `mapstructure:"RetryAfterErrorPeriod"`
-	MaxRetryAttemptsAfterError int            `mapstructure:"MaxRetryAttemptsAfterError"`
-}
-
 type L1InfoTreeSync struct {
 	processor *processor
 	driver    *sync.EVMDriver
@@ -76,6 +63,7 @@ func New(
 		return nil, err
 	}
 	downloader, err := sync.NewEVMDownloader(
+		"l1infotreesync",
 		l1Client,
 		syncBlockChunkSize,
 		blockFinalityType,
@@ -104,10 +92,18 @@ func (s *L1InfoTreeSync) Start(ctx context.Context) {
 }
 
 // GetL1InfoTreeMerkleProof creates a merkle proof for the L1 Info tree
-func (s *L1InfoTreeSync) GetL1InfoTreeMerkleProof(ctx context.Context, index uint32) ([]common.Hash, common.Hash, error) {
+func (s *L1InfoTreeSync) GetL1InfoTreeMerkleProof(ctx context.Context, index uint32) ([32]common.Hash, common.Hash, error) {
 	return s.processor.GetL1InfoTreeMerkleProof(ctx, index)
 }
 
+// GetRollupExitTreeMerkleProof creates a merkle proof for the rollup exit tree
+func (s *L1InfoTreeSync) GetRollupExitTreeMerkleProof(ctx context.Context, networkID uint32, root common.Hash) ([32]common.Hash, error) {
+	if networkID == 0 {
+		return tree.EmptyProof, nil
+	}
+	return s.processor.rollupExitTree.GetProof(ctx, networkID-1, root)
+}
+
 // GetLatestInfoUntilBlock returns the most recent L1InfoTreeLeaf that occurred before or at blockNum.
 // If the blockNum has not been processed yet the error ErrBlockNotProcessed will be returned
 func (s *L1InfoTreeSync) GetLatestInfoUntilBlock(ctx context.Context, blockNum uint64) (*L1InfoTreeLeaf, error) {
@@ -144,3 +140,10 @@ func (s *L1InfoTreeSync) GetLastL1InfoTreeRootAndIndex(ctx context.Context) (uin
 func (s *L1InfoTreeSync) GetLastProcessedBlock(ctx context.Context) (uint64, error) {
 	return s.processor.GetLastProcessedBlock(ctx)
 }
+
+func (s *L1InfoTreeSync) GetLocalExitRoot(ctx context.Context, networkID uint32, rollupExitRoot common.Hash) (common.Hash, error) {
+	if networkID == 0 {
+		return common.Hash{}, errors.New("network 0 is not a rollup, and it's not part of the rollup exit tree")
+	}
+	return s.processor.rollupExitTree.GetLeaf(ctx, networkID-1, rollupExitRoot)
+}
diff --git a/l1infotreesync/processor.go b/l1infotreesync/processor.go
index 2af812d28..b50bb7453 100644
--- a/l1infotreesync/processor.go
+++ b/l1infotreesync/processor.go
@@ -159,21 +159,21 @@ func newProcessor(ctx context.Context, dbPath string) (*processor, error) {
 }
 
 // GetL1InfoTreeMerkleProof creates a merkle proof for the L1 Info tree
-func (p *processor) GetL1InfoTreeMerkleProof(ctx context.Context, index uint32) ([]ethCommon.Hash, ethCommon.Hash, error) {
+func (p *processor) GetL1InfoTreeMerkleProof(ctx context.Context, index uint32) ([32]ethCommon.Hash, ethCommon.Hash, error) {
 	tx, err := p.db.BeginRo(ctx)
 	if err != nil {
-		return nil, ethCommon.Hash{}, err
+		return tree.EmptyProof, ethCommon.Hash{}, err
 	}
 	defer tx.Rollback()
 
 	root, err := p.l1InfoTree.GetRootByIndex(tx, index)
 	if err != nil {
-		return nil, ethCommon.Hash{}, err
+		return tree.EmptyProof, ethCommon.Hash{}, err
 	}
 
 	proof, err := p.l1InfoTree.GetProof(ctx, index, root)
 	if err != nil {
-		return nil, ethCommon.Hash{}, err
+		return tree.EmptyProof, ethCommon.Hash{}, err
 	}
 
 	// TODO: check if we need to return root or wat
@@ -278,6 +278,7 @@ func (p *processor) Reorg(ctx context.Context, firstReorgedBlock uint64) error {
 	if err != nil {
 		return err
 	}
+	defer tx.Rollback()
 	c, err := tx.Cursor(blockTable)
 	if err != nil {
 		return err
@@ -379,7 +380,7 @@ func (p *processor) ProcessBlock(ctx context.Context, b sync.Block) error {
 					Timestamp:       event.UpdateL1InfoTree.Timestamp,
 				}
 				if err := p.storeLeafInfo(tx, leafToStore); err != nil {
-					tx.Rollback()
+					rollback()
 					return err
 				}
 				l1InfoTreeLeavesToAdd = append(l1InfoTreeLeavesToAdd, tree.Leaf{
diff --git a/lastgersync/config.go b/lastgersync/config.go
new file mode 100644
index 000000000..9db63bec6
--- /dev/null
+++ b/lastgersync/config.go
@@ -0,0 +1,27 @@
+package lastgersync
+
+import (
+	"github.com/0xPolygon/cdk/config/types"
+	"github.com/ethereum/go-ethereum/common"
+)
+
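+// Config holds the configuration of the synchronizer that tracks the GERs injected on L2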
+type Config struct {
+	// DBPath path of the DB
+	DBPath string `mapstructure:"DBPath"`
+	// BlockFinality indicates the status of the blocks that will be queried in order to sync
+	BlockFinality string `jsonschema:"enum=LatestBlock, enum=SafeBlock, enum=PendingBlock, enum=FinalizedBlock, enum=EarliestBlock" mapstructure:"BlockFinality"`
+	// InitialBlockNum is the first block that will be queried when starting the synchronization from scratch.
+	// It should be a number equal to or below the block of the creation of the bridge contract
+	InitialBlockNum uint64 `mapstructure:"InitialBlockNum"`
+	// GlobalExitRootL2Addr is the address of the GER smart contract on L2
+	GlobalExitRootL2Addr common.Address `mapstructure:"GlobalExitRootL2Addr"`
+	// RetryAfterErrorPeriod is the time that will be waited when an unexpected error happens before retry
+	RetryAfterErrorPeriod types.Duration `mapstructure:"RetryAfterErrorPeriod"`
+	// MaxRetryAttemptsAfterError is the maximum number of consecutive attempts that will happen before panicking.
+	// Any number smaller than zero will be considered unlimited retries
+	MaxRetryAttemptsAfterError int `mapstructure:"MaxRetryAttemptsAfterError"`
+	// WaitForNewBlocksPeriod time that will be waited when the synchronizer has reached the latest block
+	WaitForNewBlocksPeriod types.Duration `mapstructure:"WaitForNewBlocksPeriod"`
+	// DownloadBufferSize is the size of the buffer of events to be processed. When the buffer is full, downloading stops until processing catches up
+	DownloadBufferSize int `mapstructure:"DownloadBufferSize"`
+}
diff --git a/lastgersync/e2e_test.go b/lastgersync/e2e_test.go
new file mode 100644
index 000000000..59a2e834c
--- /dev/null
+++ b/lastgersync/e2e_test.go
@@ -0,0 +1,71 @@
+package lastgersync_test
+
+import (
+	"context"
+	"fmt"
+	"strconv"
+	"testing"
+	"time"
+
+	"github.com/0xPolygon/cdk/etherman"
+	"github.com/0xPolygon/cdk/lastgersync"
+	"github.com/0xPolygon/cdk/test/helpers"
+	"github.com/ethereum/go-ethereum/accounts/abi/bind"
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/stretchr/testify/require"
+)
+
+func TestE2E(t *testing.T) {
+	ctx := context.Background()
+	env := helpers.SetupAggoracleWithEVMChain(t)
+	dbPathSyncer := t.TempDir()
+	syncer, err := lastgersync.New(
+		ctx,
+		dbPathSyncer,
+		env.ReorgDetector,
+		env.L2Client.Client(),
+		env.GERL2Addr,
+		env.L1InfoTreeSync,
+		0,
+		0,
+		etherman.LatestBlock,
+		time.Millisecond*30,
+		10,
+	)
+	require.NoError(t, err)
+	go syncer.Start(ctx)
+
+	for i := 0; i < 10; i++ {
+		// Update GER on L1
+		_, err := env.GERL1Contract.UpdateExitRoot(env.AuthL1, common.HexToHash(strconv.Itoa(i)))
+		require.NoError(t, err)
+		env.L1Client.Commit()
+		time.Sleep(time.Millisecond * 50)
+		expectedGER, err := env.GERL1Contract.GetLastGlobalExitRoot(&bind.CallOpts{Pending: false})
+		require.NoError(t, err)
+		isInjected, err := env.AggOracleSender.IsGERAlreadyInjected(expectedGER)
+		require.NoError(t, err)
+		require.True(t, isInjected, fmt.Sprintf("iteration %d, GER: %s", i, common.Bytes2Hex(expectedGER[:])))
+
+		// Wait for syncer to catch up
+		syncerUpToDate := false
+		var errMsg string
+		for i := 0; i < 10; i++ {
+			lpb, err := syncer.GetLastProcessedBlock(ctx)
+			require.NoError(t, err)
+			lb, err := env.L2Client.Client().BlockNumber(ctx)
+			require.NoError(t, err)
+			if lpb == lb {
+				syncerUpToDate = true
+				break
+			}
+			time.Sleep(time.Millisecond * 10)
+			errMsg = fmt.Sprintf("last block from client: %d, last block from syncer: %d", lb, lpb)
+		}
+		require.True(t, syncerUpToDate, errMsg)
+
+		_, actualGER, err := syncer.GetFirstGERAfterL1InfoTreeIndex(ctx, uint32(i))
+		require.NoError(t, err)
+		require.Equal(t, common.Hash(expectedGER), actualGER)
+	}
+}
diff --git a/lastgersync/evmdownloader.go b/lastgersync/evmdownloader.go
new file mode 100644
index 000000000..717eb0957
--- /dev/null
+++ b/lastgersync/evmdownloader.go
@@ -0,0 +1,166 @@
+package lastgersync
+
+import (
+	"context"
+	"fmt"
+	"math/big"
+	"time"
+
+	"github.com/0xPolygon/cdk-contracts-tooling/contracts/manual/pessimisticglobalexitroot"
+	"github.com/0xPolygon/cdk/l1infotreesync"
+	"github.com/0xPolygon/cdk/log"
+	"github.com/0xPolygon/cdk/sync"
+	"github.com/0xPolygon/cdk/tree"
+	"github.com/ethereum/go-ethereum"
+	"github.com/ethereum/go-ethereum/accounts/abi/bind"
+	"github.com/ethereum/go-ethereum/common"
+)
+
+type EthClienter interface {
+	ethereum.LogFilterer
+	ethereum.BlockNumberReader
+	ethereum.ChainReader
+	bind.ContractBackend
+}
+
+type downloader struct {
+	*sync.EVMDownloaderImplementation
+	l2Client       EthClienter
+	gerContract    *pessimisticglobalexitroot.Pessimisticglobalexitroot
+	l1InfoTreesync *l1infotreesync.L1InfoTreeSync
+	processor      *processor
+	rh             *sync.RetryHandler
+}
+
+func newDownloader(
+	l2Client EthClienter,
+	globalExitRootL2 common.Address,
+	l1InfoTreesync *l1infotreesync.L1InfoTreeSync,
+	processor *processor,
+	rh *sync.RetryHandler,
+	blockFinality *big.Int,
+	waitForNewBlocksPeriod time.Duration,
+) (*downloader, error) {
+	gerContract, err := pessimisticglobalexitroot.NewPessimisticglobalexitroot(globalExitRootL2, l2Client)
+	if err != nil {
+		return nil, err
+	}
+	return &downloader{
+		EVMDownloaderImplementation: sync.NewEVMDownloaderImplementation(
+			"lastgersync", l2Client, blockFinality, waitForNewBlocksPeriod, nil, nil, nil, rh,
+		),
+		l2Client:       l2Client,
+		gerContract:    gerContract,
+		l1InfoTreesync: l1InfoTreesync,
+		processor:      processor,
+		rh:             rh,
+	}, nil
+}
+
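+// Download feeds the sync driver: for every new block it collects the GERs appended to the
+// L1 info tree since the last known index and reports, at most, the greatest one that has
+// already been injected on L2.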
+func (d *downloader) Download(ctx context.Context, fromBlock uint64, downloadedCh chan sync.EVMBlock) {
+	var (
+		attempts  int
+		lastIndex uint32
+		err       error
+	)
+	for {
+		lastIndex, err = d.processor.getLastIndex(ctx)
+		if err == ErrNotFound {
+			lastIndex = 0
+		} else if err != nil {
+			log.Errorf("error getting last index: %v", err)
+			attempts++
+			d.rh.Handle("getLastIndex", attempts)
+			continue
+		}
+		break
+	}
+	for {
+		select {
+		case <-ctx.Done():
+			log.Debug("closing channel")
+			close(downloadedCh)
+			return
+		default:
+		}
+		lastBlock := d.WaitForNewBlocks(ctx, fromBlock)
+
+		attempts = 0
+		var gers []Event
+		for {
+			gers, err = d.getGERsFromIndex(ctx, lastIndex)
+			if err != nil {
+				log.Errorf("error getting GERs: %v", err)
+				attempts++
+				d.rh.Handle("getGERsFromIndex", attempts)
+				continue
+			}
+			break
+		}
+
+		attempts = 0
+		blockHeader := d.GetBlockHeader(ctx, lastBlock)
+		block := &sync.EVMBlock{
+			EVMBlockHeader: sync.EVMBlockHeader{
+				Num:        blockHeader.Num,
+				Hash:       blockHeader.Hash,
+				ParentHash: blockHeader.ParentHash,
+				Timestamp:  blockHeader.Timestamp,
+			},
+		}
+		d.setGreatestGERInjectedFromList(block, gers)
+
+		downloadedCh <- *block
+		if block.Events != nil {
+			lastIndex = block.Events[0].(Event).L1InfoTreeIndex
+		}
+	}
+}
+
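+// getGERsFromIndex returns the GERs of the L1 info tree leaves from fromL1InfoTreeIndex up to
+// the latest known index, or nil if the tree is still empty.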
+func (d *downloader) getGERsFromIndex(ctx context.Context, fromL1InfoTreeIndex uint32) ([]Event, error) {
+	lastIndex, _, err := d.l1InfoTreesync.GetLastL1InfoTreeRootAndIndex(ctx)
+	if err == tree.ErrNotFound {
+		return nil, nil
+	}
+	if err != nil {
+		return nil, fmt.Errorf("error calling GetLastL1InfoTreeRootAndIndex: %v", err)
+	}
+
+	gers := []Event{}
+	for i := fromL1InfoTreeIndex; i <= lastIndex; i++ {
+		info, err := d.l1InfoTreesync.GetInfoByIndex(ctx, i)
+		if err != nil {
+			return nil, fmt.Errorf("error calling GetInfoByIndex: %v", err)
+		}
+		gers = append(gers, Event{
+			L1InfoTreeIndex: i,
+			GlobalExitRoot:  info.GlobalExitRoot,
+		})
+	}
+
+	return gers, nil
+}
+
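+// setGreatestGERInjectedFromList checks each candidate GER against the L2 GER contract and
+// attaches the last one found to be injected (non-zero timestamp) as the block's single event.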
+func (d *downloader) setGreatestGERInjectedFromList(b *sync.EVMBlock, list []Event) {
+	for _, event := range list {
+		var attempts int
+		for {
+			timestamp, err := d.gerContract.GlobalExitRootMap(
+				&bind.CallOpts{Pending: false}, event.GlobalExitRoot,
+			)
+			if err != nil {
+				attempts++
+				log.Errorf(
+					"error calling contract function GlobalExitRootMap with ger %s: %v",
+					event.GlobalExitRoot.Hex(), err,
+				)
+				d.rh.Handle("GlobalExitRootMap", attempts)
+				continue
+			}
+			if timestamp.Cmp(big.NewInt(0)) == 1 {
+				b.Events = []interface{}{event}
+			}
+			break
+		}
+	}
+}
diff --git a/lastgersync/lastgersync.go b/lastgersync/lastgersync.go
new file mode 100644
index 000000000..2d7ef8cb1
--- /dev/null
+++ b/lastgersync/lastgersync.go
@@ -0,0 +1,84 @@
+package lastgersync
+
+import (
+	"context"
+	"time"
+
+	"github.com/0xPolygon/cdk/etherman"
+	"github.com/0xPolygon/cdk/l1infotreesync"
+	"github.com/0xPolygon/cdk/sync"
+	"github.com/ethereum/go-ethereum/common"
+)
+
+const (
+	reorgDetectorID = "lastGERSync"
+)
+
+type LastGERSync struct {
+	driver    *sync.EVMDriver
+	processor *processor
+}
+
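+// New creates a LastGERSync, the synchronizer that tracks which GERs have been injected on L2
+// and the L1 info tree index they relate to.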
+func New(
+	ctx context.Context,
+	dbPath string,
+	rd sync.ReorgDetector,
+	l2Client EthClienter,
+	globalExitRootL2 common.Address,
+	l1InfoTreesync *l1infotreesync.L1InfoTreeSync,
+	retryAfterErrorPeriod time.Duration,
+	maxRetryAttemptsAfterError int,
+	blockFinality etherman.BlockNumberFinality,
+	waitForNewBlocksPeriod time.Duration,
+	downloadBufferSize int,
+) (*LastGERSync, error) {
+	processor, err := newProcessor(dbPath)
+	if err != nil {
+		return nil, err
+	}
+
+	rh := &sync.RetryHandler{
+		RetryAfterErrorPeriod:      retryAfterErrorPeriod,
+		MaxRetryAttemptsAfterError: maxRetryAttemptsAfterError,
+	}
+	bf, err := blockFinality.ToBlockNum()
+	if err != nil {
+		return nil, err
+	}
+	downloader, err := newDownloader(
+		l2Client,
+		globalExitRootL2,
+		l1InfoTreesync,
+		processor,
+		rh,
+		bf,
+		waitForNewBlocksPeriod,
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	driver, err := sync.NewEVMDriver(rd, processor, downloader, reorgDetectorID, downloadBufferSize, rh)
+	if err != nil {
+		return nil, err
+	}
+
+	return &LastGERSync{
+		driver:    driver,
+		processor: processor,
+	}, nil
+}
+
+func (s *LastGERSync) Start(ctx context.Context) {
+	s.driver.Sync(ctx)
+}
+
+func (s *LastGERSync) GetFirstGERAfterL1InfoTreeIndex(ctx context.Context, atOrAfterL1InfoTreeIndex uint32) (injectedL1InfoTreeIndex uint32, ger common.Hash, err error) {
+	return s.processor.GetFirstGERAfterL1InfoTreeIndex(ctx, atOrAfterL1InfoTreeIndex)
+}
+
+func (s *LastGERSync) GetLastProcessedBlock(ctx context.Context) (uint64, error) {
+	return s.processor.GetLastProcessedBlock(ctx)
+}
diff --git a/lastgersync/processor.go b/lastgersync/processor.go
new file mode 100644
index 000000000..88e89be99
--- /dev/null
+++ b/lastgersync/processor.go
@@ -0,0 +1,250 @@
+package lastgersync
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"math"
+
+	"github.com/0xPolygon/cdk/common"
+	"github.com/0xPolygon/cdk/sync"
+	ethCommon "github.com/ethereum/go-ethereum/common"
+	"github.com/ledgerwatch/erigon-lib/kv"
+	"github.com/ledgerwatch/erigon-lib/kv/mdbx"
+)
+
+const (
+	lastProcessedTable = "lastgersync-lastProcessed"
+	gerTable           = "lastgersync-ger"
+	blockTable         = "lastgersync-block"
+)
+
+var (
+	lastProcessedKey = []byte("lp")
+	ErrNotFound      = errors.New("not found")
+)
+
+type Event struct {
+	GlobalExitRoot  ethCommon.Hash
+	L1InfoTreeIndex uint32
+}
+
+type blockWithGERs struct {
+	// inclusive
+	FirstIndex uint32
+	// not inclusive
+	LastIndex uint32
+}
+
+func (b *blockWithGERs) MarshalBinary() ([]byte, error) {
+	return append(common.Uint32ToBytes(b.FirstIndex), common.Uint32ToBytes(b.LastIndex)...), nil
+}
+
+func (b *blockWithGERs) UnmarshalBinary(data []byte) error {
+	if len(data) != 8 {
+		return fmt.Errorf("expected len %d, actual len %d", 8, len(data))
+	}
+	b.FirstIndex = common.BytesToUint32(data[:4])
+	b.LastIndex = common.BytesToUint32(data[4:])
+	return nil
+}
+
+type processor struct {
+	db kv.RwDB
+}
+
+func newProcessor(dbPath string) (*processor, error) {
+	tableCfgFunc := func(defaultBuckets kv.TableCfg) kv.TableCfg {
+		cfg := kv.TableCfg{
+			lastProcessedTable: {},
+			gerTable:           {},
+			blockTable:         {},
+		}
+		return cfg
+	}
+	db, err := mdbx.NewMDBX(nil).
+		Path(dbPath).
+		WithTableCfg(tableCfgFunc).
+		Open()
+	if err != nil {
+		return nil, err
+	}
+	return &processor{
+		db: db,
+	}, nil
+}
+
+// GetLastProcessedBlock returns the last block processed by the processor, including blocks
+// that don't have events
+func (p *processor) GetLastProcessedBlock(ctx context.Context) (uint64, error) {
+	tx, err := p.db.BeginRo(ctx)
+	if err != nil {
+		return 0, err
+	}
+	defer tx.Rollback()
+	return p.getLastProcessedBlockWithTx(tx)
+}
+
+func (p *processor) getLastIndex(ctx context.Context) (uint32, error) {
+	tx, err := p.db.BeginRo(ctx)
+	if err != nil {
+		return 0, err
+	}
+	defer tx.Rollback()
+
+	return p.getLastIndexWithTx(tx)
+}
+
+func (p *processor) getLastIndexWithTx(tx kv.Tx) (uint32, error) {
+	iter, err := tx.RangeDescend(gerTable, common.Uint32ToBytes(math.MaxUint32), common.Uint32ToBytes(0), 1)
+	if err != nil {
+		return 0, err
+	}
+	k, _, err := iter.Next()
+	if err != nil {
+		return 0, err
+	}
+	if k == nil {
+		return 0, ErrNotFound
+	}
+	return common.BytesToUint32(k), nil
+}
+
+func (p *processor) getLastProcessedBlockWithTx(tx kv.Tx) (uint64, error) {
+	if lastProcessedBytes, err := tx.GetOne(lastProcessedTable, lastProcessedKey); err != nil {
+		return 0, err
+	} else if lastProcessedBytes == nil {
+		return 0, nil
+	} else {
+		return common.BytesToUint64(lastProcessedBytes), nil
+	}
+}
+
+func (p *processor) updateLastProcessedBlockWithTx(tx kv.RwTx, blockNum uint64) error {
+	return tx.Put(lastProcessedTable, lastProcessedKey, common.Uint64ToBytes(blockNum))
+}
+
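+// ProcessBlock stores the GERs included in the block events (indexed by L1 info tree index),
+// records the index range seen at this block and updates the last processed block.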
+func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error {
+	tx, err := p.db.BeginRw(ctx)
+	if err != nil {
+		return err
+	}
+
+	lenEvents := len(block.Events)
+	var lastIndex int64
+	if lenEvents > 0 {
+		li, err := p.getLastIndexWithTx(tx)
+		if err == ErrNotFound {
+			lastIndex = -1
+		} else if err != nil {
+			tx.Rollback()
+			return err
+		} else {
+			lastIndex = int64(li)
+		}
+	}
+
+	for _, e := range block.Events {
+		event := e.(Event)
+		if int64(event.L1InfoTreeIndex) < lastIndex {
+			continue
+		}
+		lastIndex = int64(event.L1InfoTreeIndex)
+		if err := tx.Put(
+			gerTable,
+			common.Uint32ToBytes(event.L1InfoTreeIndex),
+			event.GlobalExitRoot[:],
+		); err != nil {
+			tx.Rollback()
+			return err
+		}
+	}
+
+	if lenEvents > 0 {
+		bwg := blockWithGERs{
+			FirstIndex: block.Events[0].(Event).L1InfoTreeIndex,
+			LastIndex:  block.Events[lenEvents-1].(Event).L1InfoTreeIndex + 1,
+		}
+		data, err := bwg.MarshalBinary()
+		if err != nil {
+			tx.Rollback()
+			return err
+		}
+		if err = tx.Put(blockTable, common.Uint64ToBytes(block.Num), data); err != nil {
+			tx.Rollback()
+			return err
+		}
+	}
+
+	if err := p.updateLastProcessedBlockWithTx(tx, block.Num); err != nil {
+		tx.Rollback()
+		return err
+	}
+
+	return tx.Commit()
+}
+
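+// Reorg deletes the GERs and block entries added at or after firstReorgedBlock and moves the
+// last processed block back to firstReorgedBlock - 1.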
+func (p *processor) Reorg(ctx context.Context, firstReorgedBlock uint64) error {
+	tx, err := p.db.BeginRw(ctx)
+	if err != nil {
+		return err
+	}
+
+	iter, err := tx.Range(blockTable, common.Uint64ToBytes(firstReorgedBlock), nil)
+	if err != nil {
+		tx.Rollback()
+		return err
+	}
+	for bNumBytes, bWithGERBytes, err := iter.Next(); bNumBytes != nil; bNumBytes, bWithGERBytes, err = iter.Next() {
+		if err != nil {
+			tx.Rollback()
+			return err
+		}
+		if err := tx.Delete(blockTable, bNumBytes); err != nil {
+			tx.Rollback()
+			return err
+		}
+
+		bWithGER := &blockWithGERs{}
+		if err := bWithGER.UnmarshalBinary(bWithGERBytes); err != nil {
+			tx.Rollback()
+			return err
+		}
+		for i := bWithGER.FirstIndex; i < bWithGER.LastIndex; i++ {
+			if err := tx.Delete(gerTable, common.Uint32ToBytes(i)); err != nil {
+				tx.Rollback()
+				return err
+			}
+		}
+	}
+
+	if err := p.updateLastProcessedBlockWithTx(tx, firstReorgedBlock-1); err != nil {
+		tx.Rollback()
+		return err
+	}
+
+	return tx.Commit()
+}
+
+// GetFirstGERAfterL1InfoTreeIndex returns the first GER injected on the chain that is related to l1InfoTreeIndex
+// or to a greater index
+func (p *processor) GetFirstGERAfterL1InfoTreeIndex(ctx context.Context, l1InfoTreeIndex uint32) (uint32, ethCommon.Hash, error) {
+	tx, err := p.db.BeginRo(ctx)
+	if err != nil {
+		return 0, ethCommon.Hash{}, err
+	}
+	defer tx.Rollback()
+
+	iter, err := tx.Range(gerTable, common.Uint32ToBytes(l1InfoTreeIndex), nil)
+	if err != nil {
+		return 0, ethCommon.Hash{}, err
+	}
+	l1InfoIndexBytes, ger, err := iter.Next()
+	if err != nil {
+		return 0, ethCommon.Hash{}, err
+	}
+	if l1InfoIndexBytes == nil {
+		return 0, ethCommon.Hash{}, ErrNotFound
+	}
+	return common.BytesToUint32(l1InfoIndexBytes), ethCommon.BytesToHash(ger), nil
+}
diff --git a/reorgdetector/reorgdetector.go b/reorgdetector/reorgdetector.go
index c0b0caa2e..9eb631aaa 100644
--- a/reorgdetector/reorgdetector.go
+++ b/reorgdetector/reorgdetector.go
@@ -218,34 +218,36 @@ func (r *ReorgDetector) Subscribe(id string) (*Subscription, error) {
 }
 
 func (r *ReorgDetector) AddBlockToTrack(ctx context.Context, id string, blockNum uint64, blockHash common.Hash) error {
-	r.subscriptionsLock.RLock()
-	if sub, ok := r.subscriptions[id]; !ok {
-		r.subscriptionsLock.RUnlock()
-		return ErrNotSubscribed
-	} else {
-		// In case there are reorgs being processed, wait
-		// Note that this also makes any addition to trackedBlocks[id] safe
-		sub.pendingReorgsToBeProcessed.Wait()
-	}
-
-	r.subscriptionsLock.RUnlock()
-
-	if actualHash, ok := r.getUnfinalisedBlocksMap()[blockNum]; ok {
-		if actualHash.Hash == blockHash {
-			return r.saveTrackedBlock(ctx, id, block{Num: blockNum, Hash: blockHash})
-		} else {
-			return ErrInvalidBlockHash
-		}
-	} else {
-		// ReorgDetector has not added the requested block yet,
-		// so we add it to the unfinalised blocks and then to the subscriber blocks as well
-		block := block{Num: blockNum, Hash: blockHash}
-		if err := r.saveTrackedBlock(ctx, unfinalisedBlocksID, block); err != nil {
-			return err
-		}
-
-		return r.saveTrackedBlock(ctx, id, block)
-	}
+	return nil
+	// COMMENTING THE CODE AS I'M SUSPECTING A DEADLOCK
+	// r.subscriptionsLock.RLock()
+	// if sub, ok := r.subscriptions[id]; !ok {
+	// 	r.subscriptionsLock.RUnlock()
+	// 	return ErrNotSubscribed
+	// } else {
+	// 	// In case there are reorgs being processed, wait
+	// 	// Note that this also makes any addition to trackedBlocks[id] safe
+	// 	sub.pendingReorgsToBeProcessed.Wait()
+	// }
+
+	// r.subscriptionsLock.RUnlock()
+
+	// if actualHash, ok := r.getUnfinalisedBlocksMap()[blockNum]; ok {
+	// 	if actualHash.Hash == blockHash {
+	// 		return r.saveTrackedBlock(ctx, id, block{Num: blockNum, Hash: blockHash})
+	// 	} else {
+	// 		return ErrInvalidBlockHash
+	// 	}
+	// } else {
+	// 	// ReorgDetector has not added the requested block yet,
+	// 	// so we add it to the unfinalised blocks and then to the subscriber blocks as well
+	// 	block := block{Num: blockNum, Hash: blockHash}
+	// 	if err := r.saveTrackedBlock(ctx, unfinalisedBlocksID, block); err != nil {
+	// 		return err
+	// 	}
+
+	// 	return r.saveTrackedBlock(ctx, id, block)
+	// }
 }
 
 func (r *ReorgDetector) cleanStoredSubsBeforeStart(ctx context.Context, latestFinalisedBlock uint64) error {
diff --git a/reorgdetector/reorgdetector_test.go b/reorgdetector/reorgdetector_test.go
index 38bdd3c51..275f89a80 100644
--- a/reorgdetector/reorgdetector_test.go
+++ b/reorgdetector/reorgdetector_test.go
@@ -1,5 +1,6 @@
 package reorgdetector
 
+/*
 import (
 	"context"
 	"encoding/json"
@@ -19,6 +20,7 @@ import (
 	"github.com/stretchr/testify/require"
 )
 
+
 const testSubscriber = "testSubscriber"
 
 // newTestDB creates new instance of db used by tests.
@@ -462,3 +464,4 @@ func insertTestData(t *testing.T, ctx context.Context, db kv.RwDB, blocks []*typ
 
 	require.NoError(t, err)
 }
+*/
diff --git a/rpc/bridge.go b/rpc/bridge.go
new file mode 100644
index 000000000..0b550e72c
--- /dev/null
+++ b/rpc/bridge.go
@@ -0,0 +1,228 @@
+package rpc
+
+import (
+	"context"
+	"fmt"
+	"math/big"
+	"time"
+
+	"github.com/0xPolygon/cdk-rpc/rpc"
+	"github.com/0xPolygon/cdk/bridgesync"
+	"github.com/0xPolygon/cdk/claimsponsor"
+	"github.com/0xPolygon/cdk/l1bridge2infoindexsync"
+	"github.com/0xPolygon/cdk/l1infotreesync"
+	"github.com/0xPolygon/cdk/lastgersync"
+	"github.com/0xPolygon/cdk/log"
+	"github.com/ethereum/go-ethereum/common"
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/metric"
+)
+
+const (
+	// BRIDGE is the namespace of the bridge service
+	BRIDGE    = "bridge"
+	meterName = "github.com/0xPolygon/cdk/rpc"
+)
+
+// BridgeEndpoints contains implementations for the "bridge" RPC endpoints
+type BridgeEndpoints struct {
+	meter          metric.Meter
+	readTimeout    time.Duration
+	writeTimeout   time.Duration
+	networkID      uint32
+	sponsor        *claimsponsor.ClaimSponsor
+	l1InfoTree     *l1infotreesync.L1InfoTreeSync
+	l1Bridge2Index *l1bridge2infoindexsync.L1Bridge2InfoIndexSync
+	injectedGERs   *lastgersync.LastGERSync
+	bridgeL1       *bridgesync.BridgeSync
+	bridgeL2       *bridgesync.BridgeSync
+}
+
+// NewBridgeEndpoints returns a new instance of BridgeEndpoints
+func NewBridgeEndpoints(
+	writeTimeout time.Duration,
+	readTimeout time.Duration,
+	networkID uint32,
+	sponsor *claimsponsor.ClaimSponsor,
+	l1InfoTree *l1infotreesync.L1InfoTreeSync,
+	l1Bridge2Index *l1bridge2infoindexsync.L1Bridge2InfoIndexSync,
+	injectedGERs *lastgersync.LastGERSync,
+	bridgeL1 *bridgesync.BridgeSync,
+	bridgeL2 *bridgesync.BridgeSync,
+) *BridgeEndpoints {
+	meter := otel.Meter(meterName)
+	return &BridgeEndpoints{
+		meter:          meter,
+		readTimeout:    readTimeout,
+		writeTimeout:   writeTimeout,
+		networkID:      networkID,
+		sponsor:        sponsor,
+		l1InfoTree:     l1InfoTree,
+		l1Bridge2Index: l1Bridge2Index,
+		injectedGERs:   injectedGERs,
+		bridgeL1:       bridgeL1,
+		bridgeL2:       bridgeL2,
+	}
+}
+
+// L1InfoTreeIndexForBridge returns the first L1 Info Tree index in which the bridge was included.
+// networkID represents the origin network.
+// This call needs to be done to a client of the same network where the bridge tx was sent
+func (b *BridgeEndpoints) L1InfoTreeIndexForBridge(networkID uint32, depositCount uint32) (interface{}, rpc.Error) {
+	ctx, cancel := context.WithTimeout(context.Background(), b.readTimeout)
+	defer cancel()
+
+	c, merr := b.meter.Int64Counter("l1_info_tree_index_for_bridge")
+	if merr != nil {
+		log.Warnf("failed to create l1_info_tree_index_for_bridge counter: %s", merr)
+	}
+	c.Add(ctx, 1)
+
+	if networkID == 0 {
+		l1InfoTreeIndex, err := b.l1Bridge2Index.GetL1InfoTreeIndexByDepositCount(ctx, depositCount)
+		// TODO: special treatment of the error when not found,
+		// as it's expected that it will take some time for the L1 Info tree to be updated
+		if err != nil {
+			return "0x0", rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get l1InfoTreeIndex, error: %s", err))
+		}
+		return l1InfoTreeIndex, nil
+	}
+	if networkID == b.networkID {
+		// TODO: special treatment of the error when not found,
+		// as it's expected that it will take some time for the L1 Info tree to be updated
+		return "0x0", rpc.NewRPCError(rpc.DefaultErrorCode, "TODO: batchsync / certificatesync missing implementation")
+	}
+	return "0x0", rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("this client does not support network %d", networkID))
+}
+
+// InjectedInfoAfterIndex returns the first GER injected onto the network that is linked
+// to the given index or greater. This call is useful to understand when a bridge is ready to be claimed
+// on its destination network
+func (b *BridgeEndpoints) InjectedInfoAfterIndex(networkID uint32, l1InfoTreeIndex uint32) (interface{}, rpc.Error) {
+	ctx, cancel := context.WithTimeout(context.Background(), b.readTimeout)
+	defer cancel()
+
+	c, merr := b.meter.Int64Counter("injected_info_after_index")
+	if merr != nil {
+		log.Warnf("failed to create injected_info_after_index counter: %s", merr)
+	}
+	c.Add(ctx, 1)
+
+	if networkID == 0 {
+		info, err := b.l1InfoTree.GetInfoByIndex(ctx, l1InfoTreeIndex)
+		if err != nil {
+			return "0x0", rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get global exit root, error: %s", err))
+		}
+		return info, nil
+	}
+	if networkID == b.networkID {
+		injectedL1InfoTreeIndex, _, err := b.injectedGERs.GetFirstGERAfterL1InfoTreeIndex(ctx, l1InfoTreeIndex)
+		if err != nil {
+			return "0x0", rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get global exit root, error: %s", err))
+		}
+		info, err := b.l1InfoTree.GetInfoByIndex(ctx, injectedL1InfoTreeIndex)
+		if err != nil {
+			return "0x0", rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get global exit root, error: %s", err))
+		}
+		return info, nil
+	}
+	return "0x0", rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("this client does not support network %d", networkID))
+}
+
+type ClaimProof struct {
+	ProofLocalExitRoot  [32]common.Hash
+	ProofRollupExitRoot [32]common.Hash
+	L1InfoTreeLeaf      l1infotreesync.L1InfoTreeLeaf
+}
+
+// ClaimProof returns the proofs needed to claim a bridge. NetworkID and depositCount refer to the bridge origin
+// while globalExitRoot should be already injected on the destination network.
+// This call needs to be done to a client of the same network where the bridge tx was sent
+func (b *BridgeEndpoints) ClaimProof(networkID uint32, depositCount uint32, l1InfoTreeIndex uint32) (interface{}, rpc.Error) {
+	ctx, cancel := context.WithTimeout(context.Background(), b.readTimeout)
+	defer cancel()
+
+	c, merr := b.meter.Int64Counter("claim_proof")
+	if merr != nil {
+		log.Warnf("failed to create claim_proof counter: %s", merr)
+	}
+	c.Add(ctx, 1)
+
+	info, err := b.l1InfoTree.GetInfoByIndex(ctx, l1InfoTreeIndex)
+	if err != nil {
+		return "0x0", rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get info from the tree: %s", err))
+	}
+	proofRollupExitRoot, err := b.l1InfoTree.GetRollupExitTreeMerkleProof(ctx, networkID, info.GlobalExitRoot)
+	if err != nil {
+		return "0x0", rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get rollup exit proof, error: %s", err))
+	}
+	var proofLocalExitRoot [32]common.Hash
+	if networkID == 0 {
+		proofLocalExitRoot, err = b.bridgeL1.GetProof(ctx, depositCount, info.MainnetExitRoot)
+		if err != nil {
+			return "0x0", rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get local exit proof, error: %s", err))
+		}
+	} else if networkID == b.networkID {
+		localExitRoot, err := b.l1InfoTree.GetLocalExitRoot(ctx, networkID, info.RollupExitRoot)
+		if err != nil {
+			return "0x0", rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get local exit root from rollup exit tree, error: %s", err))
+		}
+		proofLocalExitRoot, err = b.bridgeL2.GetProof(ctx, depositCount, localExitRoot)
+		if err != nil {
+			return "0x0", rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get local exit proof, error: %s", err))
+		}
+	} else {
+		return "0x0", rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("this client does not support network %d", networkID))
+	}
+	return ClaimProof{
+		ProofLocalExitRoot:  proofLocalExitRoot,
+		ProofRollupExitRoot: proofRollupExitRoot,
+		L1InfoTreeLeaf:      *info,
+	}, nil
+}
+
+// SponsorClaim sends a claim tx on behalf of the user.
+// This call needs to be done to a client of the same network where the claim is going to be sent (bridge destination)
+func (b *BridgeEndpoints) SponsorClaim(claim claimsponsor.Claim) (interface{}, rpc.Error) {
+	ctx, cancel := context.WithTimeout(context.Background(), b.writeTimeout)
+	defer cancel()
+
+	c, merr := b.meter.Int64Counter("sponsor_claim")
+	if merr != nil {
+		log.Warnf("failed to create sponsor_claim counter: %s", merr)
+	}
+	c.Add(ctx, 1)
+
+	if b.sponsor == nil {
+		return "0x0", rpc.NewRPCError(rpc.DefaultErrorCode, "this client does not support claim sponsoring")
+	}
+	if claim.DestinationNetwork != b.networkID {
+		return "0x0", rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("this client only sponsors claims for network %d", b.networkID))
+	}
+	if err := b.sponsor.AddClaimToQueue(ctx, &claim); err != nil {
+		return "0x0", rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("error adding claim to the queue %s", err))
+	}
+	return nil, nil
+}
+
+// GetSponsoredClaimStatus returns the status of a claim that has been previously requested to be sponsored.
+// This call needs to be done to the same client where it was requested to be sponsored
+func (b *BridgeEndpoints) GetSponsoredClaimStatus(globalIndex *big.Int) (interface{}, rpc.Error) {
+	ctx, cancel := context.WithTimeout(context.Background(), b.readTimeout)
+	defer cancel()
+
+	c, merr := b.meter.Int64Counter("get_sponsored_claim_status")
+	if merr != nil {
+		log.Warnf("failed to create get_sponsored_claim_status counter: %s", merr)
+	}
+	c.Add(ctx, 1)
+
+	if b.sponsor == nil {
+		return "0x0", rpc.NewRPCError(rpc.DefaultErrorCode, "this client does not support claim sponsoring")
+	}
+	claim, err := b.sponsor.GetClaim(ctx, globalIndex)
+	if err != nil {
+		return "0x0", rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get claim status, error: %s", err))
+	}
+	return claim.Status, nil
+}
diff --git a/rpc/bridge_client.go b/rpc/bridge_client.go
new file mode 100644
index 000000000..0063e6604
--- /dev/null
+++ b/rpc/bridge_client.go
@@ -0,0 +1,91 @@
+package rpc
+
+import (
+	"encoding/json"
+	"fmt"
+	"math/big"
+
+	"github.com/0xPolygon/cdk-rpc/rpc"
+	"github.com/0xPolygon/cdk/claimsponsor"
+	"github.com/0xPolygon/cdk/l1infotreesync"
+)
+
+type BridgeClientInterface interface {
+	L1InfoTreeIndexForBridge(networkID uint32, depositCount uint32) (uint32, error)
+	InjectedInfoAfterIndex(networkID uint32, l1InfoTreeIndex uint32) (*l1infotreesync.L1InfoTreeLeaf, error)
+	ClaimProof(networkID uint32, depositCount uint32, l1InfoTreeIndex uint32) (*ClaimProof, error)
+	SponsorClaim(claim claimsponsor.Claim) error
+	GetSponsoredClaimStatus(globalIndex *big.Int) (claimsponsor.ClaimStatus, error)
+}
+
+// L1InfoTreeIndexForBridge returns the first L1 Info Tree index in which the bridge was included.
+// networkID represents the origin network.
+// This call needs to be done to a client of the same network where the bridge tx was sent
+func (c *Client) L1InfoTreeIndexForBridge(networkID uint32, depositCount uint32) (uint32, error) {
+	response, err := rpc.JSONRPCCall(c.url, "bridge_l1InfoTreeIndexForBridge", networkID, depositCount)
+	if err != nil {
+		return 0, err
+	}
+	if response.Error != nil {
+		return 0, fmt.Errorf("%v %v", response.Error.Code, response.Error.Message)
+	}
+	var result uint32
+	return result, json.Unmarshal(response.Result, &result)
+}
+
+// InjectedInfoAfterIndex returns the first GER injected onto the network that is linked
+// to the given index or greater. This call is useful to understand when a bridge is ready to be claimed
+// on its destination network
+func (c *Client) InjectedInfoAfterIndex(networkID uint32, l1InfoTreeIndex uint32) (*l1infotreesync.L1InfoTreeLeaf, error) {
+	response, err := rpc.JSONRPCCall(c.url, "bridge_injectedInfoAfterIndex", networkID, l1InfoTreeIndex)
+	if err != nil {
+		return nil, err
+	}
+	if response.Error != nil {
+		return nil, fmt.Errorf("%v %v", response.Error.Code, response.Error.Message)
+	}
+	var result l1infotreesync.L1InfoTreeLeaf
+	return &result, json.Unmarshal(response.Result, &result)
+}
+
+// ClaimProof returns the proofs needed to claim a bridge. NetworkID and depositCount refer to the bridge origin
+// while globalExitRoot should be already injected on the destination network.
+// This call needs to be done to a client of the same network where the bridge tx was sent
+func (c *Client) ClaimProof(networkID uint32, depositCount uint32, l1InfoTreeIndex uint32) (*ClaimProof, error) {
+	response, err := rpc.JSONRPCCall(c.url, "bridge_claimProof", networkID, depositCount, l1InfoTreeIndex)
+	if err != nil {
+		return nil, err
+	}
+	if response.Error != nil {
+		return nil, fmt.Errorf("%v %v", response.Error.Code, response.Error.Message)
+	}
+	var result ClaimProof
+	return &result, json.Unmarshal(response.Result, &result)
+}
+
+// SponsorClaim sends a claim tx on behalf of the user.
+// This call needs to be done to a client of the same network where the claim is going to be sent (bridge destination)
+func (c *Client) SponsorClaim(claim claimsponsor.Claim) error {
+	response, err := rpc.JSONRPCCall(c.url, "bridge_sponsorClaim", claim)
+	if err != nil {
+		return err
+	}
+	if response.Error != nil {
+		return fmt.Errorf("%v %v", response.Error.Code, response.Error.Message)
+	}
+	return nil
+}
+
+// GetSponsoredClaimStatus returns the status of a claim that has been previously requested to be sponsored.
+// This call needs to be done to the same client where it was requested to be sponsored
+func (c *Client) GetSponsoredClaimStatus(globalIndex *big.Int) (claimsponsor.ClaimStatus, error) {
+	response, err := rpc.JSONRPCCall(c.url, "bridge_getSponsoredClaimStatus", globalIndex)
+	if err != nil {
+		return "", err
+	}
+	if response.Error != nil {
+		return "", fmt.Errorf("%v %v", response.Error.Code, response.Error.Message)
+	}
+	var result claimsponsor.ClaimStatus
+	return result, json.Unmarshal(response.Result, &result)
+}
diff --git a/rpc/client.go b/rpc/client.go
new file mode 100644
index 000000000..b48fca519
--- /dev/null
+++ b/rpc/client.go
@@ -0,0 +1,31 @@
+package rpc
+
+// ClientInterface is the interface that defines the implementation of all the endpoints
+type ClientInterface interface {
+	BridgeClientInterface
+}
+
+// ClientFactoryInterface interface for the client factory
+type ClientFactoryInterface interface {
+	NewClient(url string) ClientInterface
+}
+
+// ClientFactory is the implementation of the client factory
+type ClientFactory struct{}
+
+// NewClient returns an implementation of the RPC node client
+func (f *ClientFactory) NewClient(url string) ClientInterface {
+	return NewClient(url)
+}
+
+// Client wraps all the available endpoints of the RPC node server
+type Client struct {
+	url string
+}
+
+// NewClient returns a client ready to be used
+func NewClient(url string) *Client {
+	return &Client{
+		url: url,
+	}
+}
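
To show how these endpoints fit together, here is a hedged end-to-end sketch of a claim flow using the client above. The URLs, the wrapper function, and the way the claim struct is obtained are placeholders; calls are split between origin and destination clients as the doc comments require.

	// Hypothetical claim flow; originClient points at the network where the bridge tx was sent,
	// destClient at the destination network.
	func sponsorBridgeClaim(originNetwork, destNetwork, depositCount uint32, claim claimsponsor.Claim) error {
		originClient := NewClient("http://origin-node:8080") // placeholder URL
		destClient := NewClient("http://dest-node:8080")     // placeholder URL

		// 1. Find the L1 info tree index that includes the bridge.
		index, err := originClient.L1InfoTreeIndexForBridge(originNetwork, depositCount)
		if err != nil {
			return err
		}
		// 2. Check that a GER covering that index has been injected on the destination network.
		if _, err := destClient.InjectedInfoAfterIndex(destNetwork, index); err != nil {
			return err
		}
		// 3. Fetch the proofs needed to build the claim.
		proof, err := originClient.ClaimProof(originNetwork, depositCount, index)
		if err != nil {
			return err
		}
		_ = proof // in real code the proof fields are copied into the claim before sponsoring
		// 4. Ask the destination client to sponsor the claim tx.
		return destClient.SponsorClaim(claim)
	}
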
diff --git a/sync/evmdownloader.go b/sync/evmdownloader.go
index 0cbf1f49d..31613cf3b 100644
--- a/sync/evmdownloader.go
+++ b/sync/evmdownloader.go
@@ -20,21 +20,23 @@ type EthClienter interface {
 	bind.ContractBackend
 }
 
-type evmDownloaderInterface interface {
-	waitForNewBlocks(ctx context.Context, lastBlockSeen uint64) (newLastBlock uint64)
-	getEventsByBlockRange(ctx context.Context, fromBlock, toBlock uint64) []EVMBlock
-	getLogs(ctx context.Context, fromBlock, toBlock uint64) []types.Log
-	getBlockHeader(ctx context.Context, blockNum uint64) EVMBlockHeader
+type EVMDownloaderInterface interface {
+	WaitForNewBlocks(ctx context.Context, lastBlockSeen uint64) (newLastBlock uint64)
+	GetEventsByBlockRange(ctx context.Context, fromBlock, toBlock uint64) []EVMBlock
+	GetLogs(ctx context.Context, fromBlock, toBlock uint64) []types.Log
+	GetBlockHeader(ctx context.Context, blockNum uint64) EVMBlockHeader
 }
 
 type LogAppenderMap map[common.Hash]func(b *EVMBlock, l types.Log) error
 
 type EVMDownloader struct {
 	syncBlockChunkSize uint64
-	evmDownloaderInterface
+	EVMDownloaderInterface
+	log *log.Logger
 }
 
 func NewEVMDownloader(
+	syncerID string,
 	ethClient EthClienter,
 	syncBlockChunkSize uint64,
 	blockFinalityType etherman.BlockNumberFinality,
@@ -43,6 +45,7 @@ func NewEVMDownloader(
 	adressessToQuery []common.Address,
 	rh *RetryHandler,
 ) (*EVMDownloader, error) {
+	logger := log.WithFields("syncer", syncerID)
 	finality, err := blockFinalityType.ToBlockNum()
 	if err != nil {
 		return nil, err
@@ -53,7 +56,8 @@ func NewEVMDownloader(
 	}
 	return &EVMDownloader{
 		syncBlockChunkSize: syncBlockChunkSize,
-		evmDownloaderInterface: &downloaderImplementation{
+		log:                logger,
+		EVMDownloaderInterface: &EVMDownloaderImplementation{
 			ethClient:              ethClient,
 			blockFinality:          finality,
 			waitForNewBlocksPeriod: waitForNewBlocksPeriod,
@@ -61,16 +65,17 @@ func NewEVMDownloader(
 			topicsToQuery:          topicsToQuery,
 			adressessToQuery:       adressessToQuery,
 			rh:                     rh,
+			log:                    logger,
 		},
 	}, nil
 }
 
-func (d *EVMDownloader) download(ctx context.Context, fromBlock uint64, downloadedCh chan EVMBlock) {
-	lastBlock := d.waitForNewBlocks(ctx, 0)
+func (d *EVMDownloader) Download(ctx context.Context, fromBlock uint64, downloadedCh chan EVMBlock) {
+	lastBlock := d.WaitForNewBlocks(ctx, 0)
 	for {
 		select {
 		case <-ctx.Done():
-			log.Debug("closing channel")
+			d.log.Debug("closing channel")
 			close(downloadedCh)
 			return
 		default:
@@ -80,28 +85,31 @@ func (d *EVMDownloader) download(ctx context.Context, fromBlock uint64, download
 			toBlock = lastBlock
 		}
 		if fromBlock > toBlock {
-			log.Debug("waiting for new blocks, last block ", toBlock)
-			lastBlock = d.waitForNewBlocks(ctx, toBlock)
+			d.log.Debugf(
+				"waiting for new blocks, last block processed %d, last block seen on L1 %d",
+				fromBlock-1, lastBlock,
+			)
+			lastBlock = d.WaitForNewBlocks(ctx, fromBlock-1)
 			continue
 		}
-		log.Debugf("getting events from blocks %d to  %d", fromBlock, toBlock)
-		blocks := d.getEventsByBlockRange(ctx, fromBlock, toBlock)
+		d.log.Debugf("getting events from blocks %d to %d", fromBlock, toBlock)
+		blocks := d.GetEventsByBlockRange(ctx, fromBlock, toBlock)
 		for _, b := range blocks {
-			log.Debugf("sending block %d to the driver (with events)", b.Num)
+			d.log.Debugf("sending block %d to the driver (with events)", b.Num)
 			downloadedCh <- b
 		}
 		if len(blocks) == 0 || blocks[len(blocks)-1].Num < toBlock {
 			// Indicate the last downloaded block if there are not events on it
-			log.Debugf("sending block %d to the driver (without events)", toBlock)
+			d.log.Debugf("sending block %d to the driver (without events)", toBlock)
 			downloadedCh <- EVMBlock{
-				EVMBlockHeader: d.getBlockHeader(ctx, toBlock),
+				EVMBlockHeader: d.GetBlockHeader(ctx, toBlock),
 			}
 		}
 		fromBlock = toBlock + 1
 	}
 }
 
-type downloaderImplementation struct {
+type EVMDownloaderImplementation struct {
 	ethClient              EthClienter
 	blockFinality          *big.Int
 	waitForNewBlocksPeriod time.Duration
@@ -109,22 +117,46 @@ type downloaderImplementation struct {
 	topicsToQuery          []common.Hash
 	adressessToQuery       []common.Address
 	rh                     *RetryHandler
+	log                    *log.Logger
 }
 
-func (d *downloaderImplementation) waitForNewBlocks(ctx context.Context, lastBlockSeen uint64) (newLastBlock uint64) {
+func NewEVMDownloaderImplementation(
+	syncerID string,
+	ethClient EthClienter,
+	blockFinality *big.Int,
+	waitForNewBlocksPeriod time.Duration,
+	appender LogAppenderMap,
+	topicsToQuery []common.Hash,
+	adressessToQuery []common.Address,
+	rh *RetryHandler,
+) *EVMDownloaderImplementation {
+	logger := log.WithFields("syncer", syncerID)
+	return &EVMDownloaderImplementation{
+		ethClient:              ethClient,
+		blockFinality:          blockFinality,
+		waitForNewBlocksPeriod: waitForNewBlocksPeriod,
+		appender:               appender,
+		topicsToQuery:          topicsToQuery,
+		adressessToQuery:       adressessToQuery,
+		rh:                     rh,
+		log:                    logger,
+	}
+}
+
+func (d *EVMDownloaderImplementation) WaitForNewBlocks(ctx context.Context, lastBlockSeen uint64) (newLastBlock uint64) {
 	attempts := 0
 	ticker := time.NewTicker(d.waitForNewBlocksPeriod)
 	defer ticker.Stop()
 	for {
 		select {
 		case <-ctx.Done():
-			log.Info("context cancelled")
+			d.log.Info("context cancelled")
 			return lastBlockSeen
 		case <-ticker.C:
 			header, err := d.ethClient.HeaderByNumber(ctx, d.blockFinality)
 			if err != nil {
 				attempts++
-				log.Error("error getting last block num from eth client: ", err)
+				d.log.Error("error getting last block num from eth client: ", err)
 				d.rh.Handle("waitForNewBlocks", attempts)
 				continue
 			}
@@ -135,17 +167,17 @@ func (d *downloaderImplementation) waitForNewBlocks(ctx context.Context, lastBlo
 	}
 }
 
-func (d *downloaderImplementation) getEventsByBlockRange(ctx context.Context, fromBlock, toBlock uint64) []EVMBlock {
+func (d *EVMDownloaderImplementation) GetEventsByBlockRange(ctx context.Context, fromBlock, toBlock uint64) []EVMBlock {
 	blocks := []EVMBlock{}
-	logs := d.getLogs(ctx, fromBlock, toBlock)
+	logs := d.GetLogs(ctx, fromBlock, toBlock)
 	for _, l := range logs {
 		if len(blocks) == 0 || blocks[len(blocks)-1].Num < l.BlockNumber {
-			b := d.getBlockHeader(ctx, l.BlockNumber)
+			b := d.GetBlockHeader(ctx, l.BlockNumber)
 			if b.Hash != l.BlockHash {
-				log.Infof(
-					"there has been a block hash change between the event query and the block query for block %d: %s vs %s. Retrying.",
+				d.log.Infof(
+					"there has been a block hash change between the event query and the block query for block %d: %s vs %s. Retrying.",
 					l.BlockNumber, b.Hash, l.BlockHash)
-				return d.getEventsByBlockRange(ctx, fromBlock, toBlock)
+				return d.GetEventsByBlockRange(ctx, fromBlock, toBlock)
 			}
 			blocks = append(blocks, EVMBlock{
 				EVMBlockHeader: EVMBlockHeader{
@@ -163,7 +195,7 @@ func (d *downloaderImplementation) getEventsByBlockRange(ctx context.Context, fr
 			err := d.appender[l.Topics[0]](&blocks[len(blocks)-1], l)
 			if err != nil {
 				attempts++
-				log.Error("error trying to append log: ", err)
+				d.log.Error("error trying to append log: ", err)
 				d.rh.Handle("getLogs", attempts)
 				continue
 			}
@@ -174,7 +206,7 @@ func (d *downloaderImplementation) getEventsByBlockRange(ctx context.Context, fr
 	return blocks
 }
 
-func (d *downloaderImplementation) getLogs(ctx context.Context, fromBlock, toBlock uint64) []types.Log {
+func (d *EVMDownloaderImplementation) GetLogs(ctx context.Context, fromBlock, toBlock uint64) []types.Log {
 	query := ethereum.FilterQuery{
 		FromBlock: new(big.Int).SetUint64(fromBlock),
 		Addresses: d.adressessToQuery,
@@ -189,7 +221,7 @@ func (d *downloaderImplementation) getLogs(ctx context.Context, fromBlock, toBlo
 		unfilteredLogs, err = d.ethClient.FilterLogs(ctx, query)
 		if err != nil {
 			attempts++
-			log.Error("error calling FilterLogs to eth client: ", err)
+			d.log.Error("error calling FilterLogs to eth client: ", err)
 			d.rh.Handle("getLogs", attempts)
 			continue
 		}
@@ -207,13 +239,13 @@ func (d *downloaderImplementation) getLogs(ctx context.Context, fromBlock, toBlo
 	return logs
 }
 
-func (d *downloaderImplementation) getBlockHeader(ctx context.Context, blockNum uint64) EVMBlockHeader {
+func (d *EVMDownloaderImplementation) GetBlockHeader(ctx context.Context, blockNum uint64) EVMBlockHeader {
 	attempts := 0
 	for {
 		header, err := d.ethClient.HeaderByNumber(ctx, big.NewInt(int64(blockNum)))
 		if err != nil {
 			attempts++
-			log.Errorf("error getting block header for block %d, err: %v", blockNum, err)
+			d.log.Errorf("error getting block header for block %d, err: %v", blockNum, err)
 			d.rh.Handle("getBlockHeader", attempts)
 			continue
 		}
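
As a hedged construction sketch for the renamed, exported downloader, matching the signature exercised by the tests below; the topic hash, the appender body, and the syncer ID are illustrative only.

	// Hypothetical wiring of an EVMDownloader for a single contract and event topic.
	func newExampleDownloader(client EthClienter, contract common.Address, rh *RetryHandler) (*EVMDownloader, error) {
		appender := LogAppenderMap{
			// Placeholder topic: in practice this is the keccak hash of the event signature.
			common.HexToHash("0x01"): func(b *EVMBlock, l types.Log) error {
				b.Events = append(b.Events, l.Data) // decode into a typed event in real code
				return nil
			},
		}
		return NewEVMDownloader(
			"exampleSyncer",            // syncerID: tags every log line of this syncer
			client,                     // EthClienter implementation
			100,                        // syncBlockChunkSize
			etherman.LatestBlock,       // block finality to follow
			time.Second,                // waitForNewBlocksPeriod
			appender,
			[]common.Address{contract}, // addresses to query
			rh,
		)
	}
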
diff --git a/sync/evmdownloader_test.go b/sync/evmdownloader_test.go
index 2c947370b..15a6608ce 100644
--- a/sync/evmdownloader_test.go
+++ b/sync/evmdownloader_test.go
@@ -172,7 +172,7 @@ func TestGetEventsByBlockRange(t *testing.T) {
 				}, nil)
 		}
 
-		actualBlocks := d.getEventsByBlockRange(ctx, tc.fromBlock, tc.toBlock)
+		actualBlocks := d.GetEventsByBlockRange(ctx, tc.fromBlock, tc.toBlock)
 		require.Equal(t, tc.expectedBlocks, actualBlocks, tc.description)
 	}
 }
@@ -208,9 +208,9 @@ func TestDownload(t *testing.T) {
 	ctx1, cancel := context.WithCancel(ctx)
 	expectedBlocks := []EVMBlock{}
 	dwnldr, _ := NewTestDownloader(t)
-	dwnldr.evmDownloaderInterface = d
+	dwnldr.EVMDownloaderInterface = d
 
-	d.On("waitForNewBlocks", mock.Anything, uint64(0)).
+	d.On("WaitForNewBlocks", mock.Anything, uint64(0)).
 		Return(uint64(1))
 	// iteratiion 0:
 	// last block is 1, download that block (no events and wait)
@@ -221,13 +221,13 @@ func TestDownload(t *testing.T) {
 		},
 	}
 	expectedBlocks = append(expectedBlocks, b1)
-	d.On("getEventsByBlockRange", mock.Anything, uint64(0), uint64(1)).
+	d.On("GetEventsByBlockRange", mock.Anything, uint64(0), uint64(1)).
 		Return([]EVMBlock{})
-	d.On("getBlockHeader", mock.Anything, uint64(1)).
+	d.On("GetBlockHeader", mock.Anything, uint64(1)).
 		Return(b1.EVMBlockHeader)
 
 	// iteration 1: wait for next block to be created
-	d.On("waitForNewBlocks", mock.Anything, uint64(1)).
+	d.On("WaitForNewBlocks", mock.Anything, uint64(1)).
 		After(time.Millisecond * 100).
 		Return(uint64(2)).Once()
 
@@ -239,11 +239,11 @@ func TestDownload(t *testing.T) {
 		},
 	}
 	expectedBlocks = append(expectedBlocks, b2)
-	d.On("getEventsByBlockRange", mock.Anything, uint64(2), uint64(2)).
+	d.On("GetEventsByBlockRange", mock.Anything, uint64(2), uint64(2)).
 		Return([]EVMBlock{b2})
 
 	// iteration 3: wait for next block to be created (jump to block 8)
-	d.On("waitForNewBlocks", mock.Anything, uint64(2)).
+	d.On("WaitForNewBlocks", mock.Anything, uint64(2)).
 		After(time.Millisecond * 100).
 		Return(uint64(8)).Once()
 
@@ -269,13 +269,13 @@ func TestDownload(t *testing.T) {
 		},
 	}
 	expectedBlocks = append(expectedBlocks, b6, b7, b8)
-	d.On("getEventsByBlockRange", mock.Anything, uint64(3), uint64(8)).
+	d.On("GetEventsByBlockRange", mock.Anything, uint64(3), uint64(8)).
 		Return([]EVMBlock{b6, b7})
-	d.On("getBlockHeader", mock.Anything, uint64(8)).
+	d.On("GetBlockHeader", mock.Anything, uint64(8)).
 		Return(b8.EVMBlockHeader)
 
 	// iteration 5: wait for next block to be created (jump to block 30)
-	d.On("waitForNewBlocks", mock.Anything, uint64(8)).
+	d.On("WaitForNewBlocks", mock.Anything, uint64(8)).
 		After(time.Millisecond * 100).
 		Return(uint64(30)).Once()
 
@@ -287,9 +287,9 @@ func TestDownload(t *testing.T) {
 		},
 	}
 	expectedBlocks = append(expectedBlocks, b19)
-	d.On("getEventsByBlockRange", mock.Anything, uint64(9), uint64(19)).
+	d.On("GetEventsByBlockRange", mock.Anything, uint64(9), uint64(19)).
 		Return([]EVMBlock{})
-	d.On("getBlockHeader", mock.Anything, uint64(19)).
+	d.On("GetBlockHeader", mock.Anything, uint64(19)).
 		Return(b19.EVMBlockHeader)
 
 	// iteration 7: from block 20 to 30, events on last block
@@ -301,15 +301,15 @@ func TestDownload(t *testing.T) {
 		Events: []interface{}{testEvent(common.HexToHash("30"))},
 	}
 	expectedBlocks = append(expectedBlocks, b30)
-	d.On("getEventsByBlockRange", mock.Anything, uint64(20), uint64(30)).
+	d.On("GetEventsByBlockRange", mock.Anything, uint64(20), uint64(30)).
 		Return([]EVMBlock{b30})
 
 	// iteration 8: wait for next block to be created (jump to block 35)
-	d.On("waitForNewBlocks", mock.Anything, uint64(30)).
+	d.On("WaitForNewBlocks", mock.Anything, uint64(30)).
 		After(time.Millisecond * 100).
 		Return(uint64(35)).Once()
 
-	go dwnldr.download(ctx1, 0, downloadCh)
+	go dwnldr.Download(ctx1, 0, downloadCh)
 	for _, expectedBlock := range expectedBlocks {
 		actualBlock := <-downloadCh
 		log.Debugf("block %d received!", actualBlock.Num)
@@ -331,7 +331,7 @@ func TestWaitForNewBlocks(t *testing.T) {
 	clientMock.On("HeaderByNumber", ctx, mock.Anything).Return(&types.Header{
 		Number: big.NewInt(6),
 	}, nil).Once()
-	actualBlock := d.waitForNewBlocks(ctx, currentBlock)
+	actualBlock := d.WaitForNewBlocks(ctx, currentBlock)
 	assert.Equal(t, expectedBlock, actualBlock)
 
 	// 2 iterations
@@ -341,7 +341,7 @@ func TestWaitForNewBlocks(t *testing.T) {
 	clientMock.On("HeaderByNumber", ctx, mock.Anything).Return(&types.Header{
 		Number: big.NewInt(6),
 	}, nil).Once()
-	actualBlock = d.waitForNewBlocks(ctx, currentBlock)
+	actualBlock = d.WaitForNewBlocks(ctx, currentBlock)
 	assert.Equal(t, expectedBlock, actualBlock)
 
 	// after error from client
@@ -349,7 +349,7 @@ func TestWaitForNewBlocks(t *testing.T) {
 	clientMock.On("HeaderByNumber", ctx, mock.Anything).Return(&types.Header{
 		Number: big.NewInt(6),
 	}, nil).Once()
-	actualBlock = d.waitForNewBlocks(ctx, currentBlock)
+	actualBlock = d.WaitForNewBlocks(ctx, currentBlock)
 	assert.Equal(t, expectedBlock, actualBlock)
 }
 
@@ -369,13 +369,13 @@ func TestGetBlockHeader(t *testing.T) {
 
 	// at first attempt
 	clientMock.On("HeaderByNumber", ctx, blockNumBig).Return(returnedBlock, nil).Once()
-	actualBlock := d.getBlockHeader(ctx, blockNum)
+	actualBlock := d.GetBlockHeader(ctx, blockNum)
 	assert.Equal(t, expectedBlock, actualBlock)
 
 	// after error from client
 	clientMock.On("HeaderByNumber", ctx, blockNumBig).Return(nil, errors.New("foo")).Once()
 	clientMock.On("HeaderByNumber", ctx, blockNumBig).Return(returnedBlock, nil).Once()
-	actualBlock = d.getBlockHeader(ctx, blockNum)
+	actualBlock = d.GetBlockHeader(ctx, blockNum)
 	assert.Equal(t, expectedBlock, actualBlock)
 }
 
@@ -394,7 +394,7 @@ func NewTestDownloader(t *testing.T) (*EVMDownloader, *L2Mock) {
 		RetryAfterErrorPeriod:      time.Millisecond * 100,
 	}
 	clientMock := NewL2Mock(t)
-	d, err := NewEVMDownloader(clientMock, syncBlockChunck, etherman.LatestBlock, time.Millisecond, buildAppender(), []common.Address{contractAddr}, rh)
+	d, err := NewEVMDownloader("test", clientMock, syncBlockChunck, etherman.LatestBlock, time.Millisecond, buildAppender(), []common.Address{contractAddr}, rh)
 	require.NoError(t, err)
 	return d, clientMock
 }
diff --git a/sync/evmdriver.go b/sync/evmdriver.go
index 8616e2a50..7f7829395 100644
--- a/sync/evmdriver.go
+++ b/sync/evmdriver.go
@@ -9,18 +9,23 @@ import (
 )
 
 type evmDownloaderFull interface {
-	evmDownloaderInterface
-	download(ctx context.Context, fromBlock uint64, downloadedCh chan EVMBlock)
+	EVMDownloaderInterface
+	downloader
+}
+
+type downloader interface {
+	Download(ctx context.Context, fromBlock uint64, downloadedCh chan EVMBlock)
 }
 
 type EVMDriver struct {
 	reorgDetector      ReorgDetector
 	reorgSub           *reorgdetector.Subscription
 	processor          processorInterface
-	downloader         evmDownloaderFull
+	downloader         downloader
 	reorgDetectorID    string
 	downloadBufferSize int
 	rh                 *RetryHandler
+	log                *log.Logger
 }
 
 type processorInterface interface {
@@ -37,11 +42,12 @@ type ReorgDetector interface {
 func NewEVMDriver(
 	reorgDetector ReorgDetector,
 	processor processorInterface,
-	downloader evmDownloaderFull,
+	downloader downloader,
 	reorgDetectorID string,
 	downloadBufferSize int,
 	rh *RetryHandler,
 ) (*EVMDriver, error) {
+	logger := log.WithFields("syncer", reorgDetectorID)
 	reorgSub, err := reorgDetector.Subscribe(reorgDetectorID)
 	if err != nil {
 		return nil, err
@@ -54,6 +60,7 @@ func NewEVMDriver(
 		reorgDetectorID:    reorgDetectorID,
 		downloadBufferSize: downloadBufferSize,
 		rh:                 rh,
+		log:                logger,
 	}, nil
 }
 
@@ -68,7 +75,7 @@ reset:
 		lastProcessedBlock, err = d.processor.GetLastProcessedBlock(ctx)
 		if err != nil {
 			attempts++
-			log.Error("error geting last processed block: ", err)
+			d.log.Error("error getting last processed block: ", err)
 			d.rh.Handle("Sync", attempts)
 			continue
 		}
@@ -79,15 +86,15 @@ reset:
 
 	// start downloading
 	downloadCh := make(chan EVMBlock, d.downloadBufferSize)
-	go d.downloader.download(cancellableCtx, lastProcessedBlock, downloadCh)
+	go d.downloader.Download(cancellableCtx, lastProcessedBlock, downloadCh)
 
 	for {
 		select {
 		case b := <-downloadCh:
-			log.Debug("handleNewBlock")
+			d.log.Debug("handleNewBlock")
 			d.handleNewBlock(ctx, b)
 		case firstReorgedBlock := <-d.reorgSub.FirstReorgedBlock:
-			log.Debug("handleReorg")
+			d.log.Debug("handleReorg")
 			d.handleReorg(ctx, cancel, downloadCh, firstReorgedBlock)
 			goto reset
 		}
@@ -100,7 +107,7 @@ func (d *EVMDriver) handleNewBlock(ctx context.Context, b EVMBlock) {
 		err := d.reorgDetector.AddBlockToTrack(ctx, d.reorgDetectorID, b.Num, b.Hash)
 		if err != nil {
 			attempts++
-			log.Errorf("error adding block %d to tracker: %v", b.Num, err)
+			d.log.Errorf("error adding block %d to tracker: %v", b.Num, err)
 			d.rh.Handle("handleNewBlock", attempts)
 			continue
 		}
@@ -115,7 +122,7 @@ func (d *EVMDriver) handleNewBlock(ctx context.Context, b EVMBlock) {
 		err := d.processor.ProcessBlock(ctx, blockToProcess)
 		if err != nil {
 			attempts++
-			log.Errorf("error processing events for blcok %d, err: ", b.Num, err)
+			d.log.Errorf("error processing events for block %d, err: %v", b.Num, err)
 			d.rh.Handle("handleNewBlock", attempts)
 			continue
 		}
@@ -138,7 +145,7 @@ func (d *EVMDriver) handleReorg(
 		err := d.processor.Reorg(ctx, firstReorgedBlock)
 		if err != nil {
 			attempts++
-			log.Errorf(
+			d.log.Errorf(
 				"error processing reorg, last valid Block %d, err: %v",
 				firstReorgedBlock, err,
 			)
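
Since the driver now depends only on the narrow downloader interface introduced above, alternative block sources can be injected. A hedged sketch follows; the replay type and its names are illustrative and not part of this change.

	// Hypothetical downloader that replays pre-recorded blocks, e.g. for tests or backfills.
	type replayDownloader struct {
		blocks []EVMBlock
	}

	func (r *replayDownloader) Download(ctx context.Context, fromBlock uint64, downloadedCh chan EVMBlock) {
		defer close(downloadedCh)
		for _, b := range r.blocks {
			if b.Num < fromBlock {
				continue
			}
			select {
			case <-ctx.Done():
				return
			case downloadedCh <- b:
			}
		}
	}

	// It can then be passed directly to the driver constructor:
	// driver, err := NewEVMDriver(reorgDetector, processor, &replayDownloader{blocks: recorded}, "replay", 10, rh)
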
diff --git a/sync/evmdriver_test.go b/sync/evmdriver_test.go
index 34b8fb898..5b1abbfe7 100644
--- a/sync/evmdriver_test.go
+++ b/sync/evmdriver_test.go
@@ -52,7 +52,7 @@ func TestSync(t *testing.T) {
 		green bool
 	}
 	reorg1Completed := reorgSemaphore{}
-	dm.On("download", mock.Anything, mock.Anything, mock.Anything).Run(func(args mock.Arguments) {
+	dm.On("Download", mock.Anything, mock.Anything, mock.Anything).Run(func(args mock.Arguments) {
 		ctx := args.Get(0).(context.Context)
 		downloadedCh := args.Get(2).(chan EVMBlock)
 		log.Info("entering mock loop")
diff --git a/sync/mock_downloader_test.go b/sync/mock_downloader_test.go
index 738fc8733..1cd476ad0 100644
--- a/sync/mock_downloader_test.go
+++ b/sync/mock_downloader_test.go
@@ -14,13 +14,13 @@ type EVMDownloaderMock struct {
 	mock.Mock
 }
 
-// download provides a mock function with given fields: ctx, fromBlock, downloadedCh
-func (_m *EVMDownloaderMock) download(ctx context.Context, fromBlock uint64, downloadedCh chan EVMBlock) {
+// Download provides a mock function with given fields: ctx, fromBlock, downloadedCh
+func (_m *EVMDownloaderMock) Download(ctx context.Context, fromBlock uint64, downloadedCh chan EVMBlock) {
 	_m.Called(ctx, fromBlock, downloadedCh)
 }
 
-// getBlockHeader provides a mock function with given fields: ctx, blockNum
-func (_m *EVMDownloaderMock) getBlockHeader(ctx context.Context, blockNum uint64) EVMBlockHeader {
+// GetBlockHeader provides a mock function with given fields: ctx, blockNum
+func (_m *EVMDownloaderMock) GetBlockHeader(ctx context.Context, blockNum uint64) EVMBlockHeader {
 	ret := _m.Called(ctx, blockNum)
 
 	var r0 EVMBlockHeader
@@ -33,8 +33,8 @@ func (_m *EVMDownloaderMock) getBlockHeader(ctx context.Context, blockNum uint64
 	return r0
 }
 
-// getEventsByBlockRange provides a mock function with given fields: ctx, fromBlock, toBlock
-func (_m *EVMDownloaderMock) getEventsByBlockRange(ctx context.Context, fromBlock uint64, toBlock uint64) []EVMBlock {
+// GetEventsByBlockRange provides a mock function with given fields: ctx, fromBlock, toBlock
+func (_m *EVMDownloaderMock) GetEventsByBlockRange(ctx context.Context, fromBlock uint64, toBlock uint64) []EVMBlock {
 	ret := _m.Called(ctx, fromBlock, toBlock)
 
 	var r0 []EVMBlock
@@ -49,8 +49,8 @@ func (_m *EVMDownloaderMock) getEventsByBlockRange(ctx context.Context, fromBloc
 	return r0
 }
 
-// getLogs provides a mock function with given fields: ctx, fromBlock, toBlock
-func (_m *EVMDownloaderMock) getLogs(ctx context.Context, fromBlock uint64, toBlock uint64) []types.Log {
+// GetLogs provides a mock function with given fields: ctx, fromBlock, toBlock
+func (_m *EVMDownloaderMock) GetLogs(ctx context.Context, fromBlock uint64, toBlock uint64) []types.Log {
 	ret := _m.Called(ctx, fromBlock, toBlock)
 
 	var r0 []types.Log
@@ -65,8 +65,8 @@ func (_m *EVMDownloaderMock) getLogs(ctx context.Context, fromBlock uint64, toBl
 	return r0
 }
 
-// waitForNewBlocks provides a mock function with given fields: ctx, lastBlockSeen
-func (_m *EVMDownloaderMock) waitForNewBlocks(ctx context.Context, lastBlockSeen uint64) uint64 {
+// WaitForNewBlocks provides a mock function with given fields: ctx, lastBlockSeen
+func (_m *EVMDownloaderMock) WaitForNewBlocks(ctx context.Context, lastBlockSeen uint64) uint64 {
 	ret := _m.Called(ctx, lastBlockSeen)
 
 	var r0 uint64
diff --git a/test/Makefile b/test/Makefile
index bebd7ff84..86bd147ef 100644
--- a/test/Makefile
+++ b/test/Makefile
@@ -1,5 +1,5 @@
 .PHONY: generate-mocks
-generate-mocks: generate-mocks-bridgesync generate-mocks-reorgdetector generate-mocks-sequencesender generate-mocks-da generate-mocks-l1infotreesync generate-mocks-aggoracle generate-mocks-sync
+generate-mocks: generate-mocks-bridgesync generate-mocks-reorgdetector generate-mocks-sequencesender generate-mocks-da generate-mocks-l1infotreesync generate-mocks-helpers generate-mocks-sync
 
 
 .PHONY: generate-mocks-bridgesync
@@ -50,8 +50,8 @@ generate-mocks-l1infotreesync: ## Generates mocks for l1infotreesync , using moc
 	export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=ReorgDetector --dir=../sync --output=../l1infotreesync --outpkg=l1infotreesync --structname=ReorgDetectorMock --filename=mock_reorgdetector_test.go
 
 .PHONY: generate-mocks-aggoracle
-generate-mocks-aggoracle: ## Generates mocks for aggoracle , using mockery tool
-	export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=EthTxManager --dir=../aggoracle/chaingersender --output=../aggoracle --outpkg=aggoracle --structname=EthTxManagerMock --filename=mock_ethtxmanager_test.go
+generate-mocks-helpers: ## Generates mocks for helpers , using mockery tool
+	export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=EthTxManager --dir=../aggoracle/chaingersender --output=./helpers --outpkg=helpers --structname=EthTxManagerMock --filename=mock_ethtxmanager.go
 
 .PHONY: generate-mocks-sync
 generate-mocks-sync: ## Generates mocks for sync, using mockery tool
diff --git a/test/contracts/transparentupgradableproxy/transparentupgradableproxy.go b/test/contracts/transparentupgradableproxy/transparentupgradableproxy.go
new file mode 100644
index 000000000..2fb3e0429
--- /dev/null
+++ b/test/contracts/transparentupgradableproxy/transparentupgradableproxy.go
@@ -0,0 +1,773 @@
+// Code generated - DO NOT EDIT.
+// This file is a generated binding and any manual changes will be lost.
+
+package transparentupgradableproxy
+
+import (
+	"errors"
+	"math/big"
+	"strings"
+
+	ethereum "github.com/ethereum/go-ethereum"
+	"github.com/ethereum/go-ethereum/accounts/abi"
+	"github.com/ethereum/go-ethereum/accounts/abi/bind"
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/event"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var (
+	_ = errors.New
+	_ = big.NewInt
+	_ = strings.NewReader
+	_ = ethereum.NotFound
+	_ = bind.Bind
+	_ = common.Big1
+	_ = types.BloomLookup
+	_ = event.NewSubscription
+	_ = abi.ConvertType
+)
+
+// TransparentupgradableproxyMetaData contains all meta data concerning the Transparentupgradableproxy contract.
+var TransparentupgradableproxyMetaData = &bind.MetaData{
+	ABI: "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_logic\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"admin_\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"_data\",\"type\":\"bytes\"}],\"stateMutability\":\"payable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"previousAdmin\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"newAdmin\",\"type\":\"address\"}],\"name\":\"AdminChanged\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"beacon\",\"type\":\"address\"}],\"name\":\"BeaconUpgraded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"implementation\",\"type\":\"address\"}],\"name\":\"Upgraded\",\"type\":\"event\"},{\"stateMutability\":\"payable\",\"type\":\"fallback\"},{\"inputs\":[],\"name\":\"admin\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"admin_\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"newAdmin\",\"type\":\"address\"}],\"name\":\"changeAdmin\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"implementation\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"implementation_\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"newImplementation\",\"type\":\"address\"}],\"name\":\"upgradeTo\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"newImplementation\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"upgradeToAndCall\",\"outputs\":[],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"stateMutability\":\"payable\",\"type\":\"receive\"}]",
+	Bin: "0x608060405260405162000f6838038062000f68833981016040819052620000269162000415565b82816200003582825f6200004c565b50620000439050826200007d565b50505062000540565b6200005783620000ee565b5f82511180620000645750805b1562000078576200007683836200012f565b505b505050565b7f7e644d79422f17c01e4894b5f4f588d331ebfa28653d42ae832dc59e38c9798f620000be5f8051602062000f21833981519152546001600160a01b031690565b604080516001600160a01b03928316815291841660208301520160405180910390a1620000eb816200015e565b50565b620000f981620001fb565b6040516001600160a01b038216907fbc7cd75a20ee27fd9adebab32041f755214dbc6bffa90cc0225b39da2e5c2d3b905f90a250565b606062000157838360405180606001604052806027815260200162000f416027913962000292565b9392505050565b6001600160a01b038116620001c95760405162461bcd60e51b815260206004820152602660248201527f455243313936373a206e65772061646d696e20697320746865207a65726f206160448201526564647265737360d01b60648201526084015b60405180910390fd5b805f8051602062000f218339815191525b80546001600160a01b0319166001600160a01b039290921691909117905550565b6001600160a01b0381163b6200026a5760405162461bcd60e51b815260206004820152602d60248201527f455243313936373a206e657720696d706c656d656e746174696f6e206973206e60448201526c1bdd08184818dbdb9d1c9858dd609a1b6064820152608401620001c0565b807f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc620001da565b60605f80856001600160a01b031685604051620002b09190620004ef565b5f60405180830381855af49150503d805f8114620002ea576040519150601f19603f3d011682016040523d82523d5f602084013e620002ef565b606091505b50909250905062000303868383876200030d565b9695505050505050565b60608315620003805782515f0362000378576001600160a01b0385163b620003785760405162461bcd60e51b815260206004820152601d60248201527f416464726573733a2063616c6c20746f206e6f6e2d636f6e74726163740000006044820152606401620001c0565b50816200038c565b6200038c838362000394565b949350505050565b815115620003a55781518083602001fd5b8060405162461bcd60e51b8152600401620001c091906200050c565b80516001600160a01b0381168114620003d8575f80fd5b919050565b634e487b7160e01b5f52604160045260245ffd5b5f5b838110156200040d578181015183820152602001620003f3565b50505f910152565b5f805f6060848603121562000428575f80fd5b6200043384620003c1565b92506200044360208501620003c1565b60408501519092506001600160401b038082111562000460575f80fd5b818601915086601f83011262000474575f80fd5b815181811115620004895762000489620003dd565b604051601f8201601f19908116603f01168101908382118183101715620004b457620004b4620003dd565b81604052828152896020848701011115620004cd575f80fd5b620004e0836020830160208801620003f1565b80955050505050509250925092565b5f825162000502818460208701620003f1565b9190910192915050565b602081525f82518060208401526200052c816040850160208701620003f1565b601f01601f19169190910160400192915050565b6109d3806200054e5f395ff3fe60806040526004361061005d575f3560e01c80635c60da1b116100425780635c60da1b146100a65780638f283970146100e3578063f851a440146101025761006c565b80633659cfe6146100745780634f1ef286146100935761006c565b3661006c5761006a610116565b005b61006a610116565b34801561007f575f80fd5b5061006a61008e366004610854565b610130565b61006a6100a136600461086d565b610178565b3480156100b1575f80fd5b506100ba6101eb565b60405173ffffffffffffffffffffffffffffffffffffffff909116815260200160405180910390f35b3480156100ee575f80fd5b5061006a6100fd366004610854565b610228565b34801561010d575f80fd5b506100ba610255565b61011e610282565b61012e610129610359565b610362565b565b610138610380565b73ffffffffffffffffffffffffffffffffffffffff1633036101705761016d8160405180602001604052805f8152505f6103bf565b50565b61016d610116565b610180610380565b73ffffffffffffffffffffffffffffffffffffffff1633036101e357610
1de8383838080601f0160208091040260200160405190810160405280939291908181526020018383808284375f92019190915250600192506103bf915050565b505050565b6101de610116565b5f6101f4610380565b73ffffffffffffffffffffffffffffffffffffffff16330361021d57610218610359565b905090565b610225610116565b90565b610230610380565b73ffffffffffffffffffffffffffffffffffffffff1633036101705761016d816103e9565b5f61025e610380565b73ffffffffffffffffffffffffffffffffffffffff16330361021d57610218610380565b61028a610380565b73ffffffffffffffffffffffffffffffffffffffff16330361012e576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152604260248201527f5472616e73706172656e745570677261646561626c6550726f78793a2061646d60448201527f696e2063616e6e6f742066616c6c6261636b20746f2070726f7879207461726760648201527f6574000000000000000000000000000000000000000000000000000000000000608482015260a4015b60405180910390fd5b5f61021861044a565b365f80375f80365f845af43d5f803e80801561037c573d5ff35b3d5ffd5b5f7fb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d61035b5473ffffffffffffffffffffffffffffffffffffffff16919050565b6103c883610471565b5f825111806103d45750805b156101de576103e383836104bd565b50505050565b7f7e644d79422f17c01e4894b5f4f588d331ebfa28653d42ae832dc59e38c9798f610412610380565b6040805173ffffffffffffffffffffffffffffffffffffffff928316815291841660208301520160405180910390a161016d816104e9565b5f7f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc6103a3565b61047a816105f5565b60405173ffffffffffffffffffffffffffffffffffffffff8216907fbc7cd75a20ee27fd9adebab32041f755214dbc6bffa90cc0225b39da2e5c2d3b905f90a250565b60606104e28383604051806060016040528060278152602001610977602791396106c0565b9392505050565b73ffffffffffffffffffffffffffffffffffffffff811661058c576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602660248201527f455243313936373a206e65772061646d696e20697320746865207a65726f206160448201527f64647265737300000000000000000000000000000000000000000000000000006064820152608401610350565b807fb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d61035b80547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff9290921691909117905550565b73ffffffffffffffffffffffffffffffffffffffff81163b610699576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602d60248201527f455243313936373a206e657720696d706c656d656e746174696f6e206973206e60448201527f6f74206120636f6e7472616374000000000000000000000000000000000000006064820152608401610350565b807f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc6105af565b60605f808573ffffffffffffffffffffffffffffffffffffffff16856040516106e9919061090b565b5f60405180830381855af49150503d805f8114610721576040519150601f19603f3d011682016040523d82523d5f602084013e610726565b606091505b509150915061073786838387610741565b9695505050505050565b606083156107d65782515f036107cf5773ffffffffffffffffffffffffffffffffffffffff85163b6107cf576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601d60248201527f416464726573733a2063616c6c20746f206e6f6e2d636f6e74726163740000006044820152606401610350565b50816107e0565b6107e083836107e8565b949350505050565b8151156107f85781518083602001fd5b806040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016103509190610926565b803573ffffffffffffffffffffffffffffffffffffffff8116811461084f575f80fd5b919050565b5f60208284031215610864575f80fd5b6104e28261082c565b5f805f6040848603121561087f575f80fd5b6108888461082c565b92506020840135
67ffffffffffffffff808211156108a4575f80fd5b818601915086601f8301126108b7575f80fd5b8135818111156108c5575f80fd5b8760208285010111156108d6575f80fd5b6020830194508093505050509250925092565b5f5b838110156109035781810151838201526020016108eb565b50505f910152565b5f825161091c8184602087016108e9565b9190910192915050565b602081525f82518060208401526109448160408501602087016108e9565b601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016919091016040019291505056fe416464726573733a206c6f772d6c6576656c2064656c65676174652063616c6c206661696c6564a26469706673582212202ac98acbfbb3d3ac1b74050e18c4e76db25a3ff2801ec69bf85d0c61414d502b64736f6c63430008140033b53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103416464726573733a206c6f772d6c6576656c2064656c65676174652063616c6c206661696c6564",
+}
+
+// TransparentupgradableproxyABI is the input ABI used to generate the binding from.
+// Deprecated: Use TransparentupgradableproxyMetaData.ABI instead.
+var TransparentupgradableproxyABI = TransparentupgradableproxyMetaData.ABI
+
+// TransparentupgradableproxyBin is the compiled bytecode used for deploying new contracts.
+// Deprecated: Use TransparentupgradableproxyMetaData.Bin instead.
+var TransparentupgradableproxyBin = TransparentupgradableproxyMetaData.Bin
+
+// DeployTransparentupgradableproxy deploys a new Ethereum contract, binding an instance of Transparentupgradableproxy to it.
+func DeployTransparentupgradableproxy(auth *bind.TransactOpts, backend bind.ContractBackend, _logic common.Address, admin_ common.Address, _data []byte) (common.Address, *types.Transaction, *Transparentupgradableproxy, error) {
+	parsed, err := TransparentupgradableproxyMetaData.GetAbi()
+	if err != nil {
+		return common.Address{}, nil, nil, err
+	}
+	if parsed == nil {
+		return common.Address{}, nil, nil, errors.New("GetABI returned nil")
+	}
+
+	address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(TransparentupgradableproxyBin), backend, _logic, admin_, _data)
+	if err != nil {
+		return common.Address{}, nil, nil, err
+	}
+	return address, tx, &Transparentupgradableproxy{TransparentupgradableproxyCaller: TransparentupgradableproxyCaller{contract: contract}, TransparentupgradableproxyTransactor: TransparentupgradableproxyTransactor{contract: contract}, TransparentupgradableproxyFilterer: TransparentupgradableproxyFilterer{contract: contract}}, nil
+}
+
+// Transparentupgradableproxy is an auto generated Go binding around an Ethereum contract.
+type Transparentupgradableproxy struct {
+	TransparentupgradableproxyCaller     // Read-only binding to the contract
+	TransparentupgradableproxyTransactor // Write-only binding to the contract
+	TransparentupgradableproxyFilterer   // Log filterer for contract events
+}
+
+// TransparentupgradableproxyCaller is an auto generated read-only Go binding around an Ethereum contract.
+type TransparentupgradableproxyCaller struct {
+	contract *bind.BoundContract // Generic contract wrapper for the low level calls
+}
+
+// TransparentupgradableproxyTransactor is an auto generated write-only Go binding around an Ethereum contract.
+type TransparentupgradableproxyTransactor struct {
+	contract *bind.BoundContract // Generic contract wrapper for the low level calls
+}
+
+// TransparentupgradableproxyFilterer is an auto generated log filtering Go binding around an Ethereum contract events.
+type TransparentupgradableproxyFilterer struct {
+	contract *bind.BoundContract // Generic contract wrapper for the low level calls
+}
+
+// TransparentupgradableproxySession is an auto generated Go binding around an Ethereum contract,
+// with pre-set call and transact options.
+type TransparentupgradableproxySession struct {
+	Contract     *Transparentupgradableproxy // Generic contract binding to set the session for
+	CallOpts     bind.CallOpts               // Call options to use throughout this session
+	TransactOpts bind.TransactOpts           // Transaction auth options to use throughout this session
+}
+
+// TransparentupgradableproxyCallerSession is an auto generated read-only Go binding around an Ethereum contract,
+// with pre-set call options.
+type TransparentupgradableproxyCallerSession struct {
+	Contract *TransparentupgradableproxyCaller // Generic contract caller binding to set the session for
+	CallOpts bind.CallOpts                     // Call options to use throughout this session
+}
+
+// TransparentupgradableproxyTransactorSession is an auto generated write-only Go binding around an Ethereum contract,
+// with pre-set transact options.
+type TransparentupgradableproxyTransactorSession struct {
+	Contract     *TransparentupgradableproxyTransactor // Generic contract transactor binding to set the session for
+	TransactOpts bind.TransactOpts                     // Transaction auth options to use throughout this session
+}
+
+// TransparentupgradableproxyRaw is an auto generated low-level Go binding around an Ethereum contract.
+type TransparentupgradableproxyRaw struct {
+	Contract *Transparentupgradableproxy // Generic contract binding to access the raw methods on
+}
+
+// TransparentupgradableproxyCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract.
+type TransparentupgradableproxyCallerRaw struct {
+	Contract *TransparentupgradableproxyCaller // Generic read-only contract binding to access the raw methods on
+}
+
+// TransparentupgradableproxyTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract.
+type TransparentupgradableproxyTransactorRaw struct {
+	Contract *TransparentupgradableproxyTransactor // Generic write-only contract binding to access the raw methods on
+}
+
+// NewTransparentupgradableproxy creates a new instance of Transparentupgradableproxy, bound to a specific deployed contract.
+func NewTransparentupgradableproxy(address common.Address, backend bind.ContractBackend) (*Transparentupgradableproxy, error) {
+	contract, err := bindTransparentupgradableproxy(address, backend, backend, backend)
+	if err != nil {
+		return nil, err
+	}
+	return &Transparentupgradableproxy{TransparentupgradableproxyCaller: TransparentupgradableproxyCaller{contract: contract}, TransparentupgradableproxyTransactor: TransparentupgradableproxyTransactor{contract: contract}, TransparentupgradableproxyFilterer: TransparentupgradableproxyFilterer{contract: contract}}, nil
+}
+
+// NewTransparentupgradableproxyCaller creates a new read-only instance of Transparentupgradableproxy, bound to a specific deployed contract.
+func NewTransparentupgradableproxyCaller(address common.Address, caller bind.ContractCaller) (*TransparentupgradableproxyCaller, error) {
+	contract, err := bindTransparentupgradableproxy(address, caller, nil, nil)
+	if err != nil {
+		return nil, err
+	}
+	return &TransparentupgradableproxyCaller{contract: contract}, nil
+}
+
+// NewTransparentupgradableproxyTransactor creates a new write-only instance of Transparentupgradableproxy, bound to a specific deployed contract.
+func NewTransparentupgradableproxyTransactor(address common.Address, transactor bind.ContractTransactor) (*TransparentupgradableproxyTransactor, error) {
+	contract, err := bindTransparentupgradableproxy(address, nil, transactor, nil)
+	if err != nil {
+		return nil, err
+	}
+	return &TransparentupgradableproxyTransactor{contract: contract}, nil
+}
+
+// NewTransparentupgradableproxyFilterer creates a new log filterer instance of Transparentupgradableproxy, bound to a specific deployed contract.
+func NewTransparentupgradableproxyFilterer(address common.Address, filterer bind.ContractFilterer) (*TransparentupgradableproxyFilterer, error) {
+	contract, err := bindTransparentupgradableproxy(address, nil, nil, filterer)
+	if err != nil {
+		return nil, err
+	}
+	return &TransparentupgradableproxyFilterer{contract: contract}, nil
+}
+
+// bindTransparentupgradableproxy binds a generic wrapper to an already deployed contract.
+func bindTransparentupgradableproxy(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {
+	parsed, err := TransparentupgradableproxyMetaData.GetAbi()
+	if err != nil {
+		return nil, err
+	}
+	return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil
+}
+
+// Call invokes the (constant) contract method with params as input values and
+// sets the output to result. The result type might be a single field for simple
+// returns, a slice of interfaces for anonymous returns and a struct for named
+// returns.
+func (_Transparentupgradableproxy *TransparentupgradableproxyRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error {
+	return _Transparentupgradableproxy.Contract.TransparentupgradableproxyCaller.contract.Call(opts, result, method, params...)
+}
+
+// Transfer initiates a plain transaction to move funds to the contract, calling
+// its default method if one is available.
+func (_Transparentupgradableproxy *TransparentupgradableproxyRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {
+	return _Transparentupgradableproxy.Contract.TransparentupgradableproxyTransactor.contract.Transfer(opts)
+}
+
+// Transact invokes the (paid) contract method with params as input values.
+func (_Transparentupgradableproxy *TransparentupgradableproxyRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) {
+	return _Transparentupgradableproxy.Contract.TransparentupgradableproxyTransactor.contract.Transact(opts, method, params...)
+}
+
+// Call invokes the (constant) contract method with params as input values and
+// sets the output to result. The result type might be a single field for simple
+// returns, a slice of interfaces for anonymous returns and a struct for named
+// returns.
+func (_Transparentupgradableproxy *TransparentupgradableproxyCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error {
+	return _Transparentupgradableproxy.Contract.contract.Call(opts, result, method, params...)
+}
+
+// Transfer initiates a plain transaction to move funds to the contract, calling
+// its default method if one is available.
+func (_Transparentupgradableproxy *TransparentupgradableproxyTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {
+	return _Transparentupgradableproxy.Contract.contract.Transfer(opts)
+}
+
+// Transact invokes the (paid) contract method with params as input values.
+func (_Transparentupgradableproxy *TransparentupgradableproxyTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) {
+	return _Transparentupgradableproxy.Contract.contract.Transact(opts, method, params...)
+}
+
+// Admin is a paid mutator transaction binding the contract method 0xf851a440.
+//
+// Solidity: function admin() returns(address admin_)
+func (_Transparentupgradableproxy *TransparentupgradableproxyTransactor) Admin(opts *bind.TransactOpts) (*types.Transaction, error) {
+	return _Transparentupgradableproxy.contract.Transact(opts, "admin")
+}
+
+// Admin is a paid mutator transaction binding the contract method 0xf851a440.
+//
+// Solidity: function admin() returns(address admin_)
+func (_Transparentupgradableproxy *TransparentupgradableproxySession) Admin() (*types.Transaction, error) {
+	return _Transparentupgradableproxy.Contract.Admin(&_Transparentupgradableproxy.TransactOpts)
+}
+
+// Admin is a paid mutator transaction binding the contract method 0xf851a440.
+//
+// Solidity: function admin() returns(address admin_)
+func (_Transparentupgradableproxy *TransparentupgradableproxyTransactorSession) Admin() (*types.Transaction, error) {
+	return _Transparentupgradableproxy.Contract.Admin(&_Transparentupgradableproxy.TransactOpts)
+}
+
+// ChangeAdmin is a paid mutator transaction binding the contract method 0x8f283970.
+//
+// Solidity: function changeAdmin(address newAdmin) returns()
+func (_Transparentupgradableproxy *TransparentupgradableproxyTransactor) ChangeAdmin(opts *bind.TransactOpts, newAdmin common.Address) (*types.Transaction, error) {
+	return _Transparentupgradableproxy.contract.Transact(opts, "changeAdmin", newAdmin)
+}
+
+// ChangeAdmin is a paid mutator transaction binding the contract method 0x8f283970.
+//
+// Solidity: function changeAdmin(address newAdmin) returns()
+func (_Transparentupgradableproxy *TransparentupgradableproxySession) ChangeAdmin(newAdmin common.Address) (*types.Transaction, error) {
+	return _Transparentupgradableproxy.Contract.ChangeAdmin(&_Transparentupgradableproxy.TransactOpts, newAdmin)
+}
+
+// ChangeAdmin is a paid mutator transaction binding the contract method 0x8f283970.
+//
+// Solidity: function changeAdmin(address newAdmin) returns()
+func (_Transparentupgradableproxy *TransparentupgradableproxyTransactorSession) ChangeAdmin(newAdmin common.Address) (*types.Transaction, error) {
+	return _Transparentupgradableproxy.Contract.ChangeAdmin(&_Transparentupgradableproxy.TransactOpts, newAdmin)
+}
+
+// Implementation is a paid mutator transaction binding the contract method 0x5c60da1b.
+//
+// Solidity: function implementation() returns(address implementation_)
+func (_Transparentupgradableproxy *TransparentupgradableproxyTransactor) Implementation(opts *bind.TransactOpts) (*types.Transaction, error) {
+	return _Transparentupgradableproxy.contract.Transact(opts, "implementation")
+}
+
+// Implementation is a paid mutator transaction binding the contract method 0x5c60da1b.
+//
+// Solidity: function implementation() returns(address implementation_)
+func (_Transparentupgradableproxy *TransparentupgradableproxySession) Implementation() (*types.Transaction, error) {
+	return _Transparentupgradableproxy.Contract.Implementation(&_Transparentupgradableproxy.TransactOpts)
+}
+
+// Implementation is a paid mutator transaction binding the contract method 0x5c60da1b.
+//
+// Solidity: function implementation() returns(address implementation_)
+func (_Transparentupgradableproxy *TransparentupgradableproxyTransactorSession) Implementation() (*types.Transaction, error) {
+	return _Transparentupgradableproxy.Contract.Implementation(&_Transparentupgradableproxy.TransactOpts)
+}
+
+// UpgradeTo is a paid mutator transaction binding the contract method 0x3659cfe6.
+//
+// Solidity: function upgradeTo(address newImplementation) returns()
+func (_Transparentupgradableproxy *TransparentupgradableproxyTransactor) UpgradeTo(opts *bind.TransactOpts, newImplementation common.Address) (*types.Transaction, error) {
+	return _Transparentupgradableproxy.contract.Transact(opts, "upgradeTo", newImplementation)
+}
+
+// UpgradeTo is a paid mutator transaction binding the contract method 0x3659cfe6.
+//
+// Solidity: function upgradeTo(address newImplementation) returns()
+func (_Transparentupgradableproxy *TransparentupgradableproxySession) UpgradeTo(newImplementation common.Address) (*types.Transaction, error) {
+	return _Transparentupgradableproxy.Contract.UpgradeTo(&_Transparentupgradableproxy.TransactOpts, newImplementation)
+}
+
+// UpgradeTo is a paid mutator transaction binding the contract method 0x3659cfe6.
+//
+// Solidity: function upgradeTo(address newImplementation) returns()
+func (_Transparentupgradableproxy *TransparentupgradableproxyTransactorSession) UpgradeTo(newImplementation common.Address) (*types.Transaction, error) {
+	return _Transparentupgradableproxy.Contract.UpgradeTo(&_Transparentupgradableproxy.TransactOpts, newImplementation)
+}
+
+// UpgradeToAndCall is a paid mutator transaction binding the contract method 0x4f1ef286.
+//
+// Solidity: function upgradeToAndCall(address newImplementation, bytes data) payable returns()
+func (_Transparentupgradableproxy *TransparentupgradableproxyTransactor) UpgradeToAndCall(opts *bind.TransactOpts, newImplementation common.Address, data []byte) (*types.Transaction, error) {
+	return _Transparentupgradableproxy.contract.Transact(opts, "upgradeToAndCall", newImplementation, data)
+}
+
+// UpgradeToAndCall is a paid mutator transaction binding the contract method 0x4f1ef286.
+//
+// Solidity: function upgradeToAndCall(address newImplementation, bytes data) payable returns()
+func (_Transparentupgradableproxy *TransparentupgradableproxySession) UpgradeToAndCall(newImplementation common.Address, data []byte) (*types.Transaction, error) {
+	return _Transparentupgradableproxy.Contract.UpgradeToAndCall(&_Transparentupgradableproxy.TransactOpts, newImplementation, data)
+}
+
+// UpgradeToAndCall is a paid mutator transaction binding the contract method 0x4f1ef286.
+//
+// Solidity: function upgradeToAndCall(address newImplementation, bytes data) payable returns()
+func (_Transparentupgradableproxy *TransparentupgradableproxyTransactorSession) UpgradeToAndCall(newImplementation common.Address, data []byte) (*types.Transaction, error) {
+	return _Transparentupgradableproxy.Contract.UpgradeToAndCall(&_Transparentupgradableproxy.TransactOpts, newImplementation, data)
+}
+
+// Fallback is a paid mutator transaction binding the contract fallback function.
+//
+// Solidity: fallback() payable returns()
+func (_Transparentupgradableproxy *TransparentupgradableproxyTransactor) Fallback(opts *bind.TransactOpts, calldata []byte) (*types.Transaction, error) {
+	return _Transparentupgradableproxy.contract.RawTransact(opts, calldata)
+}
+
+// Fallback is a paid mutator transaction binding the contract fallback function.
+//
+// Solidity: fallback() payable returns()
+func (_Transparentupgradableproxy *TransparentupgradableproxySession) Fallback(calldata []byte) (*types.Transaction, error) {
+	return _Transparentupgradableproxy.Contract.Fallback(&_Transparentupgradableproxy.TransactOpts, calldata)
+}
+
+// Fallback is a paid mutator transaction binding the contract fallback function.
+//
+// Solidity: fallback() payable returns()
+func (_Transparentupgradableproxy *TransparentupgradableproxyTransactorSession) Fallback(calldata []byte) (*types.Transaction, error) {
+	return _Transparentupgradableproxy.Contract.Fallback(&_Transparentupgradableproxy.TransactOpts, calldata)
+}
+
+// Receive is a paid mutator transaction binding the contract receive function.
+//
+// Solidity: receive() payable returns()
+func (_Transparentupgradableproxy *TransparentupgradableproxyTransactor) Receive(opts *bind.TransactOpts) (*types.Transaction, error) {
+	return _Transparentupgradableproxy.contract.RawTransact(opts, nil) // calldata is disallowed for receive function
+}
+
+// Receive is a paid mutator transaction binding the contract receive function.
+//
+// Solidity: receive() payable returns()
+func (_Transparentupgradableproxy *TransparentupgradableproxySession) Receive() (*types.Transaction, error) {
+	return _Transparentupgradableproxy.Contract.Receive(&_Transparentupgradableproxy.TransactOpts)
+}
+
+// Receive is a paid mutator transaction binding the contract receive function.
+//
+// Solidity: receive() payable returns()
+func (_Transparentupgradableproxy *TransparentupgradableproxyTransactorSession) Receive() (*types.Transaction, error) {
+	return _Transparentupgradableproxy.Contract.Receive(&_Transparentupgradableproxy.TransactOpts)
+}
+
+// TransparentupgradableproxyAdminChangedIterator is returned from FilterAdminChanged and is used to iterate over the raw logs and unpacked data for AdminChanged events raised by the Transparentupgradableproxy contract.
+type TransparentupgradableproxyAdminChangedIterator struct {
+	Event *TransparentupgradableproxyAdminChanged // Event containing the contract specifics and raw log
+
+	contract *bind.BoundContract // Generic contract to use for unpacking event data
+	event    string              // Event name to use for unpacking event data
+
+	logs chan types.Log        // Log channel receiving the found contract events
+	sub  ethereum.Subscription // Subscription for errors, completion and termination
+	done bool                  // Whether the subscription completed delivering logs
+	fail error                 // Occurred error to stop iteration
+}
+
+// Next advances the iterator to the subsequent event, returning whether there
+// are any more events found. In case of a retrieval or parsing error, false is
+// returned and Error() can be queried for the exact failure.
+func (it *TransparentupgradableproxyAdminChangedIterator) Next() bool {
+	// If the iterator failed, stop iterating
+	if it.fail != nil {
+		return false
+	}
+	// If the iterator completed, deliver directly whatever's available
+	if it.done {
+		select {
+		case log := <-it.logs:
+			it.Event = new(TransparentupgradableproxyAdminChanged)
+			if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
+				it.fail = err
+				return false
+			}
+			it.Event.Raw = log
+			return true
+
+		default:
+			return false
+		}
+	}
+	// Iterator still in progress, wait for either a data or an error event
+	select {
+	case log := <-it.logs:
+		it.Event = new(TransparentupgradableproxyAdminChanged)
+		if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
+			it.fail = err
+			return false
+		}
+		it.Event.Raw = log
+		return true
+
+	case err := <-it.sub.Err():
+		it.done = true
+		it.fail = err
+		return it.Next()
+	}
+}
+
+// Error returns any retrieval or parsing error occurred during filtering.
+func (it *TransparentupgradableproxyAdminChangedIterator) Error() error {
+	return it.fail
+}
+
+// Close terminates the iteration process, releasing any pending underlying
+// resources.
+func (it *TransparentupgradableproxyAdminChangedIterator) Close() error {
+	it.sub.Unsubscribe()
+	return nil
+}
+
+// TransparentupgradableproxyAdminChanged represents a AdminChanged event raised by the Transparentupgradableproxy contract.
+type TransparentupgradableproxyAdminChanged struct {
+	PreviousAdmin common.Address
+	NewAdmin      common.Address
+	Raw           types.Log // Blockchain specific contextual infos
+}
+
+// FilterAdminChanged is a free log retrieval operation binding the contract event 0x7e644d79422f17c01e4894b5f4f588d331ebfa28653d42ae832dc59e38c9798f.
+//
+// Solidity: event AdminChanged(address previousAdmin, address newAdmin)
+func (_Transparentupgradableproxy *TransparentupgradableproxyFilterer) FilterAdminChanged(opts *bind.FilterOpts) (*TransparentupgradableproxyAdminChangedIterator, error) {
+
+	logs, sub, err := _Transparentupgradableproxy.contract.FilterLogs(opts, "AdminChanged")
+	if err != nil {
+		return nil, err
+	}
+	return &TransparentupgradableproxyAdminChangedIterator{contract: _Transparentupgradableproxy.contract, event: "AdminChanged", logs: logs, sub: sub}, nil
+}
+
+// WatchAdminChanged is a free log subscription operation binding the contract event 0x7e644d79422f17c01e4894b5f4f588d331ebfa28653d42ae832dc59e38c9798f.
+//
+// Solidity: event AdminChanged(address previousAdmin, address newAdmin)
+func (_Transparentupgradableproxy *TransparentupgradableproxyFilterer) WatchAdminChanged(opts *bind.WatchOpts, sink chan<- *TransparentupgradableproxyAdminChanged) (event.Subscription, error) {
+
+	logs, sub, err := _Transparentupgradableproxy.contract.WatchLogs(opts, "AdminChanged")
+	if err != nil {
+		return nil, err
+	}
+	return event.NewSubscription(func(quit <-chan struct{}) error {
+		defer sub.Unsubscribe()
+		for {
+			select {
+			case log := <-logs:
+				// New log arrived, parse the event and forward to the user
+				event := new(TransparentupgradableproxyAdminChanged)
+				if err := _Transparentupgradableproxy.contract.UnpackLog(event, "AdminChanged", log); err != nil {
+					return err
+				}
+				event.Raw = log
+
+				select {
+				case sink <- event:
+				case err := <-sub.Err():
+					return err
+				case <-quit:
+					return nil
+				}
+			case err := <-sub.Err():
+				return err
+			case <-quit:
+				return nil
+			}
+		}
+	}), nil
+}
+
+// ParseAdminChanged is a log parse operation binding the contract event 0x7e644d79422f17c01e4894b5f4f588d331ebfa28653d42ae832dc59e38c9798f.
+//
+// Solidity: event AdminChanged(address previousAdmin, address newAdmin)
+func (_Transparentupgradableproxy *TransparentupgradableproxyFilterer) ParseAdminChanged(log types.Log) (*TransparentupgradableproxyAdminChanged, error) {
+	event := new(TransparentupgradableproxyAdminChanged)
+	if err := _Transparentupgradableproxy.contract.UnpackLog(event, "AdminChanged", log); err != nil {
+		return nil, err
+	}
+	event.Raw = log
+	return event, nil
+}
+
+// TransparentupgradableproxyBeaconUpgradedIterator is returned from FilterBeaconUpgraded and is used to iterate over the raw logs and unpacked data for BeaconUpgraded events raised by the Transparentupgradableproxy contract.
+type TransparentupgradableproxyBeaconUpgradedIterator struct {
+	Event *TransparentupgradableproxyBeaconUpgraded // Event containing the contract specifics and raw log
+
+	contract *bind.BoundContract // Generic contract to use for unpacking event data
+	event    string              // Event name to use for unpacking event data
+
+	logs chan types.Log        // Log channel receiving the found contract events
+	sub  ethereum.Subscription // Subscription for errors, completion and termination
+	done bool                  // Whether the subscription completed delivering logs
+	fail error                 // Occurred error to stop iteration
+}
+
+// Next advances the iterator to the subsequent event, returning whether there
+// are any more events found. In case of a retrieval or parsing error, false is
+// returned and Error() can be queried for the exact failure.
+func (it *TransparentupgradableproxyBeaconUpgradedIterator) Next() bool {
+	// If the iterator failed, stop iterating
+	if it.fail != nil {
+		return false
+	}
+	// If the iterator completed, deliver directly whatever's available
+	if it.done {
+		select {
+		case log := <-it.logs:
+			it.Event = new(TransparentupgradableproxyBeaconUpgraded)
+			if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
+				it.fail = err
+				return false
+			}
+			it.Event.Raw = log
+			return true
+
+		default:
+			return false
+		}
+	}
+	// Iterator still in progress, wait for either a data or an error event
+	select {
+	case log := <-it.logs:
+		it.Event = new(TransparentupgradableproxyBeaconUpgraded)
+		if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
+			it.fail = err
+			return false
+		}
+		it.Event.Raw = log
+		return true
+
+	case err := <-it.sub.Err():
+		it.done = true
+		it.fail = err
+		return it.Next()
+	}
+}
+
+// Error returns any retrieval or parsing error occurred during filtering.
+func (it *TransparentupgradableproxyBeaconUpgradedIterator) Error() error {
+	return it.fail
+}
+
+// Close terminates the iteration process, releasing any pending underlying
+// resources.
+func (it *TransparentupgradableproxyBeaconUpgradedIterator) Close() error {
+	it.sub.Unsubscribe()
+	return nil
+}
+
+// TransparentupgradableproxyBeaconUpgraded represents a BeaconUpgraded event raised by the Transparentupgradableproxy contract.
+type TransparentupgradableproxyBeaconUpgraded struct {
+	Beacon common.Address
+	Raw    types.Log // Blockchain specific contextual infos
+}
+
+// FilterBeaconUpgraded is a free log retrieval operation binding the contract event 0x1cf3b03a6cf19fa2baba4df148e9dcabedea7f8a5c07840e207e5c089be95d3e.
+//
+// Solidity: event BeaconUpgraded(address indexed beacon)
+func (_Transparentupgradableproxy *TransparentupgradableproxyFilterer) FilterBeaconUpgraded(opts *bind.FilterOpts, beacon []common.Address) (*TransparentupgradableproxyBeaconUpgradedIterator, error) {
+
+	var beaconRule []interface{}
+	for _, beaconItem := range beacon {
+		beaconRule = append(beaconRule, beaconItem)
+	}
+
+	logs, sub, err := _Transparentupgradableproxy.contract.FilterLogs(opts, "BeaconUpgraded", beaconRule)
+	if err != nil {
+		return nil, err
+	}
+	return &TransparentupgradableproxyBeaconUpgradedIterator{contract: _Transparentupgradableproxy.contract, event: "BeaconUpgraded", logs: logs, sub: sub}, nil
+}
+
+// WatchBeaconUpgraded is a free log subscription operation binding the contract event 0x1cf3b03a6cf19fa2baba4df148e9dcabedea7f8a5c07840e207e5c089be95d3e.
+//
+// Solidity: event BeaconUpgraded(address indexed beacon)
+func (_Transparentupgradableproxy *TransparentupgradableproxyFilterer) WatchBeaconUpgraded(opts *bind.WatchOpts, sink chan<- *TransparentupgradableproxyBeaconUpgraded, beacon []common.Address) (event.Subscription, error) {
+
+	var beaconRule []interface{}
+	for _, beaconItem := range beacon {
+		beaconRule = append(beaconRule, beaconItem)
+	}
+
+	logs, sub, err := _Transparentupgradableproxy.contract.WatchLogs(opts, "BeaconUpgraded", beaconRule)
+	if err != nil {
+		return nil, err
+	}
+	return event.NewSubscription(func(quit <-chan struct{}) error {
+		defer sub.Unsubscribe()
+		for {
+			select {
+			case log := <-logs:
+				// New log arrived, parse the event and forward to the user
+				event := new(TransparentupgradableproxyBeaconUpgraded)
+				if err := _Transparentupgradableproxy.contract.UnpackLog(event, "BeaconUpgraded", log); err != nil {
+					return err
+				}
+				event.Raw = log
+
+				select {
+				case sink <- event:
+				case err := <-sub.Err():
+					return err
+				case <-quit:
+					return nil
+				}
+			case err := <-sub.Err():
+				return err
+			case <-quit:
+				return nil
+			}
+		}
+	}), nil
+}
+
+// ParseBeaconUpgraded is a log parse operation binding the contract event 0x1cf3b03a6cf19fa2baba4df148e9dcabedea7f8a5c07840e207e5c089be95d3e.
+//
+// Solidity: event BeaconUpgraded(address indexed beacon)
+func (_Transparentupgradableproxy *TransparentupgradableproxyFilterer) ParseBeaconUpgraded(log types.Log) (*TransparentupgradableproxyBeaconUpgraded, error) {
+	event := new(TransparentupgradableproxyBeaconUpgraded)
+	if err := _Transparentupgradableproxy.contract.UnpackLog(event, "BeaconUpgraded", log); err != nil {
+		return nil, err
+	}
+	event.Raw = log
+	return event, nil
+}
+
+// TransparentupgradableproxyUpgradedIterator is returned from FilterUpgraded and is used to iterate over the raw logs and unpacked data for Upgraded events raised by the Transparentupgradableproxy contract.
+type TransparentupgradableproxyUpgradedIterator struct {
+	Event *TransparentupgradableproxyUpgraded // Event containing the contract specifics and raw log
+
+	contract *bind.BoundContract // Generic contract to use for unpacking event data
+	event    string              // Event name to use for unpacking event data
+
+	logs chan types.Log        // Log channel receiving the found contract events
+	sub  ethereum.Subscription // Subscription for errors, completion and termination
+	done bool                  // Whether the subscription completed delivering logs
+	fail error                 // Occurred error to stop iteration
+}
+
+// Next advances the iterator to the subsequent event, returning whether there
+// are any more events found. In case of a retrieval or parsing error, false is
+// returned and Error() can be queried for the exact failure.
+func (it *TransparentupgradableproxyUpgradedIterator) Next() bool {
+	// If the iterator failed, stop iterating
+	if it.fail != nil {
+		return false
+	}
+	// If the iterator completed, deliver directly whatever's available
+	if it.done {
+		select {
+		case log := <-it.logs:
+			it.Event = new(TransparentupgradableproxyUpgraded)
+			if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
+				it.fail = err
+				return false
+			}
+			it.Event.Raw = log
+			return true
+
+		default:
+			return false
+		}
+	}
+	// Iterator still in progress, wait for either a data or an error event
+	select {
+	case log := <-it.logs:
+		it.Event = new(TransparentupgradableproxyUpgraded)
+		if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
+			it.fail = err
+			return false
+		}
+		it.Event.Raw = log
+		return true
+
+	case err := <-it.sub.Err():
+		it.done = true
+		it.fail = err
+		return it.Next()
+	}
+}
+
+// Error returns any retrieval or parsing error occurred during filtering.
+func (it *TransparentupgradableproxyUpgradedIterator) Error() error {
+	return it.fail
+}
+
+// Close terminates the iteration process, releasing any pending underlying
+// resources.
+func (it *TransparentupgradableproxyUpgradedIterator) Close() error {
+	it.sub.Unsubscribe()
+	return nil
+}
+
+// TransparentupgradableproxyUpgraded represents a Upgraded event raised by the Transparentupgradableproxy contract.
+type TransparentupgradableproxyUpgraded struct {
+	Implementation common.Address
+	Raw            types.Log // Blockchain specific contextual infos
+}
+
+// FilterUpgraded is a free log retrieval operation binding the contract event 0xbc7cd75a20ee27fd9adebab32041f755214dbc6bffa90cc0225b39da2e5c2d3b.
+//
+// Solidity: event Upgraded(address indexed implementation)
+func (_Transparentupgradableproxy *TransparentupgradableproxyFilterer) FilterUpgraded(opts *bind.FilterOpts, implementation []common.Address) (*TransparentupgradableproxyUpgradedIterator, error) {
+
+	var implementationRule []interface{}
+	for _, implementationItem := range implementation {
+		implementationRule = append(implementationRule, implementationItem)
+	}
+
+	logs, sub, err := _Transparentupgradableproxy.contract.FilterLogs(opts, "Upgraded", implementationRule)
+	if err != nil {
+		return nil, err
+	}
+	return &TransparentupgradableproxyUpgradedIterator{contract: _Transparentupgradableproxy.contract, event: "Upgraded", logs: logs, sub: sub}, nil
+}
+
+// WatchUpgraded is a free log subscription operation binding the contract event 0xbc7cd75a20ee27fd9adebab32041f755214dbc6bffa90cc0225b39da2e5c2d3b.
+//
+// Solidity: event Upgraded(address indexed implementation)
+func (_Transparentupgradableproxy *TransparentupgradableproxyFilterer) WatchUpgraded(opts *bind.WatchOpts, sink chan<- *TransparentupgradableproxyUpgraded, implementation []common.Address) (event.Subscription, error) {
+
+	var implementationRule []interface{}
+	for _, implementationItem := range implementation {
+		implementationRule = append(implementationRule, implementationItem)
+	}
+
+	logs, sub, err := _Transparentupgradableproxy.contract.WatchLogs(opts, "Upgraded", implementationRule)
+	if err != nil {
+		return nil, err
+	}
+	return event.NewSubscription(func(quit <-chan struct{}) error {
+		defer sub.Unsubscribe()
+		for {
+			select {
+			case log := <-logs:
+				// New log arrived, parse the event and forward to the user
+				event := new(TransparentupgradableproxyUpgraded)
+				if err := _Transparentupgradableproxy.contract.UnpackLog(event, "Upgraded", log); err != nil {
+					return err
+				}
+				event.Raw = log
+
+				select {
+				case sink <- event:
+				case err := <-sub.Err():
+					return err
+				case <-quit:
+					return nil
+				}
+			case err := <-sub.Err():
+				return err
+			case <-quit:
+				return nil
+			}
+		}
+	}), nil
+}
+
+// ParseUpgraded is a log parse operation binding the contract event 0xbc7cd75a20ee27fd9adebab32041f755214dbc6bffa90cc0225b39da2e5c2d3b.
+//
+// Solidity: event Upgraded(address indexed implementation)
+func (_Transparentupgradableproxy *TransparentupgradableproxyFilterer) ParseUpgraded(log types.Log) (*TransparentupgradableproxyUpgraded, error) {
+	event := new(TransparentupgradableproxyUpgraded)
+	if err := _Transparentupgradableproxy.contract.UnpackLog(event, "Upgraded", log); err != nil {
+		return nil, err
+	}
+	event.Raw = log
+	return event, nil
+}
diff --git a/test/helpers/aggoracle_e2e.go b/test/helpers/aggoracle_e2e.go
new file mode 100644
index 000000000..c2908b8d3
--- /dev/null
+++ b/test/helpers/aggoracle_e2e.go
@@ -0,0 +1,332 @@
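+// Package helpers provides shared setup code for the aggoracle end-to-end tests:
+// simulated L1/L2 backends, deployed test contracts and a mocked eth tx manager.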
+package helpers
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"math/big"
+	"testing"
+	"time"
+
+	"github.com/0xPolygon/cdk-contracts-tooling/contracts/elderberry-paris/polygonzkevmbridgev2"
+	gerContractL1 "github.com/0xPolygon/cdk-contracts-tooling/contracts/manual/globalexitrootnopush0"
+	gerContractEVMChain "github.com/0xPolygon/cdk-contracts-tooling/contracts/manual/pessimisticglobalexitrootnopush0"
+	"github.com/0xPolygon/cdk/aggoracle"
+	"github.com/0xPolygon/cdk/aggoracle/chaingersender"
+	"github.com/0xPolygon/cdk/etherman"
+	"github.com/0xPolygon/cdk/l1infotreesync"
+	"github.com/0xPolygon/cdk/reorgdetector"
+	"github.com/0xPolygon/cdk/test/contracts/transparentupgradableproxy"
+	"github.com/ethereum/go-ethereum/accounts/abi/bind"
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/ethereum/go-ethereum/ethclient/simulated"
+	"github.com/stretchr/testify/require"
+)
+
+const (
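+	// NetworkIDL2 is the network id used when initializing the simulated L2 bridge.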
+	NetworkIDL2 = uint32(1)
+)
+
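+// AggoracleWithEVMChainEnv bundles everything the aggoracle e2e tests need: the simulated
+// L1 and L2 backends, the GER and bridge contracts deployed on both chains, the L1 info tree
+// syncer, the reorg detector, the running AggOracle and the mocked eth tx manager for L2.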
+type AggoracleWithEVMChainEnv struct {
+	L1Client         *simulated.Backend
+	L2Client         *simulated.Backend
+	L1InfoTreeSync   *l1infotreesync.L1InfoTreeSync
+	GERL1Contract    *gerContractL1.Globalexitrootnopush0
+	GERL1Addr        common.Address
+	GERL2Contract    *gerContractEVMChain.Pessimisticglobalexitrootnopush0
+	GERL2Addr        common.Address
+	AuthL1           *bind.TransactOpts
+	AuthL2           *bind.TransactOpts
+	AggOracle        *aggoracle.AggOracle
+	AggOracleSender  aggoracle.ChainSender
+	ReorgDetector    *reorgdetector.ReorgDetector
+	BridgeL1Contract *polygonzkevmbridgev2.Polygonzkevmbridgev2
+	BridgeL1Addr     common.Address
+	BridgeL2Contract *polygonzkevmbridgev2.Polygonzkevmbridgev2
+	BridgeL2Addr     common.Address
+	NetworkIDL2      uint32
+	EthTxManMockL2   *EthTxManagerMock
+}
+
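+// SetupAggoracleWithEVMChain spins up the full test environment (L1, L2, syncer, sender and
+// oracle), starts the oracle in the background and returns the assembled environment.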
+func SetupAggoracleWithEVMChain(t *testing.T) *AggoracleWithEVMChainEnv {
+	ctx := context.Background()
+	l1Client, syncer, gerL1Contract, gerL1Addr, bridgeL1Contract, bridgeL1Addr, authL1, rd := CommonSetup(t)
+	sender, l2Client, gerL2Contract, gerL2Addr, bridgeL2Contract, bridgeL2Addr, authL2, ethTxManMockL2 := EVMSetup(t)
+	oracle, err := aggoracle.New(sender, l1Client.Client(), syncer, etherman.LatestBlock, time.Millisecond*20)
+	require.NoError(t, err)
+	go oracle.Start(ctx)
+
+	return &AggoracleWithEVMChainEnv{
+		L1Client:         l1Client,
+		L2Client:         l2Client,
+		L1InfoTreeSync:   syncer,
+		GERL1Contract:    gerL1Contract,
+		GERL1Addr:        gerL1Addr,
+		GERL2Contract:    gerL2Contract,
+		GERL2Addr:        gerL2Addr,
+		AuthL1:           authL1,
+		AuthL2:           authL2,
+		AggOracle:        oracle,
+		AggOracleSender:  sender,
+		ReorgDetector:    rd,
+		BridgeL1Contract: bridgeL1Contract,
+		BridgeL1Addr:     bridgeL1Addr,
+		BridgeL2Contract: bridgeL2Contract,
+		BridgeL2Addr:     bridgeL2Addr,
+		NetworkIDL2:      NetworkIDL2,
+		EthTxManMockL2:   ethTxManMockL2,
+	}
+}
+
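+// CommonSetup starts the simulated L1 chain, deploys the GER and bridge contracts on it and
+// wires up the reorg detector and the L1 info tree syncer.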
+func CommonSetup(t *testing.T) (
+	*simulated.Backend,
+	*l1infotreesync.L1InfoTreeSync,
+	*gerContractL1.Globalexitrootnopush0,
+	common.Address,
+	*polygonzkevmbridgev2.Polygonzkevmbridgev2,
+	common.Address,
+	*bind.TransactOpts,
+	*reorgdetector.ReorgDetector,
+) {
+	// Config and spin up
+	ctx := context.Background()
+	// Simulated L1
+	privateKeyL1, err := crypto.GenerateKey()
+	require.NoError(t, err)
+	authL1, err := bind.NewKeyedTransactorWithChainID(privateKeyL1, big.NewInt(1337))
+	require.NoError(t, err)
+	l1Client, gerL1Addr, gerL1Contract, bridgeL1Addr, bridgeL1Contract, err := newSimulatedL1(authL1)
+	require.NoError(t, err)
+	// Reorg detector
+	dbPathReorgDetector := t.TempDir()
+	reorg, err := reorgdetector.New(ctx, l1Client.Client(), dbPathReorgDetector)
+	require.NoError(t, err)
+	// Syncer
+	dbPathSyncer := t.TempDir()
+	syncer, err := l1infotreesync.New(ctx, dbPathSyncer, gerL1Addr, common.Address{}, 10, etherman.LatestBlock, reorg, l1Client.Client(), time.Millisecond, 0, 100*time.Millisecond, 3)
+	require.NoError(t, err)
+	go syncer.Start(ctx)
+
+	return l1Client, syncer, gerL1Contract, gerL1Addr, bridgeL1Contract, bridgeL1Addr, authL1, reorg
+}
+
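+// EVMSetup starts the simulated L2 chain, deploys the sovereign GER and bridge contracts and
+// returns a ChainSender backed by a mocked eth tx manager.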
+func EVMSetup(t *testing.T) (
+	aggoracle.ChainSender,
+	*simulated.Backend,
+	*gerContractEVMChain.Pessimisticglobalexitrootnopush0,
+	common.Address,
+	*polygonzkevmbridgev2.Polygonzkevmbridgev2,
+	common.Address,
+	*bind.TransactOpts,
+	*EthTxManagerMock,
+) {
+	privateKeyL2, err := crypto.GenerateKey()
+	require.NoError(t, err)
+	authL2, err := bind.NewKeyedTransactorWithChainID(privateKeyL2, big.NewInt(1337))
+	require.NoError(t, err)
+	l2Client, gerL2Addr, gerL2Sc, bridgeL2Addr, bridgeL2Sc, err := newSimulatedEVMAggSovereignChain(authL2)
+	require.NoError(t, err)
+	ethTxManMock := NewEthTxManMock(t, l2Client, authL2)
+	sender, err := chaingersender.NewEVMChainGERSender(gerL2Addr, authL2.From, l2Client.Client(), ethTxManMock, 0, time.Millisecond*50)
+	require.NoError(t, err)
+
+	return sender, l2Client, gerL2Sc, gerL2Addr, bridgeL2Sc, bridgeL2Addr, authL2, ethTxManMock
+}
+
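+// newSimulatedL1 creates a simulated L1 backend and deploys the bridge (behind a transparent
+// upgradable proxy) and the global exit root contract, verifying the precalculated addresses.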
+func newSimulatedL1(auth *bind.TransactOpts) (
+	client *simulated.Backend,
+	gerAddr common.Address,
+	gerContract *gerContractL1.Globalexitrootnopush0,
+	bridgeAddr common.Address,
+	bridgeContract *polygonzkevmbridgev2.Polygonzkevmbridgev2,
+	err error,
+) {
+	ctx := context.Background()
+	privateKeyL1, err := crypto.GenerateKey()
+	if err != nil {
+		return
+	}
+	authDeployer, err := bind.NewKeyedTransactorWithChainID(privateKeyL1, big.NewInt(1337))
+	if err != nil {
+		return
+	}
+	balance, _ := new(big.Int).SetString("10000000000000000000000000", 10) //nolint:gomnd
+	address := auth.From
+	genesisAlloc := map[common.Address]types.Account{
+		address: {
+			Balance: balance,
+		},
+		authDeployer.From: {
+			Balance: balance,
+		},
+	}
+	blockGasLimit := uint64(999999999999999999) //nolint:gomnd
+	client = simulated.NewBackend(genesisAlloc, simulated.WithBlockGasLimit(blockGasLimit))
+
+	bridgeImplementationAddr, _, _, err := polygonzkevmbridgev2.DeployPolygonzkevmbridgev2(authDeployer, client.Client())
+	if err != nil {
+		return
+	}
+	client.Commit()
+
+	nonce, err := client.Client().PendingNonceAt(ctx, authDeployer.From)
+	if err != nil {
+		return
+	}
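+	// The GER contract will be the deployer's next-but-one transaction (the proxy is deployed
+	// first), so its address can be precalculated from nonce+1.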
+	precalculatedAddr := crypto.CreateAddress(authDeployer.From, nonce+1)
+	bridgeABI, err := polygonzkevmbridgev2.Polygonzkevmbridgev2MetaData.GetAbi()
+	if err != nil {
+		return
+	}
+	if bridgeABI == nil {
+		err = errors.New("GetABI returned nil")
+		return
+	}
+	dataCallProxy, err := bridgeABI.Pack("initialize",
+		uint32(0),        // networkIDMainnet
+		common.Address{}, // gasTokenAddressMainnet
+		uint32(0),        // gasTokenNetworkMainnet
+		precalculatedAddr,
+		common.Address{},
+		[]byte{}, // gasTokenMetadata
+	)
+	if err != nil {
+		return
+	}
+	bridgeAddr, _, _, err = transparentupgradableproxy.DeployTransparentupgradableproxy(
+		authDeployer,
+		client.Client(),
+		bridgeImplementationAddr,
+		authDeployer.From,
+		dataCallProxy,
+	)
+	if err != nil {
+		return
+	}
+	client.Commit()
+	bridgeContract, err = polygonzkevmbridgev2.NewPolygonzkevmbridgev2(bridgeAddr, client.Client())
+	if err != nil {
+		return
+	}
+	checkGERAddr, err := bridgeContract.GlobalExitRootManager(&bind.CallOpts{Pending: false})
+	if err != nil {
+		return
+	}
+	if precalculatedAddr != checkGERAddr {
+		err = fmt.Errorf("error deploying bridge, unexpected GER addr. Expected %s. Actual %s", precalculatedAddr.Hex(), checkGERAddr.Hex())
+		return
+	}
+
+	gerAddr, _, gerContract, err = gerContractL1.DeployGlobalexitrootnopush0(authDeployer, client.Client(), auth.From, bridgeAddr)
+	if err != nil {
+		return
+	}
+	client.Commit()
+	if precalculatedAddr != gerAddr {
+		err = fmt.Errorf("error calculating addr. Expected %s. Actual %s", precalculatedAddr.Hex(), gerAddr.Hex())
+	}
+	return
+}
+
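+// newSimulatedEVMAggSovereignChain creates a simulated L2 backend, deploys the bridge proxy
+// and the pessimistic global exit root contract, and grants the GER setter role to the test
+// account.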
+func newSimulatedEVMAggSovereignChain(auth *bind.TransactOpts) (
+	client *simulated.Backend,
+	gerAddr common.Address,
+	gerContract *gerContractEVMChain.Pessimisticglobalexitrootnopush0,
+	bridgeAddr common.Address,
+	bridgeContract *polygonzkevmbridgev2.Polygonzkevmbridgev2,
+	err error,
+) {
+	ctx := context.Background()
+	privateKeyL1, err := crypto.GenerateKey()
+	if err != nil {
+		return
+	}
+	authDeployer, err := bind.NewKeyedTransactorWithChainID(privateKeyL1, big.NewInt(1337))
+	if err != nil {
+		return
+	}
+	balance, _ := new(big.Int).SetString("10000000000000000000000000", 10) //nolint:gomnd
+	address := auth.From
+	precalculatedBridgeAddr := crypto.CreateAddress(authDeployer.From, 1)
+	genesisAlloc := map[common.Address]types.Account{
+		address: {
+			Balance: balance,
+		},
+		authDeployer.From: {
+			Balance: balance,
+		},
+		precalculatedBridgeAddr: {
+			Balance: balance,
+		},
+	}
+	blockGasLimit := uint64(999999999999999999) //nolint:gomnd
+	client = simulated.NewBackend(genesisAlloc, simulated.WithBlockGasLimit(blockGasLimit))
+
+	bridgeImplementationAddr, _, _, err := polygonzkevmbridgev2.DeployPolygonzkevmbridgev2(authDeployer, client.Client())
+	if err != nil {
+		return
+	}
+	client.Commit()
+
+	nonce, err := client.Client().PendingNonceAt(ctx, authDeployer.From)
+	if err != nil {
+		return
+	}
+	precalculatedAddr := crypto.CreateAddress(authDeployer.From, nonce+1)
+	bridgeABI, err := polygonzkevmbridgev2.Polygonzkevmbridgev2MetaData.GetAbi()
+	if err != nil {
+		return
+	}
+	if bridgeABI == nil {
+		err = errors.New("GetABI returned nil")
+		return
+	}
+	dataCallProxy, err := bridgeABI.Pack("initialize",
+		NetworkIDL2,
+		common.Address{}, // gasTokenAddressMainnet
+		uint32(0),        // gasTokenNetworkMainnet
+		precalculatedAddr,
+		common.Address{},
+		[]byte{}, // gasTokenMetadata
+	)
+	if err != nil {
+		return
+	}
+	bridgeAddr, _, _, err = transparentupgradableproxy.DeployTransparentupgradableproxy(
+		authDeployer,
+		client.Client(),
+		bridgeImplementationAddr,
+		authDeployer.From,
+		dataCallProxy,
+	)
+	if err != nil {
+		return
+	}
+	if bridgeAddr != precalculatedBridgeAddr {
+		err = fmt.Errorf("error calculating bridge addr. Expected: %s. Actual: %s", precalculatedBridgeAddr, bridgeAddr)
+		return
+	}
+	client.Commit()
+	bridgeContract, err = polygonzkevmbridgev2.NewPolygonzkevmbridgev2(bridgeAddr, client.Client())
+	if err != nil {
+		return
+	}
+	checkGERAddr, err := bridgeContract.GlobalExitRootManager(&bind.CallOpts{})
+	if err != nil {
+		return
+	}
+	if precalculatedAddr != checkGERAddr {
+		err = errors.New("error deploying bridge, unexpected GER addr")
+		return
+	}
+
+	gerAddr, _, gerContract, err = gerContractEVMChain.DeployPessimisticglobalexitrootnopush0(authDeployer, client.Client(), auth.From)
+	if err != nil {
+		return
+	}
+	client.Commit()
+
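+	// Grant the global exit root setter role to the test account so it can update the GER.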
+	_GLOBAL_EXIT_ROOT_SETTER_ROLE := common.HexToHash("0x7b95520991dfda409891be0afa2635b63540f92ee996fda0bf695a166e5c5176")
+	_, err = gerContract.GrantRole(authDeployer, _GLOBAL_EXIT_ROOT_SETTER_ROLE, auth.From)
+	if err != nil {
+		return
+	}
+	client.Commit()
+	hasRole, _ := gerContract.HasRole(&bind.CallOpts{Pending: false}, _GLOBAL_EXIT_ROOT_SETTER_ROLE, auth.From)
+	if !hasRole {
+		err = errors.New("failed to set role")
+		return
+	}
+	if precalculatedAddr != gerAddr {
+		err = errors.New("error calculating addr")
+	}
+	return
+}
diff --git a/test/helpers/ethtxmanmock_e2e.go b/test/helpers/ethtxmanmock_e2e.go
new file mode 100644
index 000000000..b63ecc496
--- /dev/null
+++ b/test/helpers/ethtxmanmock_e2e.go
@@ -0,0 +1,83 @@
+package helpers
+
+import (
+	"context"
+	"math/big"
+	"testing"
+
+	"github.com/0xPolygon/cdk/log"
+	"github.com/0xPolygonHermez/zkevm-ethtx-manager/ethtxmanager"
+	"github.com/ethereum/go-ethereum"
+	"github.com/ethereum/go-ethereum/accounts/abi/bind"
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/ethclient/simulated"
+	"github.com/stretchr/testify/mock"
+)
+
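+// NewEthTxManMock returns an EthTxManagerMock whose Add call builds, signs and sends the
+// requested transaction against the simulated backend (committing a block right away), and
+// whose Result call always reports the monitored tx as mined.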
+func NewEthTxManMock(
+	t *testing.T,
+	client *simulated.Backend,
+	auth *bind.TransactOpts,
+) *EthTxManagerMock {
+	ethTxMock := NewEthTxManagerMock(t)
+	ethTxMock.On("Add", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).
+		Run(func(args mock.Arguments) {
+			ctx := context.Background()
+			nonce, err := client.Client().PendingNonceAt(ctx, auth.From)
+			if err != nil {
+				log.Error(err)
+				return
+			}
+			gas, err := client.Client().EstimateGas(ctx, ethereum.CallMsg{
+				From:  auth.From,
+				To:    args.Get(1).(*common.Address),
+				Value: big.NewInt(0),
+				Data:  args.Get(4).([]byte),
+			})
+			if err != nil {
+				log.Error(err)
+				res, err := client.Client().CallContract(ctx, ethereum.CallMsg{
+					From:  auth.From,
+					To:    args.Get(1).(*common.Address),
+					Value: big.NewInt(0),
+					Data:  args.Get(4).([]byte),
+				}, nil)
+				log.Debugf("contract call: %s", res)
+				if err != nil {
+					log.Errorf("%+v", err)
+				}
+				return
+			}
+			price, err := client.Client().SuggestGasPrice(ctx)
+			if err != nil {
+				log.Error(err)
+				return
+			}
+			tx := types.NewTx(&types.LegacyTx{
+				To:       args.Get(1).(*common.Address),
+				Nonce:    nonce,
+				Value:    big.NewInt(0),
+				Data:     args.Get(4).([]byte),
+				Gas:      gas,
+				GasPrice: price,
+			})
+			signedTx, err := auth.Signer(auth.From, tx)
+			if err != nil {
+				log.Error(err)
+				return
+			}
+			err = client.Client().SendTransaction(ctx, signedTx)
+			if err != nil {
+				log.Error(err)
+				return
+			}
+			client.Commit()
+		}).
+		Return(common.Hash{}, nil)
+	// Result is mocked to always report the monitored tx as mined.
+	ethTxMock.On("Result", mock.Anything, mock.Anything).
+		Return(ethtxmanager.MonitoredTxResult{Status: ethtxmanager.MonitoredTxStatusMined}, nil)
+
+	return ethTxMock
+}
diff --git a/aggoracle/mock_ethtxmanager_test.go b/test/helpers/mock_ethtxmanager.go
similarity index 99%
rename from aggoracle/mock_ethtxmanager_test.go
rename to test/helpers/mock_ethtxmanager.go
index 37bcbeda0..995084a2e 100644
--- a/aggoracle/mock_ethtxmanager_test.go
+++ b/test/helpers/mock_ethtxmanager.go
@@ -1,6 +1,6 @@
 // Code generated by mockery v2.22.1. DO NOT EDIT.
 
-package aggoracle
+package helpers
 
 import (
 	big "math/big"
diff --git a/tree/appendonlytree.go b/tree/appendonlytree.go
index da828d0ab..d04454f13 100644
--- a/tree/appendonlytree.go
+++ b/tree/appendonlytree.go
@@ -12,7 +12,7 @@ import (
 // AppendOnlyTree is a tree where leaves are added sequentially (by index)
 type AppendOnlyTree struct {
 	*Tree
-	lastLeftCache []common.Hash
+	lastLeftCache [defaultHeight]common.Hash
 	lastIndex     int64
 }
 
@@ -36,8 +36,8 @@ func (t *AppendOnlyTree) AddLeaves(tx kv.RwTx, leaves []Leaf) (func(), error) {
 	}
 
 	backupIndx := t.lastIndex
-	backupCache := make([]common.Hash, len(t.lastLeftCache))
-	copy(backupCache, t.lastLeftCache)
+	backupCache := [defaultHeight]common.Hash{}
+	copy(backupCache[:], t.lastLeftCache[:])
 	rollback := func() {
 		t.lastIndex = backupIndx
 		t.lastLeftCache = backupCache
@@ -62,7 +62,7 @@ func (t *AppendOnlyTree) addLeaf(tx kv.RwTx, leaf Leaf) error {
 	// Calculate new tree nodes
 	currentChildHash := leaf.Hash
 	newNodes := []treeNode{}
-	for h := uint8(0); h < t.height; h++ {
+	for h := uint8(0); h < defaultHeight; h++ {
 		var parent treeNode
 		if leaf.Index&(1<<h) > 0 {
 			// Add child to the right
@@ -104,12 +104,23 @@ func (t *AppendOnlyTree) GetRootByIndex(tx kv.Tx, index uint32) (common.Hash, er
 	return t.getRootByIndex(tx, uint64(index))
 }
 
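+// GetIndexByRoot returns the leaf index associated with the given root
+// (the inverse lookup of GetRootByIndex).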
+func (t *AppendOnlyTree) GetIndexByRoot(ctx context.Context, root common.Hash) (uint32, error) {
+	tx, err := t.db.BeginRo(ctx)
+	if err != nil {
+		return 0, err
+	}
+	defer tx.Rollback()
+	index, err := t.getIndexByRoot(tx, root)
+	return uint32(index), err
+}
+
 // GetLastIndexAndRoot returns the last index and root added to the tree
 func (t *AppendOnlyTree) GetLastIndexAndRoot(ctx context.Context) (uint32, common.Hash, error) {
 	tx, err := t.db.BeginRo(ctx)
 	if err != nil {
 		return 0, common.Hash{}, err
 	}
+	defer tx.Rollback()
 	i, root, err := t.getLastIndexAndRootWithTx(tx)
 	if err != nil {
 		return 0, common.Hash{}, err
@@ -144,7 +155,7 @@ func (t *AppendOnlyTree) initLastIndex(tx kv.Tx) (common.Hash, error) {
 }
 
 func (t *AppendOnlyTree) initLastLeftCache(tx kv.Tx, lastIndex int64, lastRoot common.Hash) error {
-	siblings := make([]common.Hash, t.height, t.height)
+	siblings := [defaultHeight]common.Hash{}
 	if lastIndex == -1 {
 		t.lastLeftCache = siblings
 		return nil
@@ -153,7 +164,7 @@ func (t *AppendOnlyTree) initLastLeftCache(tx kv.Tx, lastIndex int64, lastRoot c
 
 	currentNodeHash := lastRoot
 	// It starts in height-1 because 0 is the level of the leafs
-	for h := int(t.height - 1); h >= 0; h-- {
+	for h := int(defaultHeight - 1); h >= 0; h-- {
 		currentNode, err := t.getRHTNode(tx, currentNodeHash)
 		if err != nil {
 			return fmt.Errorf(
@@ -164,7 +175,7 @@ func (t *AppendOnlyTree) initLastLeftCache(tx kv.Tx, lastIndex int64, lastRoot c
 		if currentNode == nil {
 			return ErrNotFound
 		}
-		siblings = append(siblings, currentNode.left)
+		siblings[h] = currentNode.left
 		if index&(1<<h) > 0 {
 			currentNodeHash = currentNode.right
 		} else {
diff --git a/tree/tree.go b/tree/tree.go
index f361be285..e7f8e5c18 100644
--- a/tree/tree.go
+++ b/tree/tree.go
@@ -13,12 +13,14 @@ import (
 )
 
 const (
-	defaultHeight  uint8 = 32
-	rootTableSufix       = "-root"
-	rhtTableSufix        = "-rht"
+	defaultHeight   uint8 = 32
+	rootTableSufix        = "-root"
+	rhtTableSufix         = "-rht"
+	indexTableSufix       = "-index"
 )
 
 var (
+	EmptyProof  = [32]common.Hash{}
 	ErrNotFound = errors.New("not found")
 )
 
@@ -31,7 +33,7 @@ type Tree struct {
 	db         kv.RwDB
 	rhtTable   string
 	rootTable  string
-	height     uint8
+	indexTable string
 	zeroHashes []common.Hash
 }
 
@@ -66,18 +68,21 @@ func (n *treeNode) UnmarshalBinary(data []byte) error {
 func AddTables(tableCfg map[string]kv.TableCfgItem, dbPrefix string) {
 	rootTable := dbPrefix + rootTableSufix
 	rhtTable := dbPrefix + rhtTableSufix
+	indexTable := dbPrefix + indexTableSufix
 	tableCfg[rootTable] = kv.TableCfgItem{}
 	tableCfg[rhtTable] = kv.TableCfgItem{}
+	tableCfg[indexTable] = kv.TableCfgItem{}
 }
 
 func newTree(db kv.RwDB, dbPrefix string) *Tree {
 	rootTable := dbPrefix + rootTableSufix
 	rhtTable := dbPrefix + rhtTableSufix
+	indexTable := dbPrefix + indexTableSufix
 	t := &Tree{
 		rhtTable:   rhtTable,
 		rootTable:  rootTable,
+		indexTable: indexTable,
 		db:         db,
-		height:     defaultHeight,
 		zeroHashes: generateZeroHashes(defaultHeight),
 	}
 
@@ -95,22 +100,31 @@ func (t *Tree) getRootByIndex(tx kv.Tx, index uint64) (common.Hash, error) {
 	return common.BytesToHash(rootBytes), nil
 }
 
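+// getIndexByRoot is the inverse of getRootByIndex, resolved through the root -> index table.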
+func (t *Tree) getIndexByRoot(tx kv.Tx, root common.Hash) (uint64, error) {
+	indexBytes, err := tx.GetOne(t.indexTable, root[:])
+	if err != nil {
+		return 0, err
+	}
+	if indexBytes == nil {
+		return 0, ErrNotFound
+	}
+	return dbCommon.BytesToUint64(indexBytes), nil
+}
+
 func (t *Tree) getSiblings(tx kv.Tx, index uint32, root common.Hash) (
-	siblings []common.Hash,
+	siblings [32]common.Hash,
 	hasUsedZeroHashes bool,
 	err error,
 ) {
-	siblings = make([]common.Hash, int(t.height))
-
 	currentNodeHash := root
-	var currentNode *treeNode
 	// It starts in height-1 because 0 is the level of the leafs
-	for h := int(t.height - 1); h >= 0; h-- {
+	for h := int(defaultHeight - 1); h >= 0; h-- {
+		var currentNode *treeNode
 		currentNode, err = t.getRHTNode(tx, currentNodeHash)
 		if err != nil {
 			if err == ErrNotFound {
 				hasUsedZeroHashes = true
-				siblings = append(siblings, t.zeroHashes[h])
+				siblings[h] = t.zeroHashes[h]
 				err = nil
 				continue
 			} else {
@@ -143,35 +157,30 @@ func (t *Tree) getSiblings(tx kv.Tx, index uint32, root common.Hash) (
 		* Now, let's do AND operation => 100&100=100 which is higher than 0 so we need the left sibling (O5)
 		 */
 		if index&(1<<h) > 0 {
-			siblings = append(siblings, currentNode.left)
+			siblings[h] = currentNode.left
 			currentNodeHash = currentNode.right
 		} else {
-			siblings = append(siblings, currentNode.right)
+			siblings[h] = currentNode.right
 			currentNodeHash = currentNode.left
 		}
 	}
 
-	// Reverse siblings to go from leafs to root
-	for i, j := 0, len(siblings)-1; i < j; i, j = i+1, j-1 {
-		siblings[i], siblings[j] = siblings[j], siblings[i]
-	}
-
 	return
 }
 
 // GetProof returns the merkle proof for a given index and root.
-func (t *Tree) GetProof(ctx context.Context, index uint32, root common.Hash) ([]common.Hash, error) {
+func (t *Tree) GetProof(ctx context.Context, index uint32, root common.Hash) ([defaultHeight]common.Hash, error) {
 	tx, err := t.db.BeginRw(ctx)
 	if err != nil {
-		return nil, err
+		return [defaultHeight]common.Hash{}, err
 	}
 	defer tx.Rollback()
 	siblings, isErrNotFound, err := t.getSiblings(tx, index, root)
 	if err != nil {
-		return nil, err
+		return [defaultHeight]common.Hash{}, err
 	}
 	if isErrNotFound {
-		return nil, ErrNotFound
+		return [defaultHeight]common.Hash{}, ErrNotFound
 	}
 	return siblings, nil
 }
@@ -220,7 +229,10 @@ func (t *Tree) storeNodes(tx kv.RwTx, nodes []treeNode) error {
 }
 
 func (t *Tree) storeRoot(tx kv.RwTx, rootIndex uint64, root common.Hash) error {
-	return tx.Put(t.rootTable, dbCommon.Uint64ToBytes(rootIndex), root[:])
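+	// Keep both mappings in sync: index -> root (rootTable) and root -> index (indexTable),
+	// so that getRootByIndex and getIndexByRoot stay consistent.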
+	if err := tx.Put(t.rootTable, dbCommon.Uint64ToBytes(rootIndex), root[:]); err != nil {
+		return err
+	}
+	return tx.Put(t.indexTable, root[:], dbCommon.Uint64ToBytes(rootIndex))
 }
 
 // GetLastRoot returns the last processed root
@@ -229,6 +241,7 @@ func (t *Tree) GetLastRoot(ctx context.Context) (common.Hash, error) {
 	if err != nil {
 		return common.Hash{}, err
 	}
+	defer tx.Rollback()
 
 	i, root, err := t.getLastIndexAndRootWithTx(tx)
 	if err != nil {
@@ -262,3 +275,26 @@ func (t *Tree) getLastIndexAndRootWithTx(tx kv.Tx) (int64, common.Hash, error) {
 	}
 	return int64(dbCommon.BytesToUint64(lastIndexBytes)), common.Hash(rootBytes), nil
 }
+
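+// GetLeaf returns the leaf value at the given index for the tree version identified by root,
+// walking the stored nodes from the root down to level 0 following the index bits.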
+func (t *Tree) GetLeaf(ctx context.Context, index uint32, root common.Hash) (common.Hash, error) {
+	tx, err := t.db.BeginRo(ctx)
+	if err != nil {
+		return common.Hash{}, err
+	}
+	defer tx.Rollback()
+
+	currentNodeHash := root
+	for h := int(defaultHeight - 1); h >= 0; h-- {
+		currentNode, err := t.getRHTNode(tx, currentNodeHash)
+		if err != nil {
+			return common.Hash{}, err
+		}
+		if index&(1<<h) > 0 {
+			currentNodeHash = currentNode.right
+		} else {
+			currentNodeHash = currentNode.left
+		}
+	}
+
+	return currentNodeHash, nil
+}
diff --git a/tree/updatabletree.go b/tree/updatabletree.go
index ddebd5dfc..48365ee21 100644
--- a/tree/updatabletree.go
+++ b/tree/updatabletree.go
@@ -29,7 +29,7 @@ func NewUpdatableTree(ctx context.Context, db kv.RwDB, dbPrefix string) (*Updata
 		return nil, err
 	}
 	if rootIndex == -1 {
-		root = t.zeroHashes[t.height]
+		root = t.zeroHashes[defaultHeight]
 	}
 	ut := &UpdatableTree{
 		Tree:     t,
@@ -70,7 +70,7 @@ func (t *UpdatableTree) upsertLeaf(tx kv.RwTx, leaf Leaf) error {
 	}
 	currentChildHash := leaf.Hash
 	newNodes := []treeNode{}
-	for h := uint8(0); h < t.height; h++ {
+	for h := uint8(0); h < defaultHeight; h++ {
 		var parent treeNode
 		if leaf.Index&(1<<h) > 0 {
 			// Add child to the right
@@ -130,7 +130,7 @@ func (t *UpdatableTree) Reorg(tx kv.RwTx, firstReorgedIndex uint64) (func(), err
 	}
 
 	// no root found after reorg, going back to empty tree
-	t.lastRoot = t.zeroHashes[t.height]
+	t.lastRoot = t.zeroHashes[defaultHeight]
 	return rollback, nil
 }