diff --git a/.github/workflows/test-e2e-multi_pp.yml b/.github/workflows/test-e2e-multi_pp.yml deleted file mode 100644 index 72c7140d5..000000000 --- a/.github/workflows/test-e2e-multi_pp.yml +++ /dev/null @@ -1,85 +0,0 @@ -# based on: https://github.com/0xPolygon/kurtosis-cdk/blob/jhilliard/multi-pp-testing/multi-pp-test.sh.md -name: Test e2e multi pp -on: - push: - branches: - - '**' - workflow_dispatch: {} - - -jobs: - test-e2e-multi_pp: - strategy: - fail-fast: false - matrix: - go-version: [ 1.22.x ] - goarch: [ "amd64" ] - e2e-group: - - "fork12-pessimistic" - runs-on: ubuntu-latest - steps: - - name: Checkout code - uses: actions/checkout@v4 - - - name: Install Go - uses: actions/setup-go@v5 - with: - go-version: ${{ matrix.go-version }} - env: - GOARCH: ${{ matrix.goarch }} - - - - name: Build Docker - run: make build-docker - - - name: Build Tools - run: make build-tools - - - name: Checkout kurtosis-cdk - uses: actions/checkout@v4 - with: - repository: 0xPolygon/kurtosis-cdk - path: kurtosis-cdk - ref: jhilliard/multi-pp-testing - - - name: Install Kurtosis CDK tools - uses: ./kurtosis-cdk/.github/actions/setup-kurtosis-cdk - - - name: Install polycli - run: | - git clone https://github.com/0xPolygon/polygon-cli -b jhilliard/alonso - cd polygon-cli - make install - cp ~/go/bin/polycli /usr/local/bin/polycli - /usr/local/bin/polycli version - - - name: Setup Bats and bats libs - uses: bats-core/bats-action@2.0.0 - - - name: Test - run: make test-e2e-fork12-multi-pessimistic - - working-directory: test - env: - KURTOSIS_FOLDER: ${{ github.workspace }}/kurtosis-cdk - BATS_LIB_PATH: /usr/lib/ - agglayer_prover_sp1_key: ${{ secrets.SP1_PRIVATE_KEY }} - - - name: Dump enclave logs - if: failure() - run: kurtosis dump ./dump - - - name: Generate archive name - if: failure() - run: | - archive_name="dump_run_with_args_${{matrix.e2e-group}}_${{ github.run_id }}" - echo "ARCHIVE_NAME=${archive_name}" >> "$GITHUB_ENV" - echo "Generated archive name: ${archive_name}" - kurtosis service exec cdk cdk-node-001 'cat /etc/cdk/cdk-node-config.toml' > ./dump/cdk-node-config.toml - - - name: Upload logs - if: failure() - uses: actions/upload-artifact@v4 - with: - name: ${{ env.ARCHIVE_NAME }} - path: ./dump diff --git a/.github/workflows/test-e2e.yml b/.github/workflows/test-e2e.yml index e164705c0..6b32f7b96 100644 --- a/.github/workflows/test-e2e.yml +++ b/.github/workflows/test-e2e.yml @@ -40,7 +40,6 @@ jobs: - "fork11-rollup" - "fork12-validium" - "fork12-rollup" - - "fork12-pessimistic" steps: - uses: actions/checkout@v4 diff --git a/Makefile b/Makefile index 0174206ac..90c0d0027 100644 --- a/Makefile +++ b/Makefile @@ -66,10 +66,6 @@ build-rust: build-go: $(GOENVVARS) go build -ldflags "all=$(LDFLAGS)" -o $(GOBIN)/$(GOBINARY) $(GOCMD) -.PHONY: build-tools -build-tools: ## Builds the tools - $(GOENVVARS) go build -o $(GOBIN)/aggsender_find_imported_bridge ./tools/aggsender_find_imported_bridge - .PHONY: build-docker build-docker: ## Builds a docker image with the cdk binary docker build -t cdk -f ./Dockerfile . 
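Note (illustrative, not part of the patch): the agglayer/client.go diff below removes the certificate-related RPC methods (SendCertificate, GetCertificateHeader, GetLatestKnownCertificateHeader, GetEpochConfiguration) from AgglayerClientInterface, along with their mocks, types, and tests. For reference, here is a minimal hedged sketch of how a caller exercised that pre-removal API; the import path and endpoint URL are assumptions inferred from the deleted code (the exploratory tests used local kurtosis endpoints), not something this patch defines.

package main

import (
	"fmt"
	"log"

	"github.com/0xPolygon/cdk/agglayer" // module path assumed from imports in the deleted files
	"github.com/ethereum/go-ethereum/common"
)

func main() {
	// Placeholder URL; the removed exploratory tests pointed at local kurtosis ports.
	client := agglayer.NewAggLayerClient("http://localhost:32796")

	// interop_getEpochConfiguration: the agglayer clock settings (epoch_duration, genesis_block).
	clockCfg, err := client.GetEpochConfiguration()
	if err != nil {
		log.Fatalf("GetEpochConfiguration: %v", err)
	}
	fmt.Println("clock config:", clockCfg.String())

	// interop_getLatestKnownCertificateHeader: last certificate header submitted by network 1.
	lastCert, err := client.GetLatestKnownCertificateHeader(1)
	if err != nil {
		log.Fatalf("GetLatestKnownCertificateHeader: %v", err)
	}
	fmt.Println("latest cert:", lastCert.String())

	// interop_getCertificateHeader: header for one specific certificate hash (placeholder value).
	certID := common.HexToHash("0xf153e75e24591432ac5deafaeaafba3fec0fd851261c86051b9c0d540b38c369")
	header, err := client.GetCertificateHeader(certID)
	if err != nil {
		log.Fatalf("GetCertificateHeader: %v", err)
	}
	fmt.Println("header:", header.ID())
}

After this change, only SendTx and WaitTxToBeMined remain on the interface, so callers like the sketch above no longer compile against the trimmed client.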
diff --git a/agglayer/client.go b/agglayer/client.go index 8a186be46..01453165a 100644 --- a/agglayer/client.go +++ b/agglayer/client.go @@ -20,18 +20,10 @@ var ( jSONRPCCall = rpc.JSONRPCCall ) -type AggLayerClientGetEpochConfiguration interface { - GetEpochConfiguration() (*ClockConfiguration, error) -} - // AgglayerClientInterface is the interface that defines the methods that the AggLayerClient will implement type AgglayerClientInterface interface { SendTx(signedTx SignedTx) (common.Hash, error) WaitTxToBeMined(hash common.Hash, ctx context.Context) error - SendCertificate(certificate *SignedCertificate) (common.Hash, error) - GetCertificateHeader(certificateHash common.Hash) (*CertificateHeader, error) - GetLatestKnownCertificateHeader(networkID uint32) (*CertificateHeader, error) - AggLayerClientGetEpochConfiguration } // AggLayerClient is the client that will be used to interact with the AggLayer @@ -97,86 +89,3 @@ func (c *AggLayerClient) WaitTxToBeMined(hash common.Hash, ctx context.Context) } } } - -// SendCertificate sends a certificate to the AggLayer -func (c *AggLayerClient) SendCertificate(certificate *SignedCertificate) (common.Hash, error) { - certificateToSend := certificate.CopyWithDefaulting() - - response, err := rpc.JSONRPCCall(c.url, "interop_sendCertificate", certificateToSend) - if err != nil { - return common.Hash{}, err - } - - if response.Error != nil { - return common.Hash{}, fmt.Errorf("%d %s", response.Error.Code, response.Error.Message) - } - - var result types.ArgHash - err = json.Unmarshal(response.Result, &result) - if err != nil { - return common.Hash{}, err - } - - return result.Hash(), nil -} - -// GetCertificateHeader returns the certificate header associated to the hash -func (c *AggLayerClient) GetCertificateHeader(certificateHash common.Hash) (*CertificateHeader, error) { - response, err := rpc.JSONRPCCall(c.url, "interop_getCertificateHeader", certificateHash) - if err != nil { - return nil, err - } - - if response.Error != nil { - return nil, fmt.Errorf("%d %s", response.Error.Code, response.Error.Message) - } - - var result *CertificateHeader - err = json.Unmarshal(response.Result, &result) - if err != nil { - return nil, err - } - - return result, nil -} - -// GetEpochConfiguration returns the clock configuration of AggLayer -func (c *AggLayerClient) GetEpochConfiguration() (*ClockConfiguration, error) { - response, err := jSONRPCCall(c.url, "interop_getEpochConfiguration") - if err != nil { - return nil, err - } - - if response.Error != nil { - return nil, fmt.Errorf("GetEpochConfiguration code=%d msg=%s", response.Error.Code, response.Error.Message) - } - - var result *ClockConfiguration - err = json.Unmarshal(response.Result, &result) - if err != nil { - return nil, err - } - - return result, nil -} - -// GetLatestKnownCertificateHeader returns the last certificate header submitted by networkID -func (c *AggLayerClient) GetLatestKnownCertificateHeader(networkID uint32) (*CertificateHeader, error) { - response, err := jSONRPCCall(c.url, "interop_getLatestKnownCertificateHeader", networkID) - if err != nil { - return nil, fmt.Errorf("GetLatestKnownCertificateHeader error jSONRPCCall. 
Err: %w", err) - } - - if response.Error != nil { - return nil, fmt.Errorf("GetLatestKnownCertificateHeader rpc returns an error: code=%d msg=%s", - response.Error.Code, response.Error.Message) - } - - var result *CertificateHeader - err = json.Unmarshal(response.Result, &result) - if err != nil { - return nil, fmt.Errorf("GetLatestKnownCertificateHeader error Unmashal. Err: %w", err) - } - - return result, nil -} diff --git a/agglayer/client_test.go b/agglayer/client_test.go deleted file mode 100644 index 91ec98c50..000000000 --- a/agglayer/client_test.go +++ /dev/null @@ -1,163 +0,0 @@ -package agglayer - -import ( - "fmt" - "testing" - - "github.com/0xPolygon/cdk-rpc/rpc" - "github.com/ethereum/go-ethereum/common" - "github.com/stretchr/testify/require" -) - -const ( - testURL = "http://localhost:8080" -) - -func TestExploratoryClient(t *testing.T) { - t.Skip("This test is for exploratory purposes only") - sut := NewAggLayerClient("http://127.0.0.1:32781") - config, err := sut.GetEpochConfiguration() - require.NoError(t, err) - require.NotNil(t, config) - fmt.Printf("Config: %s", config.String()) - - lastCert, err := sut.GetLatestKnownCertificateHeader(1) - require.NoError(t, err) - require.NotNil(t, lastCert) - fmt.Printf("LastCert: %s", lastCert.String()) -} - -func TestExploratoryGetCertificateHeader(t *testing.T) { - t.Skip("This test is exploratory and should be skipped") - aggLayerClient := NewAggLayerClient("http://localhost:32796") - certificateID := common.HexToHash("0xf153e75e24591432ac5deafaeaafba3fec0fd851261c86051b9c0d540b38c369") - certificateHeader, err := aggLayerClient.GetCertificateHeader(certificateID) - require.NoError(t, err) - fmt.Print(certificateHeader) -} -func TestExploratoryGetEpochConfiguration(t *testing.T) { - t.Skip("This test is exploratory and should be skipped") - aggLayerClient := NewAggLayerClient("http://localhost:32796") - clockConfig, err := aggLayerClient.GetEpochConfiguration() - require.NoError(t, err) - fmt.Print(clockConfig) -} - -func TestExploratoryGetLatestKnownCertificateHeader(t *testing.T) { - t.Skip("This test is exploratory and should be skipped") - aggLayerClient := NewAggLayerClient("http://localhost:32843") - cert, err := aggLayerClient.GetLatestKnownCertificateHeader(1) - require.NoError(t, err) - fmt.Print(cert) -} - -func TestGetEpochConfigurationResponseWithError(t *testing.T) { - sut := NewAggLayerClient(testURL) - response := rpc.Response{ - Error: &rpc.ErrorObject{}, - } - jSONRPCCall = func(url, method string, params ...interface{}) (rpc.Response, error) { - return response, nil - } - clockConfig, err := sut.GetEpochConfiguration() - require.Nil(t, clockConfig) - require.Error(t, err) -} - -func TestGetEpochConfigurationResponseBadJson(t *testing.T) { - sut := NewAggLayerClient(testURL) - response := rpc.Response{ - Result: []byte(`{`), - } - jSONRPCCall = func(url, method string, params ...interface{}) (rpc.Response, error) { - return response, nil - } - clockConfig, err := sut.GetEpochConfiguration() - require.Nil(t, clockConfig) - require.Error(t, err) -} - -func TestGetEpochConfigurationErrorResponse(t *testing.T) { - sut := NewAggLayerClient(testURL) - - jSONRPCCall = func(url, method string, params ...interface{}) (rpc.Response, error) { - return rpc.Response{}, fmt.Errorf("unittest error") - } - clockConfig, err := sut.GetEpochConfiguration() - require.Nil(t, clockConfig) - require.Error(t, err) -} - -func TestGetEpochConfigurationOkResponse(t *testing.T) { - sut := NewAggLayerClient(testURL) - response := 
rpc.Response{ - Result: []byte(`{"epoch_duration": 1, "genesis_block": 1}`), - } - jSONRPCCall = func(url, method string, params ...interface{}) (rpc.Response, error) { - return response, nil - } - clockConfig, err := sut.GetEpochConfiguration() - require.NotNil(t, clockConfig) - require.NoError(t, err) - require.Equal(t, ClockConfiguration{ - EpochDuration: 1, - GenesisBlock: 1, - }, *clockConfig) -} - -func TestGetLatestKnownCertificateHeaderOkResponse(t *testing.T) { - sut := NewAggLayerClient(testURL) - response := rpc.Response{ - Result: []byte(`{"network_id":1,"height":0,"epoch_number":223,"certificate_index":0,"certificate_id":"0xf9179d2fbe535814b5a14496e2eed474f49c6131227a9dfc5d2d8caf9e212054","new_local_exit_root":"0x7ae06f4a5d0b6da7dd4973fb6ef40d82c9f2680899b3baaf9e564413b59cc160","metadata":"0x00000000000000000000000000000000000000000000000000000000000001a7","status":"Settled"}`), - } - jSONRPCCall = func(url, method string, params ...interface{}) (rpc.Response, error) { - return response, nil - } - cert, err := sut.GetLatestKnownCertificateHeader(1) - require.NotNil(t, cert) - require.NoError(t, err) - require.Nil(t, cert.PreviousLocalExitRoot) -} - -func TestGetLatestKnownCertificateHeaderErrorResponse(t *testing.T) { - sut := NewAggLayerClient(testURL) - jSONRPCCall = func(url, method string, params ...interface{}) (rpc.Response, error) { - return rpc.Response{}, fmt.Errorf("unittest error") - } - - cert, err := sut.GetLatestKnownCertificateHeader(1) - - require.Nil(t, cert) - require.Error(t, err) -} - -func TestGetLatestKnownCertificateHeaderResponseBadJson(t *testing.T) { - sut := NewAggLayerClient(testURL) - response := rpc.Response{ - Result: []byte(`{`), - } - jSONRPCCall = func(url, method string, params ...interface{}) (rpc.Response, error) { - return response, nil - } - - cert, err := sut.GetLatestKnownCertificateHeader(1) - - require.Nil(t, cert) - require.Error(t, err) -} - -func TestGetLatestKnownCertificateHeaderWithPrevLERResponse(t *testing.T) { - sut := NewAggLayerClient(testURL) - response := rpc.Response{ - Result: []byte(`{"network_id":1,"height":0,"epoch_number":223,"certificate_index":0,"certificate_id":"0xf9179d2fbe535814b5a14496e2eed474f49c6131227a9dfc5d2d8caf9e212054","prev_local_exit_root":"0x27ae5ba08d7291c96c8cbddcc148bf48a6d68c7974b94356f53754ef6171d757","new_local_exit_root":"0x7ae06f4a5d0b6da7dd4973fb6ef40d82c9f2680899b3baaf9e564413b59cc160","metadata":"0x00000000000000000000000000000000000000000000000000000000000001a7","status":"Settled"}`), - } - jSONRPCCall = func(_, _ string, _ ...interface{}) (rpc.Response, error) { - return response, nil - } - cert, err := sut.GetLatestKnownCertificateHeader(1) - - require.NoError(t, err) - require.NotNil(t, cert) - - require.Equal(t, "0x27ae5ba08d7291c96c8cbddcc148bf48a6d68c7974b94356f53754ef6171d757", cert.PreviousLocalExitRoot.String()) -} diff --git a/agglayer/errors_test.go b/agglayer/errors_test.go deleted file mode 100644 index 142930263..000000000 --- a/agglayer/errors_test.go +++ /dev/null @@ -1,225 +0,0 @@ -package agglayer - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestConvertMapValue_String(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - data map[string]interface{} - key string - want string - errString string - }{ - { - name: "Key exists and type matches", - data: map[string]interface{}{ - "key1": "value1", - }, - key: "key1", - want: "value1", - }, - { - name: "Key exists but type does not match", - data: map[string]interface{}{ - 
"key1": 1, - }, - key: "key1", - want: "", - errString: "is not of type", - }, - { - name: "Key does not exist", - data: map[string]interface{}{ - "key1": "value1", - }, - key: "key2", - want: "", - errString: "key key2 not found in map", - }, - } - - for _, tt := range tests { - tt := tt - - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - got, err := convertMapValue[string](tt.data, tt.key) - if tt.errString != "" { - require.ErrorContains(t, err, tt.errString) - } else { - require.Equal(t, tt.want, got) - } - }) - } -} - -//nolint:dupl -func TestConvertMapValue_Uint32(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - data map[string]interface{} - key string - want uint32 - errString string - }{ - { - name: "Key exists and type matches", - data: map[string]interface{}{ - "key1": uint32(123), - }, - key: "key1", - want: uint32(123), - }, - { - name: "Key exists but type does not match", - data: map[string]interface{}{ - "key1": "value1", - }, - key: "key1", - want: 0, - errString: "is not of type", - }, - { - name: "Key does not exist", - data: map[string]interface{}{ - "key1": uint32(123), - }, - key: "key2", - want: 0, - errString: "key key2 not found in map", - }, - } - - for _, tt := range tests { - tt := tt - - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - got, err := convertMapValue[uint32](tt.data, tt.key) - if tt.errString != "" { - require.ErrorContains(t, err, tt.errString) - } else { - require.Equal(t, tt.want, got) - } - }) - } -} - -//nolint:dupl -func TestConvertMapValue_Uint64(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - data map[string]interface{} - key string - want uint64 - errString string - }{ - { - name: "Key exists and type matches", - data: map[string]interface{}{ - "key1": uint64(3411), - }, - key: "key1", - want: uint64(3411), - }, - { - name: "Key exists but type does not match", - data: map[string]interface{}{ - "key1": "not a number", - }, - key: "key1", - want: 0, - errString: "is not of type", - }, - { - name: "Key does not exist", - data: map[string]interface{}{ - "key1": uint64(123555), - }, - key: "key22", - want: 0, - errString: "key key22 not found in map", - }, - } - - for _, tt := range tests { - tt := tt - - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - got, err := convertMapValue[uint64](tt.data, tt.key) - if tt.errString != "" { - require.ErrorContains(t, err, tt.errString) - } else { - require.Equal(t, tt.want, got) - } - }) - } -} - -func TestConvertMapValue_Bool(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - data map[string]interface{} - key string - want bool - errString string - }{ - { - name: "Key exists and type matches", - data: map[string]interface{}{ - "key1": true, - }, - key: "key1", - want: true, - }, - { - name: "Key exists but type does not match", - data: map[string]interface{}{ - "key1": "value1", - }, - key: "key1", - want: false, - errString: "is not of type", - }, - { - name: "Key does not exist", - data: map[string]interface{}{ - "key1": true, - }, - key: "key2", - want: false, - errString: "key key2 not found in map", - }, - } - - for _, tt := range tests { - tt := tt - - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - got, err := convertMapValue[bool](tt.data, tt.key) - if tt.errString != "" { - require.ErrorContains(t, err, tt.errString) - } else { - require.Equal(t, tt.want, got) - } - }) - } -} diff --git a/agglayer/mock_agglayer_client.go b/agglayer/mock_agglayer_client.go index 8b8c86899..8b9a819fd 100644 --- 
a/agglayer/mock_agglayer_client.go +++ b/agglayer/mock_agglayer_client.go @@ -23,237 +23,6 @@ func (_m *AgglayerClientMock) EXPECT() *AgglayerClientMock_Expecter { return &AgglayerClientMock_Expecter{mock: &_m.Mock} } -// GetCertificateHeader provides a mock function with given fields: certificateHash -func (_m *AgglayerClientMock) GetCertificateHeader(certificateHash common.Hash) (*CertificateHeader, error) { - ret := _m.Called(certificateHash) - - if len(ret) == 0 { - panic("no return value specified for GetCertificateHeader") - } - - var r0 *CertificateHeader - var r1 error - if rf, ok := ret.Get(0).(func(common.Hash) (*CertificateHeader, error)); ok { - return rf(certificateHash) - } - if rf, ok := ret.Get(0).(func(common.Hash) *CertificateHeader); ok { - r0 = rf(certificateHash) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*CertificateHeader) - } - } - - if rf, ok := ret.Get(1).(func(common.Hash) error); ok { - r1 = rf(certificateHash) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// AgglayerClientMock_GetCertificateHeader_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetCertificateHeader' -type AgglayerClientMock_GetCertificateHeader_Call struct { - *mock.Call -} - -// GetCertificateHeader is a helper method to define mock.On call -// - certificateHash common.Hash -func (_e *AgglayerClientMock_Expecter) GetCertificateHeader(certificateHash interface{}) *AgglayerClientMock_GetCertificateHeader_Call { - return &AgglayerClientMock_GetCertificateHeader_Call{Call: _e.mock.On("GetCertificateHeader", certificateHash)} -} - -func (_c *AgglayerClientMock_GetCertificateHeader_Call) Run(run func(certificateHash common.Hash)) *AgglayerClientMock_GetCertificateHeader_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(common.Hash)) - }) - return _c -} - -func (_c *AgglayerClientMock_GetCertificateHeader_Call) Return(_a0 *CertificateHeader, _a1 error) *AgglayerClientMock_GetCertificateHeader_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *AgglayerClientMock_GetCertificateHeader_Call) RunAndReturn(run func(common.Hash) (*CertificateHeader, error)) *AgglayerClientMock_GetCertificateHeader_Call { - _c.Call.Return(run) - return _c -} - -// GetEpochConfiguration provides a mock function with no fields -func (_m *AgglayerClientMock) GetEpochConfiguration() (*ClockConfiguration, error) { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for GetEpochConfiguration") - } - - var r0 *ClockConfiguration - var r1 error - if rf, ok := ret.Get(0).(func() (*ClockConfiguration, error)); ok { - return rf() - } - if rf, ok := ret.Get(0).(func() *ClockConfiguration); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*ClockConfiguration) - } - } - - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// AgglayerClientMock_GetEpochConfiguration_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetEpochConfiguration' -type AgglayerClientMock_GetEpochConfiguration_Call struct { - *mock.Call -} - -// GetEpochConfiguration is a helper method to define mock.On call -func (_e *AgglayerClientMock_Expecter) GetEpochConfiguration() *AgglayerClientMock_GetEpochConfiguration_Call { - return &AgglayerClientMock_GetEpochConfiguration_Call{Call: _e.mock.On("GetEpochConfiguration")} -} - -func (_c *AgglayerClientMock_GetEpochConfiguration_Call) Run(run func()) 
*AgglayerClientMock_GetEpochConfiguration_Call { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *AgglayerClientMock_GetEpochConfiguration_Call) Return(_a0 *ClockConfiguration, _a1 error) *AgglayerClientMock_GetEpochConfiguration_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *AgglayerClientMock_GetEpochConfiguration_Call) RunAndReturn(run func() (*ClockConfiguration, error)) *AgglayerClientMock_GetEpochConfiguration_Call { - _c.Call.Return(run) - return _c -} - -// GetLatestKnownCertificateHeader provides a mock function with given fields: networkID -func (_m *AgglayerClientMock) GetLatestKnownCertificateHeader(networkID uint32) (*CertificateHeader, error) { - ret := _m.Called(networkID) - - if len(ret) == 0 { - panic("no return value specified for GetLatestKnownCertificateHeader") - } - - var r0 *CertificateHeader - var r1 error - if rf, ok := ret.Get(0).(func(uint32) (*CertificateHeader, error)); ok { - return rf(networkID) - } - if rf, ok := ret.Get(0).(func(uint32) *CertificateHeader); ok { - r0 = rf(networkID) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*CertificateHeader) - } - } - - if rf, ok := ret.Get(1).(func(uint32) error); ok { - r1 = rf(networkID) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// AgglayerClientMock_GetLatestKnownCertificateHeader_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLatestKnownCertificateHeader' -type AgglayerClientMock_GetLatestKnownCertificateHeader_Call struct { - *mock.Call -} - -// GetLatestKnownCertificateHeader is a helper method to define mock.On call -// - networkID uint32 -func (_e *AgglayerClientMock_Expecter) GetLatestKnownCertificateHeader(networkID interface{}) *AgglayerClientMock_GetLatestKnownCertificateHeader_Call { - return &AgglayerClientMock_GetLatestKnownCertificateHeader_Call{Call: _e.mock.On("GetLatestKnownCertificateHeader", networkID)} -} - -func (_c *AgglayerClientMock_GetLatestKnownCertificateHeader_Call) Run(run func(networkID uint32)) *AgglayerClientMock_GetLatestKnownCertificateHeader_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(uint32)) - }) - return _c -} - -func (_c *AgglayerClientMock_GetLatestKnownCertificateHeader_Call) Return(_a0 *CertificateHeader, _a1 error) *AgglayerClientMock_GetLatestKnownCertificateHeader_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *AgglayerClientMock_GetLatestKnownCertificateHeader_Call) RunAndReturn(run func(uint32) (*CertificateHeader, error)) *AgglayerClientMock_GetLatestKnownCertificateHeader_Call { - _c.Call.Return(run) - return _c -} - -// SendCertificate provides a mock function with given fields: certificate -func (_m *AgglayerClientMock) SendCertificate(certificate *SignedCertificate) (common.Hash, error) { - ret := _m.Called(certificate) - - if len(ret) == 0 { - panic("no return value specified for SendCertificate") - } - - var r0 common.Hash - var r1 error - if rf, ok := ret.Get(0).(func(*SignedCertificate) (common.Hash, error)); ok { - return rf(certificate) - } - if rf, ok := ret.Get(0).(func(*SignedCertificate) common.Hash); ok { - r0 = rf(certificate) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(common.Hash) - } - } - - if rf, ok := ret.Get(1).(func(*SignedCertificate) error); ok { - r1 = rf(certificate) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// AgglayerClientMock_SendCertificate_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SendCertificate' -type 
AgglayerClientMock_SendCertificate_Call struct { - *mock.Call -} - -// SendCertificate is a helper method to define mock.On call -// - certificate *SignedCertificate -func (_e *AgglayerClientMock_Expecter) SendCertificate(certificate interface{}) *AgglayerClientMock_SendCertificate_Call { - return &AgglayerClientMock_SendCertificate_Call{Call: _e.mock.On("SendCertificate", certificate)} -} - -func (_c *AgglayerClientMock_SendCertificate_Call) Run(run func(certificate *SignedCertificate)) *AgglayerClientMock_SendCertificate_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(*SignedCertificate)) - }) - return _c -} - -func (_c *AgglayerClientMock_SendCertificate_Call) Return(_a0 common.Hash, _a1 error) *AgglayerClientMock_SendCertificate_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *AgglayerClientMock_SendCertificate_Call) RunAndReturn(run func(*SignedCertificate) (common.Hash, error)) *AgglayerClientMock_SendCertificate_Call { - _c.Call.Return(run) - return _c -} - // SendTx provides a mock function with given fields: signedTx func (_m *AgglayerClientMock) SendTx(signedTx SignedTx) (common.Hash, error) { ret := _m.Called(signedTx) diff --git a/agglayer/types.go b/agglayer/types.go deleted file mode 100644 index de1a0063d..000000000 --- a/agglayer/types.go +++ /dev/null @@ -1,897 +0,0 @@ -package agglayer - -import ( - "encoding/json" - "errors" - "fmt" - "math/big" - "reflect" - "slices" - "strings" - - "github.com/0xPolygon/cdk/bridgesync" - cdkcommon "github.com/0xPolygon/cdk/common" - "github.com/0xPolygon/cdk/tree/types" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" -) - -type CertificateStatus int - -const ( - Pending CertificateStatus = iota - Proven - Candidate - InError - Settled - - nilStr = "nil" - nullStr = "null" - base10 = 10 -) - -var ( - NonSettledStatuses = []CertificateStatus{Pending, Candidate, Proven} - ClosedStatuses = []CertificateStatus{Settled, InError} -) - -// String representation of the enum -func (c CertificateStatus) String() string { - return [...]string{"Pending", "Proven", "Candidate", "InError", "Settled"}[c] -} - -// IsClosed returns true if the certificate is closed (settled or inError) -func (c CertificateStatus) IsClosed() bool { - return !c.IsOpen() -} - -// IsSettled returns true if the certificate is settled -func (c CertificateStatus) IsSettled() bool { - return c == Settled -} - -// IsInError returns true if the certificate is in error -func (c CertificateStatus) IsInError() bool { - return c == InError -} - -// IsOpen returns true if the certificate is open (pending, candidate or proven) -func (c CertificateStatus) IsOpen() bool { - return slices.Contains(NonSettledStatuses, c) -} - -// UnmarshalJSON is the implementation of the json.Unmarshaler interface -func (c *CertificateStatus) UnmarshalJSON(rawStatus []byte) error { - status := strings.Trim(string(rawStatus), "\"") - if strings.Contains(status, "InError") { - status = "InError" - } - - switch status { - case "Pending": - *c = Pending - case "InError": - *c = InError - case "Proven": - *c = Proven - case "Candidate": - *c = Candidate - case "Settled": - *c = Settled - default: - // Maybe the status is numeric: - var statusInt int - if _, err := fmt.Sscanf(status, "%d", &statusInt); err == nil { - *c = CertificateStatus(statusInt) - } else { - return fmt.Errorf("invalid status: %s", status) - } - } - - return nil -} - -type LeafType uint8 - -func (l LeafType) Uint8() uint8 { - return uint8(l) -} - -func (l LeafType) String() 
string { - return [...]string{"Transfer", "Message"}[l] -} - -func (l *LeafType) UnmarshalJSON(raw []byte) error { - rawStr := strings.Trim(string(raw), "\"") - switch rawStr { - case "Transfer": - *l = LeafTypeAsset - case "Message": - *l = LeafTypeMessage - default: - var value int - if _, err := fmt.Sscanf(rawStr, "%d", &value); err != nil { - return fmt.Errorf("invalid LeafType: %s", rawStr) - } - *l = LeafType(value) - } - return nil -} - -const ( - LeafTypeAsset LeafType = iota - LeafTypeMessage -) - -// Certificate is the data structure that will be sent to the agglayer -type Certificate struct { - NetworkID uint32 `json:"network_id"` - Height uint64 `json:"height"` - PrevLocalExitRoot common.Hash `json:"prev_local_exit_root"` - NewLocalExitRoot common.Hash `json:"new_local_exit_root"` - BridgeExits []*BridgeExit `json:"bridge_exits"` - ImportedBridgeExits []*ImportedBridgeExit `json:"imported_bridge_exits"` - Metadata common.Hash `json:"metadata"` -} - -// Brief returns a string with a brief cert -func (c *Certificate) Brief() string { - if c == nil { - return nilStr - } - res := fmt.Sprintf("agglayer.Cert {height: %d prevLER: %s newLER: %s exits: %d imported_exits: %d}", c.Height, - c.PrevLocalExitRoot.String(), c.NewLocalExitRoot.String(), - len(c.BridgeExits), len(c.ImportedBridgeExits)) - return res -} - -// Hash returns a hash that uniquely identifies the certificate -func (c *Certificate) Hash() common.Hash { - bridgeExitsHashes := make([][]byte, len(c.BridgeExits)) - for i, bridgeExit := range c.BridgeExits { - bridgeExitsHashes[i] = bridgeExit.Hash().Bytes() - } - - importedBridgeExitsHashes := make([][]byte, len(c.ImportedBridgeExits)) - for i, importedBridgeExit := range c.ImportedBridgeExits { - importedBridgeExitsHashes[i] = importedBridgeExit.Hash().Bytes() - } - - bridgeExitsPart := crypto.Keccak256(bridgeExitsHashes...) - importedBridgeExitsPart := crypto.Keccak256(importedBridgeExitsHashes...) 
- - return crypto.Keccak256Hash( - cdkcommon.Uint32ToBytes(c.NetworkID), - cdkcommon.Uint64ToBytes(c.Height), - c.PrevLocalExitRoot.Bytes(), - c.NewLocalExitRoot.Bytes(), - bridgeExitsPart, - importedBridgeExitsPart, - ) -} - -// HashToSign is the actual hash that needs to be signed by the aggsender -// as expected by the agglayer -func (c *Certificate) HashToSign() common.Hash { - globalIndexHashes := make([][]byte, len(c.ImportedBridgeExits)) - for i, importedBridgeExit := range c.ImportedBridgeExits { - globalIndexHashes[i] = importedBridgeExit.GlobalIndex.Hash().Bytes() - } - - return crypto.Keccak256Hash( - c.NewLocalExitRoot.Bytes(), - crypto.Keccak256Hash(globalIndexHashes...).Bytes(), - ) -} - -// SignedCertificate is the struct that contains the certificate and the signature of the signer -type SignedCertificate struct { - *Certificate - Signature *Signature `json:"signature"` -} - -func (s *SignedCertificate) Brief() string { - return fmt.Sprintf("Certificate:%s,\nSignature: %s", s.Certificate.Brief(), s.Signature.String()) -} - -// CopyWithDefaulting returns a shallow copy of the signed certificate -func (s *SignedCertificate) CopyWithDefaulting() *SignedCertificate { - certificateCopy := *s.Certificate - - if certificateCopy.BridgeExits == nil { - certificateCopy.BridgeExits = make([]*BridgeExit, 0) - } - - if certificateCopy.ImportedBridgeExits == nil { - certificateCopy.ImportedBridgeExits = make([]*ImportedBridgeExit, 0) - } - - signature := s.Signature - if signature == nil { - signature = &Signature{} - } - - return &SignedCertificate{ - Certificate: &certificateCopy, - Signature: signature, - } -} - -// Signature is the data structure that will hold the signature of the given certificate -type Signature struct { - R common.Hash `json:"r"` - S common.Hash `json:"s"` - OddParity bool `json:"odd_y_parity"` -} - -func (s *Signature) String() string { - return fmt.Sprintf("R: %s, S: %s, OddParity: %t", s.R.String(), s.S.String(), s.OddParity) -} - -// TokenInfo encapsulates the information to uniquely identify a token on the origin network. 
-type TokenInfo struct { - OriginNetwork uint32 `json:"origin_network"` - OriginTokenAddress common.Address `json:"origin_token_address"` -} - -// String returns a string representation of the TokenInfo struct -func (t *TokenInfo) String() string { - return fmt.Sprintf("OriginNetwork: %d, OriginTokenAddress: %s", t.OriginNetwork, t.OriginTokenAddress.String()) -} - -// GlobalIndex represents the global index of an imported bridge exit -type GlobalIndex struct { - MainnetFlag bool `json:"mainnet_flag"` - RollupIndex uint32 `json:"rollup_index"` - LeafIndex uint32 `json:"leaf_index"` -} - -// String returns a string representation of the GlobalIndex struct -func (g *GlobalIndex) String() string { - return fmt.Sprintf("MainnetFlag: %t, RollupIndex: %d, LeafIndex: %d", g.MainnetFlag, g.RollupIndex, g.LeafIndex) -} - -func (g *GlobalIndex) Hash() common.Hash { - return crypto.Keccak256Hash( - cdkcommon.BigIntToLittleEndianBytes( - bridgesync.GenerateGlobalIndex(g.MainnetFlag, g.RollupIndex, g.LeafIndex), - ), - ) -} - -func (g *GlobalIndex) UnmarshalFromMap(data map[string]interface{}) error { - rollupIndex, err := convertMapValue[uint32](data, "rollup_index") - if err != nil { - return err - } - - leafIndex, err := convertMapValue[uint32](data, "leaf_index") - if err != nil { - return err - } - - mainnetFlag, err := convertMapValue[bool](data, "mainnet_flag") - if err != nil { - return err - } - - g.RollupIndex = rollupIndex - g.LeafIndex = leafIndex - g.MainnetFlag = mainnetFlag - - return nil -} - -// BridgeExit represents a token bridge exit -type BridgeExit struct { - LeafType LeafType `json:"leaf_type"` - TokenInfo *TokenInfo `json:"token_info"` - DestinationNetwork uint32 `json:"dest_network"` - DestinationAddress common.Address `json:"dest_address"` - Amount *big.Int `json:"amount"` - IsMetadataHashed bool `json:"-"` - Metadata []byte `json:"metadata"` -} - -func (b *BridgeExit) String() string { - res := fmt.Sprintf("LeafType: %s, DestinationNetwork: %d, DestinationAddress: %s, Amount: %s, Metadata: %s", - b.LeafType.String(), b.DestinationNetwork, b.DestinationAddress.String(), - b.Amount.String(), common.Bytes2Hex(b.Metadata)) - - if b.TokenInfo == nil { - res += ", TokenInfo: nil" - } else { - res += fmt.Sprintf(", TokenInfo: %s", b.TokenInfo.String()) - } - - return res -} - -// Hash returns a hash that uniquely identifies the bridge exit -func (b *BridgeExit) Hash() common.Hash { - if b.Amount == nil { - b.Amount = big.NewInt(0) - } - var metaDataHash []byte - if b.IsMetadataHashed { - metaDataHash = b.Metadata - } else { - metaDataHash = crypto.Keccak256(b.Metadata) - } - - return crypto.Keccak256Hash( - []byte{b.LeafType.Uint8()}, - cdkcommon.Uint32ToBytes(b.TokenInfo.OriginNetwork), - b.TokenInfo.OriginTokenAddress.Bytes(), - cdkcommon.Uint32ToBytes(b.DestinationNetwork), - b.DestinationAddress.Bytes(), - common.BigToHash(b.Amount).Bytes(), - metaDataHash, - ) -} - -// MarshalJSON is the implementation of the json.Marshaler interface -func (b *BridgeExit) MarshalJSON() ([]byte, error) { - var metadataString interface{} - if b.IsMetadataHashed { - metadataString = common.Bytes2Hex(b.Metadata) - } else if len(b.Metadata) > 0 { - metadataString = bytesToUints(b.Metadata) - } else { - metadataString = nil - } - - return json.Marshal(&struct { - LeafType string `json:"leaf_type"` - TokenInfo *TokenInfo `json:"token_info"` - DestinationNetwork uint32 `json:"dest_network"` - DestinationAddress common.Address `json:"dest_address"` - Amount string `json:"amount"` - Metadata interface{} 
`json:"metadata"` - }{ - LeafType: b.LeafType.String(), - TokenInfo: b.TokenInfo, - DestinationNetwork: b.DestinationNetwork, - DestinationAddress: b.DestinationAddress, - Amount: b.Amount.String(), - Metadata: metadataString, - }) -} - -func (b *BridgeExit) UnmarshalJSON(data []byte) error { - aux := &struct { - LeafType LeafType `json:"leaf_type"` - TokenInfo *TokenInfo `json:"token_info"` - DestinationNetwork uint32 `json:"dest_network"` - DestinationAddress common.Address `json:"dest_address"` - Amount string `json:"amount"` - Metadata interface{} `json:"metadata"` - }{} - - if err := json.Unmarshal(data, &aux); err != nil { - return err - } - b.LeafType = aux.LeafType - b.TokenInfo = aux.TokenInfo - b.DestinationNetwork = aux.DestinationNetwork - b.DestinationAddress = aux.DestinationAddress - var ok bool - if !strings.Contains(aux.Amount, nilStr) { - b.Amount, ok = new(big.Int).SetString(aux.Amount, base10) - if !ok { - return fmt.Errorf("failed to convert amount to big.Int: %s", aux.Amount) - } - } - if s, ok := aux.Metadata.(string); ok { - b.IsMetadataHashed = true - b.Metadata = common.Hex2Bytes(s) - } else if uints, ok := aux.Metadata.([]interface{}); ok { - b.IsMetadataHashed = false - b.Metadata = make([]byte, len(uints)) - for k, v := range uints { - value, ok := v.(float64) - if !ok { - return fmt.Errorf("failed to convert metadata to byte: %v", v) - } - b.Metadata[k] = byte(value) - } - } else { - b.Metadata = nil - } - return nil -} - -// bytesToUints converts a byte slice to a slice of uints -func bytesToUints(data []byte) []uint { - uints := make([]uint, len(data)) - for i, b := range data { - uints[i] = uint(b) - } - return uints -} - -// MerkleProof represents an inclusion proof of a leaf in a Merkle tree -type MerkleProof struct { - Root common.Hash `json:"root"` - Proof [types.DefaultHeight]common.Hash `json:"proof"` -} - -// MarshalJSON is the implementation of the json.Marshaler interface -func (m *MerkleProof) MarshalJSON() ([]byte, error) { - return json.Marshal(&struct { - Root common.Hash `json:"root"` - Proof map[string][types.DefaultHeight]common.Hash `json:"proof"` - }{ - Root: m.Root, - Proof: map[string][types.DefaultHeight]common.Hash{ - "siblings": m.Proof, - }, - }) -} - -func (m *MerkleProof) UnmarshalJSON(data []byte) error { - aux := &struct { - Root common.Hash `json:"root"` - Proof map[string][types.DefaultHeight]common.Hash `json:"proof"` - }{} - - if err := json.Unmarshal(data, &aux); err != nil { - return err - } - m.Root = aux.Root - m.Proof = aux.Proof["siblings"] - return nil -} - -// Hash returns the hash of the Merkle proof struct -func (m *MerkleProof) Hash() common.Hash { - proofsAsSingleSlice := make([]byte, 0) - - for _, proof := range m.Proof { - proofsAsSingleSlice = append(proofsAsSingleSlice, proof.Bytes()...) 
- } - - return crypto.Keccak256Hash( - m.Root.Bytes(), - proofsAsSingleSlice, - ) -} - -func (m *MerkleProof) String() string { - return fmt.Sprintf("Root: %s, Proof: %v", m.Root.String(), m.Proof) -} - -// L1InfoTreeLeafInner represents the inner part of the L1 info tree leaf -type L1InfoTreeLeafInner struct { - GlobalExitRoot common.Hash `json:"global_exit_root"` - BlockHash common.Hash `json:"block_hash"` - Timestamp uint64 `json:"timestamp"` -} - -// Hash returns the hash of the L1InfoTreeLeafInner struct -func (l *L1InfoTreeLeafInner) Hash() common.Hash { - return crypto.Keccak256Hash( - l.GlobalExitRoot.Bytes(), - l.BlockHash.Bytes(), - cdkcommon.Uint64ToBytes(l.Timestamp), - ) -} - -func (l *L1InfoTreeLeafInner) String() string { - return fmt.Sprintf("GlobalExitRoot: %s, BlockHash: %s, Timestamp: %d", - l.GlobalExitRoot.String(), l.BlockHash.String(), l.Timestamp) -} - -// L1InfoTreeLeaf represents the leaf of the L1 info tree -type L1InfoTreeLeaf struct { - L1InfoTreeIndex uint32 `json:"l1_info_tree_index"` - RollupExitRoot common.Hash `json:"rer"` - MainnetExitRoot common.Hash `json:"mer"` - Inner *L1InfoTreeLeafInner `json:"inner"` -} - -// Hash returns the hash of the L1InfoTreeLeaf struct -func (l *L1InfoTreeLeaf) Hash() common.Hash { - return l.Inner.Hash() -} - -func (l *L1InfoTreeLeaf) String() string { - return fmt.Sprintf("L1InfoTreeIndex: %d, RollupExitRoot: %s, MainnetExitRoot: %s, Inner: %s", - l.L1InfoTreeIndex, - l.RollupExitRoot.String(), - l.MainnetExitRoot.String(), - l.Inner.String(), - ) -} - -// Claim is the interface that will be implemented by the different types of claims -type Claim interface { - Type() string - Hash() common.Hash - MarshalJSON() ([]byte, error) - String() string -} - -// ClaimFromMainnnet represents a claim originating from the mainnet -type ClaimFromMainnnet struct { - ProofLeafMER *MerkleProof `json:"proof_leaf_mer"` - ProofGERToL1Root *MerkleProof `json:"proof_ger_l1root"` - L1Leaf *L1InfoTreeLeaf `json:"l1_leaf"` -} - -// Type is the implementation of Claim interface -func (c ClaimFromMainnnet) Type() string { - return "Mainnet" -} - -// MarshalJSON is the implementation of Claim interface -func (c *ClaimFromMainnnet) MarshalJSON() ([]byte, error) { - return json.Marshal(&struct { - Child map[string]interface{} `json:"Mainnet"` - }{ - Child: map[string]interface{}{ - "proof_leaf_mer": c.ProofLeafMER, - "proof_ger_l1root": c.ProofGERToL1Root, - "l1_leaf": c.L1Leaf, - }, - }) -} - -func (c *ClaimFromMainnnet) UnmarshalJSON(data []byte) error { - if string(data) == nullStr { - return nil - } - - claimData := &struct { - Child struct { - ProofLeafMER *MerkleProof `json:"proof_leaf_mer"` - ProofGERToL1Root *MerkleProof `json:"proof_ger_l1root"` - L1Leaf *L1InfoTreeLeaf `json:"l1_leaf"` - } `json:"Mainnet"` - }{} - if err := json.Unmarshal(data, &claimData); err != nil { - return fmt.Errorf("failed to unmarshal the subobject: %w", err) - } - c.ProofLeafMER = claimData.Child.ProofLeafMER - c.ProofGERToL1Root = claimData.Child.ProofGERToL1Root - c.L1Leaf = claimData.Child.L1Leaf - - return nil -} - -// Hash is the implementation of Claim interface -func (c *ClaimFromMainnnet) Hash() common.Hash { - return crypto.Keccak256Hash( - c.ProofLeafMER.Hash().Bytes(), - c.ProofGERToL1Root.Hash().Bytes(), - c.L1Leaf.Hash().Bytes(), - ) -} - -func (c *ClaimFromMainnnet) String() string { - return fmt.Sprintf("ProofLeafMER: %s, ProofGERToL1Root: %s, L1Leaf: %s", - c.ProofLeafMER.String(), c.ProofGERToL1Root.String(), c.L1Leaf.String()) -} - -// 
ClaimFromRollup represents a claim originating from a rollup -type ClaimFromRollup struct { - ProofLeafLER *MerkleProof `json:"proof_leaf_ler"` - ProofLERToRER *MerkleProof `json:"proof_ler_rer"` - ProofGERToL1Root *MerkleProof `json:"proof_ger_l1root"` - L1Leaf *L1InfoTreeLeaf `json:"l1_leaf"` -} - -// Type is the implementation of Claim interface -func (c ClaimFromRollup) Type() string { - return "Rollup" -} - -// MarshalJSON is the implementation of Claim interface -func (c *ClaimFromRollup) MarshalJSON() ([]byte, error) { - return json.Marshal(&struct { - Child map[string]interface{} `json:"Rollup"` - }{ - Child: map[string]interface{}{ - "proof_leaf_ler": c.ProofLeafLER, - "proof_ler_rer": c.ProofLERToRER, - "proof_ger_l1root": c.ProofGERToL1Root, - "l1_leaf": c.L1Leaf, - }, - }) -} - -func (c *ClaimFromRollup) UnmarshalJSON(data []byte) error { - if string(data) == nullStr { - return nil - } - - claimData := &struct { - Child struct { - ProofLeafLER *MerkleProof `json:"proof_leaf_ler"` - ProofLERToRER *MerkleProof `json:"proof_ler_rer"` - ProofGERToL1Root *MerkleProof `json:"proof_ger_l1root"` - L1Leaf *L1InfoTreeLeaf `json:"l1_leaf"` - } `json:"Rollup"` - }{} - - if err := json.Unmarshal(data, &claimData); err != nil { - return fmt.Errorf("failed to unmarshal the subobject: %w", err) - } - c.ProofLeafLER = claimData.Child.ProofLeafLER - c.ProofLERToRER = claimData.Child.ProofLERToRER - c.ProofGERToL1Root = claimData.Child.ProofGERToL1Root - c.L1Leaf = claimData.Child.L1Leaf - - return nil -} - -// Hash is the implementation of Claim interface -func (c *ClaimFromRollup) Hash() common.Hash { - return crypto.Keccak256Hash( - c.ProofLeafLER.Hash().Bytes(), - c.ProofLERToRER.Hash().Bytes(), - c.ProofGERToL1Root.Hash().Bytes(), - c.L1Leaf.Hash().Bytes(), - ) -} - -func (c *ClaimFromRollup) String() string { - return fmt.Sprintf("ProofLeafLER: %s, ProofLERToRER: %s, ProofGERToL1Root: %s, L1Leaf: %s", - c.ProofLeafLER.String(), c.ProofLERToRER.String(), c.ProofGERToL1Root.String(), c.L1Leaf.String()) -} - -// ClaimSelector is a helper struct that allow to decice which type of claim to unmarshal -type ClaimSelector struct { - obj Claim -} - -func (c *ClaimSelector) GetObject() Claim { - return c.obj -} - -func (c *ClaimSelector) UnmarshalJSON(data []byte) error { - var obj map[string]interface{} - if string(data) == nullStr { - return nil - } - if err := json.Unmarshal(data, &obj); err != nil { - return err - } - var ok bool - if _, ok = obj["Mainnet"]; ok { - c.obj = &ClaimFromMainnnet{} - } else if _, ok = obj["Rollup"]; ok { - c.obj = &ClaimFromRollup{} - } else { - return errors.New("invalid claim type") - } - - return json.Unmarshal(data, &c.obj) -} - -// ImportedBridgeExit represents a token bridge exit originating on another network but claimed on the current network. 
-type ImportedBridgeExit struct { - BridgeExit *BridgeExit `json:"bridge_exit"` - ClaimData Claim `json:"claim_data"` - GlobalIndex *GlobalIndex `json:"global_index"` -} - -func (c *ImportedBridgeExit) String() string { - var res string - - if c.BridgeExit == nil { - res = "BridgeExit: nil" - } else { - res = fmt.Sprintf("BridgeExit: %s", c.BridgeExit.String()) - } - - if c.GlobalIndex == nil { - res += ", GlobalIndex: nil" - } else { - res += fmt.Sprintf(", GlobalIndex: %s", c.GlobalIndex.String()) - } - - res += fmt.Sprintf("ClaimData: %s", c.ClaimData.String()) - - return res -} - -func (c *ImportedBridgeExit) UnmarshalJSON(data []byte) error { - aux := &struct { - BridgeExit *BridgeExit `json:"bridge_exit"` - ClaimData ClaimSelector `json:"claim_data"` - GlobalIndex *GlobalIndex `json:"global_index"` - }{} - if err := json.Unmarshal(data, &aux); err != nil { - return err - } - c.BridgeExit = aux.BridgeExit - c.ClaimData = aux.ClaimData.GetObject() - c.GlobalIndex = aux.GlobalIndex - return nil -} - -// Hash returns a hash that uniquely identifies the imported bridge exit -func (c *ImportedBridgeExit) Hash() common.Hash { - return crypto.Keccak256Hash( - c.BridgeExit.Hash().Bytes(), - c.ClaimData.Hash().Bytes(), - c.GlobalIndex.Hash().Bytes(), - ) -} - -var _ error = (*GenericError)(nil) - -type GenericError struct { - Key string - Value string -} - -func (p *GenericError) Error() string { - return fmt.Sprintf("[Agglayer Error] %s: %s", p.Key, p.Value) -} - -// CertificateHeader is the structure returned by the interop_getCertificateHeader RPC call -type CertificateHeader struct { - NetworkID uint32 `json:"network_id"` - Height uint64 `json:"height"` - EpochNumber *uint64 `json:"epoch_number"` - CertificateIndex *uint64 `json:"certificate_index"` - CertificateID common.Hash `json:"certificate_id"` - PreviousLocalExitRoot *common.Hash `json:"prev_local_exit_root,omitempty"` - NewLocalExitRoot common.Hash `json:"new_local_exit_root"` - Status CertificateStatus `json:"status"` - Metadata common.Hash `json:"metadata"` - Error error `json:"-"` -} - -// ID returns a string with the ident of this cert (height/certID) -func (c *CertificateHeader) ID() string { - if c == nil { - return nilStr - } - return fmt.Sprintf("%d/%s", c.Height, c.CertificateID.String()) -} - -func (c *CertificateHeader) String() string { - if c == nil { - return nilStr - } - errors := "" - if c.Error != nil { - errors = c.Error.Error() - } - previousLocalExitRoot := nilStr - if c.PreviousLocalExitRoot != nil { - previousLocalExitRoot = c.PreviousLocalExitRoot.String() - } - return fmt.Sprintf("Height: %d, CertificateID: %s, PreviousLocalExitRoot: %s, NewLocalExitRoot: %s. 
Status: %s."+ - " Errors: [%s]", - c.Height, c.CertificateID.String(), previousLocalExitRoot, c.NewLocalExitRoot.String(), c.Status.String(), errors) -} - -func (c *CertificateHeader) UnmarshalJSON(data []byte) error { - // we define an alias to avoid infinite recursion - type Alias CertificateHeader - aux := &struct { - Status interface{} `json:"status"` - *Alias - }{ - Alias: (*Alias)(c), - } - - if err := json.Unmarshal(data, &aux); err != nil { - return err - } - - // Process Status field - switch status := aux.Status.(type) { - case string: // certificate not InError - if err := c.Status.UnmarshalJSON([]byte(status)); err != nil { - return err - } - case map[string]interface{}: // certificate has errors - inErrMap, err := convertMapValue[map[string]interface{}](status, "InError") - if err != nil { - return err - } - - inErrDataMap, err := convertMapValue[map[string]interface{}](inErrMap, "error") - if err != nil { - return err - } - - var agglayerErr error - - for errKey, errValueRaw := range inErrDataMap { - if errValueJSON, err := json.Marshal(errValueRaw); err != nil { - agglayerErr = &GenericError{ - Key: errKey, - Value: fmt.Sprintf("failed to marshal the agglayer error to the JSON. Raw value: %+v\nReason: %+v", - errValueRaw, err), - } - } else { - agglayerErr = &GenericError{Key: errKey, Value: string(errValueJSON)} - } - } - - c.Status = InError - c.Error = agglayerErr - default: - return errors.New("invalid status type") - } - - return nil -} - -// convertMapValue converts the value of a key in a map to a target type. -func convertMapValue[T any](data map[string]interface{}, key string) (T, error) { - value, ok := data[key] - if !ok { - var zero T - return zero, fmt.Errorf("key %s not found in map", key) - } - - // Try a direct type assertion - if convertedValue, ok := value.(T); ok { - return convertedValue, nil - } - - // If direct assertion fails, handle numeric type conversions - var target T - targetType := reflect.TypeOf(target) - - // Check if value is a float64 (default JSON number type) and target is a numeric type - if floatValue, ok := value.(float64); ok && targetType.Kind() >= reflect.Int && targetType.Kind() <= reflect.Uint64 { - convertedValue, err := convertNumeric(floatValue, targetType) - if err != nil { - return target, fmt.Errorf("conversion error for key %s: %w", key, err) - } - return convertedValue.(T), nil //nolint:forcetypeassert - } - - return target, fmt.Errorf("value of key %s is not of type %T", key, target) -} - -// convertNumeric converts a float64 to the specified numeric type. 
-func convertNumeric(value float64, targetType reflect.Type) (interface{}, error) { - switch targetType.Kind() { - case reflect.Int: - return int(value), nil - case reflect.Int8: - return int8(value), nil - case reflect.Int16: - return int16(value), nil - case reflect.Int32: - return int32(value), nil - case reflect.Int64: - return int64(value), nil - case reflect.Uint: - return uint(value), nil - case reflect.Uint8: - return uint8(value), nil - case reflect.Uint16: - return uint16(value), nil - case reflect.Uint32: - return uint32(value), nil - case reflect.Uint64: - return uint64(value), nil - case reflect.Float32: - return float32(value), nil - case reflect.Float64: - return value, nil - default: - return nil, fmt.Errorf("unsupported target type %v", targetType) - } -} - -// ClockConfiguration represents the configuration of the epoch clock -// returned by the interop_GetEpochConfiguration RPC call -type ClockConfiguration struct { - EpochDuration uint64 `json:"epoch_duration"` - GenesisBlock uint64 `json:"genesis_block"` -} - -func (c ClockConfiguration) String() string { - return fmt.Sprintf("EpochDuration: %d, GenesisBlock: %d", c.EpochDuration, c.GenesisBlock) -} diff --git a/agglayer/types_helpers_test.go b/agglayer/types_helpers_test.go deleted file mode 100644 index 1bd03a3fe..000000000 --- a/agglayer/types_helpers_test.go +++ /dev/null @@ -1,71 +0,0 @@ -package agglayer - -import ( - "fmt" - "testing" - - "github.com/0xPolygon/cdk/tree/types" - "github.com/ethereum/go-ethereum/common" -) - -// Helper function to create a dummy TokenInfo -func createDummyTokenInfo(t *testing.T) *TokenInfo { - t.Helper() - - return &TokenInfo{ - OriginNetwork: 1, - OriginTokenAddress: common.HexToAddress("0x2345"), - } -} - -// Helper function to create a dummy GlobalIndex -func createDummyGlobalIndex(t *testing.T) *GlobalIndex { - t.Helper() - - return &GlobalIndex{ - MainnetFlag: false, - RollupIndex: 10, - LeafIndex: 1, - } -} - -// Helper function to create a dummy Claim -func createDummyClaim(t *testing.T) *ClaimFromMainnnet { - t.Helper() - - return &ClaimFromMainnnet{ - ProofLeafMER: &MerkleProof{ - Root: common.HexToHash("0x1234"), - Proof: [common.HashLength]common.Hash{ - common.HexToHash("0x1234"), - common.HexToHash("0x5678"), - }, - }, - ProofGERToL1Root: &MerkleProof{ - Root: common.HexToHash("0x5678"), - Proof: [common.HashLength]common.Hash{ - common.HexToHash("0x5678"), - common.HexToHash("0x1234"), - }, - }, - L1Leaf: &L1InfoTreeLeaf{ - L1InfoTreeIndex: 1, - RollupExitRoot: common.HexToHash("0x987654321"), - MainnetExitRoot: common.HexToHash("0x123456789"), - Inner: &L1InfoTreeLeafInner{}, - }, - } -} - -// Helper function to create a dummy proof -func createDummyProof(t *testing.T) types.Proof { - t.Helper() - - proof := types.Proof{} - - for i := 0; i < int(types.DefaultHeight); i++ { - proof[i] = common.HexToHash(fmt.Sprintf("0x%x", i)) - } - - return proof -} diff --git a/agglayer/types_test.go b/agglayer/types_test.go deleted file mode 100644 index 9e7e4d4b5..000000000 --- a/agglayer/types_test.go +++ /dev/null @@ -1,1203 +0,0 @@ -package agglayer - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "math/big" - "reflect" - "testing" - - cdkcommon "github.com/0xPolygon/cdk/common" - "github.com/0xPolygon/cdk/log" - "github.com/0xPolygon/cdk/tree/types" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" - "github.com/stretchr/testify/require" -) - -const ( - expectedSignedCertificateEmptyMetadataJSON = 
`{"network_id":1,"height":1,"prev_local_exit_root":"0x0000000000000000000000000000000000000000000000000000000000000000","new_local_exit_root":"0x0000000000000000000000000000000000000000000000000000000000000000","bridge_exits":[{"leaf_type":"Transfer","token_info":null,"dest_network":0,"dest_address":"0x0000000000000000000000000000000000000000","amount":"1","metadata":null}],"imported_bridge_exits":[{"bridge_exit":{"leaf_type":"Transfer","token_info":null,"dest_network":0,"dest_address":"0x0000000000000000000000000000000000000000","amount":"1","metadata":null},"claim_data":null,"global_index":{"mainnet_flag":false,"rollup_index":1,"leaf_index":1}}],"metadata":"0x0000000000000000000000000000000000000000000000000000000000000000","signature":{"r":"0x0000000000000000000000000000000000000000000000000000000000000000","s":"0x0000000000000000000000000000000000000000000000000000000000000000","odd_y_parity":false}}` - expectedSignedCertificateMetadataJSON = `{"network_id":1,"height":1,"prev_local_exit_root":"0x0000000000000000000000000000000000000000000000000000000000000000","new_local_exit_root":"0x0000000000000000000000000000000000000000000000000000000000000000","bridge_exits":[{"leaf_type":"Transfer","token_info":null,"dest_network":0,"dest_address":"0x0000000000000000000000000000000000000000","amount":"1","metadata":[1,2,3]}],"imported_bridge_exits":[{"bridge_exit":{"leaf_type":"Transfer","token_info":null,"dest_network":0,"dest_address":"0x0000000000000000000000000000000000000000","amount":"1","metadata":null},"claim_data":null,"global_index":{"mainnet_flag":false,"rollup_index":1,"leaf_index":1}}],"metadata":"0x0000000000000000000000000000000000000000000000000000000000000000","signature":{"r":"0x0000000000000000000000000000000000000000000000000000000000000000","s":"0x0000000000000000000000000000000000000000000000000000000000000000","odd_y_parity":false}}` - fullCertificateJSON = 
`{"network_id":1,"height":0,"prev_local_exit_root":"0x27ae5ba08d7291c96c8cbddcc148bf48a6d68c7974b94356f53754ef6171d757","new_local_exit_root":"0x79011be874bf6f229d8473eb251aa932210bc3ab843a316492d5bc0e4b9e945b","bridge_exits":[{"leaf_type":"Transfer","token_info":{"origin_network":0,"origin_token_address":"0x0000000000000000000000000000000000000000"},"dest_network":0,"dest_address":"0xbece3a31343c6019cde0d5a4df2af8df17ebcb0f","amount":"10000005400000000","metadata":null}],"imported_bridge_exits":[{"bridge_exit":{"leaf_type":"Transfer","token_info":{"origin_network":0,"origin_token_address":"0x0000000000000000000000000000000000000000"},"dest_network":1,"dest_address":"0xbece3a31343c6019cde0d5a4df2af8df17ebcb0f","amount":"20000005400000000","metadata":null},"claim_data":{"Mainnet":{"l1_leaf":{"l1_info_tree_index":3,"rer":"0x0000000000000000000000000000000000000000000000000000000000000000","mer":"0x34c7e5206c4c793171805029b5a3a5c6f2d3e5344731cd69912142dc083768bf","inner":{"global_exit_root":"0xefb4efc883a8d7ab7c414684a4f44fac0f522d5eef9144dbad85a6b7756d770d","block_hash":"0x02224ad091ae2762001610174fb70885734761b3518aca77b8af63308f3c0b67","timestamp":1734434917}},"proof_ger_l1root":{"root":"0x73011c89c4cb976b1feeec2185dba22ecdac6d424afeb83ed5cacfdaae735e95","proof":{"siblings":["0x628d0adbb4d8c80a15f0743fa32385efd0798189228dd83c73e09409d94c2273","0x0ffee9fcedabc3f74d1b86e97c2b3039f7b2c617a100d6ca1ad5c0e613939b05","0xe7dcc1ef21d4705f16b0c2976a5358719a60361f2435bd342e3f97c287ae5040","0x21ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba85","0xe58769b32a1beaf1ea27375a44095a0d1fb664ce2dd358e7fcbfb78c26a19344","0x0eb01ebfc9ed27500cd4dfc979272d1f0913cc9f66540d7e8005811109e1cf2d","0x887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968","0xffd70157e48063fc33c97a050f7f640233bf646cc98d9524c6b92bcf3ab56f83","0x9867cc5f7f196b93bae1e27e6320742445d290f2263827498b54fec539f756af","0xcefad4e508c098b9a7e1d8feb19955fb02ba9675585078710969d3440f5054e0","0xf9dc3e7fe016e050eff260334f18a5d4fe391d82092319f5964f2e2eb7c1c3a5","0xf8b13a49e282f609c317a833fb8d976d11517c571d1221a265d25af778ecf892","0x3490c6ceeb450aecdc82e28293031d10c7d73bf85e57bf041a97360aa2c5d99c","0xc1df82d9c4b87413eae2ef048f94b4d3554cea73d92b0f7af96e0271c691e2bb","0x5c67add7c6caf302256adedf7ab114da0acfe870d449a3a489f781d659e8becc","0xda7bce9f4e8618b6bd2f4132ce798cdc7a60e7e1460a7299e3c6342a579626d2","0x2733e50f526ec2fa19a22b31e8ed50f23cd1fdf94c9154ed3a7609a2f1ff981f","0xe1d3b5c807b281e4683cc6d6315cf95b9ade8641defcb32372f1c126e398ef7a","0x5a2dce0a8a7f68bb74560f8f71837c2c2ebbcbf7fffb42ae1896f13f7c7479a0","0xb46a28b6f55540f89444f63de0378e3d121be09e06cc9ded1c20e65876d36aa0","0xc65e9645644786b620e2dd2ad648ddfcbf4a7e5b1a3a4ecfe7f64667a3f0b7e2","0xf4418588ed35a2458cffeb39b93d26f18d2ab13bdce6aee58e7b99359ec2dfd9","0x5a9c16dc00d6ef18b7933a6f8dc65ccb55667138776f7dea101070dc8796e377","0x4df84f40ae0c8229d0d6069e5c8f39a7c299677a09d367fc7b05e3bc380ee652","0xcdc72595f74c7b1043d0e1ffbab734648c838dfb0527d971b602bc216c9619ef","0x0abf5ac974a1ed57f4050aa510dd9c74f508277b39d7973bb2dfccc5eeb0618d","0xb8cd74046ff337f0a7bf2c8e03e10f642c1886798d71806ab1e888d9e5ee87d0","0x838c5655cb21c6cb83313b5a631175dff4963772cce9108188b34ac87c81c41e","0x662ee4dd2dd7b2bc707961b1e646c4047669dcb6584f0d8d770daf5d7e7deb2e","0x388ab20e2573d171a88108e79d820e98f26c0b84aa8b2f4aa4968dbb818ea322","0x93237c50ba75ee485f4c22adf2f741400bdf8d6a9cc7df7ecae576221665d735","0x8448818bb4ae4562849e949e17ac16e0be16688e156b5cf15e098c627c0056a9"]}},"proof_leaf_mer":{"root":"0x34c7e5206c4c79317180
5029b5a3a5c6f2d3e5344731cd69912142dc083768bf","proof":{"siblings":["0x7e5dddb55a966fa6ccd6d470bb326a4fcef563567d6897c45b7ed885de710757","0x4b274df9344e005bfd46536d791100a85234bef4fab0348d1b2ffc0e7a709d33","0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30","0x21ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba85","0xe58769b32a1beaf1ea27375a44095a0d1fb664ce2dd358e7fcbfb78c26a19344","0x0eb01ebfc9ed27500cd4dfc979272d1f0913cc9f66540d7e8005811109e1cf2d","0x887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968","0xffd70157e48063fc33c97a050f7f640233bf646cc98d9524c6b92bcf3ab56f83","0x9867cc5f7f196b93bae1e27e6320742445d290f2263827498b54fec539f756af","0xcefad4e508c098b9a7e1d8feb19955fb02ba9675585078710969d3440f5054e0","0xf9dc3e7fe016e050eff260334f18a5d4fe391d82092319f5964f2e2eb7c1c3a5","0xf8b13a49e282f609c317a833fb8d976d11517c571d1221a265d25af778ecf892","0x3490c6ceeb450aecdc82e28293031d10c7d73bf85e57bf041a97360aa2c5d99c","0xc1df82d9c4b87413eae2ef048f94b4d3554cea73d92b0f7af96e0271c691e2bb","0x5c67add7c6caf302256adedf7ab114da0acfe870d449a3a489f781d659e8becc","0xda7bce9f4e8618b6bd2f4132ce798cdc7a60e7e1460a7299e3c6342a579626d2","0x2733e50f526ec2fa19a22b31e8ed50f23cd1fdf94c9154ed3a7609a2f1ff981f","0xe1d3b5c807b281e4683cc6d6315cf95b9ade8641defcb32372f1c126e398ef7a","0x5a2dce0a8a7f68bb74560f8f71837c2c2ebbcbf7fffb42ae1896f13f7c7479a0","0xb46a28b6f55540f89444f63de0378e3d121be09e06cc9ded1c20e65876d36aa0","0xc65e9645644786b620e2dd2ad648ddfcbf4a7e5b1a3a4ecfe7f64667a3f0b7e2","0xf4418588ed35a2458cffeb39b93d26f18d2ab13bdce6aee58e7b99359ec2dfd9","0x5a9c16dc00d6ef18b7933a6f8dc65ccb55667138776f7dea101070dc8796e377","0x4df84f40ae0c8229d0d6069e5c8f39a7c299677a09d367fc7b05e3bc380ee652","0xcdc72595f74c7b1043d0e1ffbab734648c838dfb0527d971b602bc216c9619ef","0x0abf5ac974a1ed57f4050aa510dd9c74f508277b39d7973bb2dfccc5eeb0618d","0xb8cd74046ff337f0a7bf2c8e03e10f642c1886798d71806ab1e888d9e5ee87d0","0x838c5655cb21c6cb83313b5a631175dff4963772cce9108188b34ac87c81c41e","0x662ee4dd2dd7b2bc707961b1e646c4047669dcb6584f0d8d770daf5d7e7deb2e","0x388ab20e2573d171a88108e79d820e98f26c0b84aa8b2f4aa4968dbb818ea322","0x93237c50ba75ee485f4c22adf2f741400bdf8d6a9cc7df7ecae576221665d735","0x8448818bb4ae4562849e949e17ac16e0be16688e156b5cf15e098c627c0056a9"]}}}},"global_index":{"mainnet_flag":true,"rollup_index":0,"leaf_index":2}},{"bridge_exit":{"leaf_type":"Transfer","token_info":{"origin_network":0,"origin_token_address":"0x0000000000000000000000000000000000000000"},"dest_network":1,"dest_address":"0xbece3a31343c6019cde0d5a4df2af8df17ebcb0f","amount":"1234567","metadata":null},"claim_data":{"Rollup":{"l1_leaf":{"l1_info_tree_index":4,"rer":"0x33267c0646fee979e59af1cd62f9e46cd0917f62aba82658e1a92a50e1d7b4d1","mer":"0x34c7e5206c4c793171805029b5a3a5c6f2d3e5344731cd69912142dc083768bf","inner":{"global_exit_root":"0x6df4684b75569ffa9c0d352d1293c5d98950ecc1ea34226194842d10b14f47d0","block_hash":"0x52bbc4079dcaaac2f6e950a0fe5aed613473faf48a721709ce347c4ddfe0b50d","timestamp":1734435263}},"proof_ger_l1root":{"root":"0x73011c89c4cb976b1feeec2185dba22ecdac6d424afeb83ed5cacfdaae735e95","proof":{"siblings":["0x0000000000000000000000000000000000000000000000000000000000000000","0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5","0x20a8c649fbea68114dca04c42bf16e23c6b39d4eafcc54378c5b7516c3a3c9d2","0x21ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba85","0xe58769b32a1beaf1ea27375a44095a0d1fb664ce2dd358e7fcbfb78c26a19344","0x0eb01ebfc9ed27500cd4dfc979272d1f0913cc9f66540d7e8005811109e1cf2d","0x887c2
2bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968","0xffd70157e48063fc33c97a050f7f640233bf646cc98d9524c6b92bcf3ab56f83","0x9867cc5f7f196b93bae1e27e6320742445d290f2263827498b54fec539f756af","0xcefad4e508c098b9a7e1d8feb19955fb02ba9675585078710969d3440f5054e0","0xf9dc3e7fe016e050eff260334f18a5d4fe391d82092319f5964f2e2eb7c1c3a5","0xf8b13a49e282f609c317a833fb8d976d11517c571d1221a265d25af778ecf892","0x3490c6ceeb450aecdc82e28293031d10c7d73bf85e57bf041a97360aa2c5d99c","0xc1df82d9c4b87413eae2ef048f94b4d3554cea73d92b0f7af96e0271c691e2bb","0x5c67add7c6caf302256adedf7ab114da0acfe870d449a3a489f781d659e8becc","0xda7bce9f4e8618b6bd2f4132ce798cdc7a60e7e1460a7299e3c6342a579626d2","0x2733e50f526ec2fa19a22b31e8ed50f23cd1fdf94c9154ed3a7609a2f1ff981f","0xe1d3b5c807b281e4683cc6d6315cf95b9ade8641defcb32372f1c126e398ef7a","0x5a2dce0a8a7f68bb74560f8f71837c2c2ebbcbf7fffb42ae1896f13f7c7479a0","0xb46a28b6f55540f89444f63de0378e3d121be09e06cc9ded1c20e65876d36aa0","0xc65e9645644786b620e2dd2ad648ddfcbf4a7e5b1a3a4ecfe7f64667a3f0b7e2","0xf4418588ed35a2458cffeb39b93d26f18d2ab13bdce6aee58e7b99359ec2dfd9","0x5a9c16dc00d6ef18b7933a6f8dc65ccb55667138776f7dea101070dc8796e377","0x4df84f40ae0c8229d0d6069e5c8f39a7c299677a09d367fc7b05e3bc380ee652","0xcdc72595f74c7b1043d0e1ffbab734648c838dfb0527d971b602bc216c9619ef","0x0abf5ac974a1ed57f4050aa510dd9c74f508277b39d7973bb2dfccc5eeb0618d","0xb8cd74046ff337f0a7bf2c8e03e10f642c1886798d71806ab1e888d9e5ee87d0","0x838c5655cb21c6cb83313b5a631175dff4963772cce9108188b34ac87c81c41e","0x662ee4dd2dd7b2bc707961b1e646c4047669dcb6584f0d8d770daf5d7e7deb2e","0x388ab20e2573d171a88108e79d820e98f26c0b84aa8b2f4aa4968dbb818ea322","0x93237c50ba75ee485f4c22adf2f741400bdf8d6a9cc7df7ecae576221665d735","0x8448818bb4ae4562849e949e17ac16e0be16688e156b5cf15e098c627c0056a9"]}},"proof_leaf_ler":{"root":"0x156ab7795d0bb31ed548c43f90e71b8f06f71e5776a5ba444f3f3cb0935b4647","proof":{"siblings":["0x0000000000000000000000000000000000000000000000000000000000000000","0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5","0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30","0x21ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba85","0xe58769b32a1beaf1ea27375a44095a0d1fb664ce2dd358e7fcbfb78c26a19344","0x0eb01ebfc9ed27500cd4dfc979272d1f0913cc9f66540d7e8005811109e1cf2d","0x887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968","0xffd70157e48063fc33c97a050f7f640233bf646cc98d9524c6b92bcf3ab56f83","0x9867cc5f7f196b93bae1e27e6320742445d290f2263827498b54fec539f756af","0xcefad4e508c098b9a7e1d8feb19955fb02ba9675585078710969d3440f5054e0","0xf9dc3e7fe016e050eff260334f18a5d4fe391d82092319f5964f2e2eb7c1c3a5","0xf8b13a49e282f609c317a833fb8d976d11517c571d1221a265d25af778ecf892","0x3490c6ceeb450aecdc82e28293031d10c7d73bf85e57bf041a97360aa2c5d99c","0xc1df82d9c4b87413eae2ef048f94b4d3554cea73d92b0f7af96e0271c691e2bb","0x5c67add7c6caf302256adedf7ab114da0acfe870d449a3a489f781d659e8becc","0xda7bce9f4e8618b6bd2f4132ce798cdc7a60e7e1460a7299e3c6342a579626d2","0x2733e50f526ec2fa19a22b31e8ed50f23cd1fdf94c9154ed3a7609a2f1ff981f","0xe1d3b5c807b281e4683cc6d6315cf95b9ade8641defcb32372f1c126e398ef7a","0x5a2dce0a8a7f68bb74560f8f71837c2c2ebbcbf7fffb42ae1896f13f7c7479a0","0xb46a28b6f55540f89444f63de0378e3d121be09e06cc9ded1c20e65876d36aa0","0xc65e9645644786b620e2dd2ad648ddfcbf4a7e5b1a3a4ecfe7f64667a3f0b7e2","0xf4418588ed35a2458cffeb39b93d26f18d2ab13bdce6aee58e7b99359ec2dfd9","0x5a9c16dc00d6ef18b7933a6f8dc65ccb55667138776f7dea101070dc8796e377","0x4df84f40ae0c8229d0d6069e5c8f39a7c299677a09d367fc7b05e3bc380ee
652","0xcdc72595f74c7b1043d0e1ffbab734648c838dfb0527d971b602bc216c9619ef","0x0abf5ac974a1ed57f4050aa510dd9c74f508277b39d7973bb2dfccc5eeb0618d","0xb8cd74046ff337f0a7bf2c8e03e10f642c1886798d71806ab1e888d9e5ee87d0","0x838c5655cb21c6cb83313b5a631175dff4963772cce9108188b34ac87c81c41e","0x662ee4dd2dd7b2bc707961b1e646c4047669dcb6584f0d8d770daf5d7e7deb2e","0x388ab20e2573d171a88108e79d820e98f26c0b84aa8b2f4aa4968dbb818ea322","0x93237c50ba75ee485f4c22adf2f741400bdf8d6a9cc7df7ecae576221665d735","0x8448818bb4ae4562849e949e17ac16e0be16688e156b5cf15e098c627c0056a9"]}},"proof_ler_rer":{"root":"0x33267c0646fee979e59af1cd62f9e46cd0917f62aba82658e1a92a50e1d7b4d1","proof":{"siblings":["0x0000000000000000000000000000000000000000000000000000000000000000","0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5","0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30","0x21ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba85","0xe58769b32a1beaf1ea27375a44095a0d1fb664ce2dd358e7fcbfb78c26a19344","0x0eb01ebfc9ed27500cd4dfc979272d1f0913cc9f66540d7e8005811109e1cf2d","0x887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968","0xffd70157e48063fc33c97a050f7f640233bf646cc98d9524c6b92bcf3ab56f83","0x9867cc5f7f196b93bae1e27e6320742445d290f2263827498b54fec539f756af","0xcefad4e508c098b9a7e1d8feb19955fb02ba9675585078710969d3440f5054e0","0xf9dc3e7fe016e050eff260334f18a5d4fe391d82092319f5964f2e2eb7c1c3a5","0xf8b13a49e282f609c317a833fb8d976d11517c571d1221a265d25af778ecf892","0x3490c6ceeb450aecdc82e28293031d10c7d73bf85e57bf041a97360aa2c5d99c","0xc1df82d9c4b87413eae2ef048f94b4d3554cea73d92b0f7af96e0271c691e2bb","0x5c67add7c6caf302256adedf7ab114da0acfe870d449a3a489f781d659e8becc","0xda7bce9f4e8618b6bd2f4132ce798cdc7a60e7e1460a7299e3c6342a579626d2","0x2733e50f526ec2fa19a22b31e8ed50f23cd1fdf94c9154ed3a7609a2f1ff981f","0xe1d3b5c807b281e4683cc6d6315cf95b9ade8641defcb32372f1c126e398ef7a","0x5a2dce0a8a7f68bb74560f8f71837c2c2ebbcbf7fffb42ae1896f13f7c7479a0","0xb46a28b6f55540f89444f63de0378e3d121be09e06cc9ded1c20e65876d36aa0","0xc65e9645644786b620e2dd2ad648ddfcbf4a7e5b1a3a4ecfe7f64667a3f0b7e2","0xf4418588ed35a2458cffeb39b93d26f18d2ab13bdce6aee58e7b99359ec2dfd9","0x5a9c16dc00d6ef18b7933a6f8dc65ccb55667138776f7dea101070dc8796e377","0x4df84f40ae0c8229d0d6069e5c8f39a7c299677a09d367fc7b05e3bc380ee652","0xcdc72595f74c7b1043d0e1ffbab734648c838dfb0527d971b602bc216c9619ef","0x0abf5ac974a1ed57f4050aa510dd9c74f508277b39d7973bb2dfccc5eeb0618d","0xb8cd74046ff337f0a7bf2c8e03e10f642c1886798d71806ab1e888d9e5ee87d0","0x838c5655cb21c6cb83313b5a631175dff4963772cce9108188b34ac87c81c41e","0x662ee4dd2dd7b2bc707961b1e646c4047669dcb6584f0d8d770daf5d7e7deb2e","0x388ab20e2573d171a88108e79d820e98f26c0b84aa8b2f4aa4968dbb818ea322","0x93237c50ba75ee485f4c22adf2f741400bdf8d6a9cc7df7ecae576221665d735","0x8448818bb4ae4562849e949e17ac16e0be16688e156b5cf15e098c627c0056a9"]}}}},"global_index":{"mainnet_flag":false,"rollup_index":1,"leaf_index":0}}],"metadata":"0x0100000000000000010000047867616580000000000000000000000000000000","signature":{"r":"0x4798dc4c299dfa4299c1992624271e2f1953cac3a909742ec4ca9549582c9273","s":"0x29762f0418ab0fc6019c1ea4c0722fe19477708e29e3d1416727339deba5660e","odd_y_parity":false}}` -) - -func TestBridgeExit_Hash(t *testing.T) { - t.Parallel() - - MetadaHash := common.HexToHash("0x1234") - bridge := BridgeExit{ - TokenInfo: &TokenInfo{}, - IsMetadataHashed: true, - Metadata: MetadaHash[:], - } - require.Equal(t, "0xaa57e4bf430fe25ca5068f9e1a25e8aef15744905cdf7635e0d5a468bd26bb18", - bridge.Hash().String(), 
"use the hashed metadata, instead of calculating hash") - - bridge.IsMetadataHashed = false - require.Equal(t, "0x79d5362ad609e06e022277ede4fd10899dc189c0ed56e1a2c6982d0563fe1be7", - bridge.Hash().String(), "metadata is not hashed, calculate hash") - - bridge.IsMetadataHashed = false - bridge.Metadata = []byte{} - require.Equal(t, "0xe3e297278c7df4ae4f235be10155ac62c53b08e2a14ed09b7dd6b688952ee883", - bridge.Hash().String(), "metadata is not hashed and it's empty, calculate hash") - - bridge.IsMetadataHashed = true - bridge.Metadata = []byte{} - require.Equal(t, "0x51980562e41978f15369c21f26920284ac6836d53b02cd89edf4fedc97e68215", - bridge.Hash().String(), "metadata is a hashed and it's empty,use it") -} - -func TestGenericError_Error(t *testing.T) { - t.Parallel() - - err := GenericError{"test", "value"} - require.Equal(t, "[Agglayer Error] test: value", err.Error()) -} - -func TestCertificateHeader_ID(t *testing.T) { - t.Parallel() - - certificate := CertificateHeader{ - Height: 1, - CertificateID: common.HexToHash("0x123"), - } - require.Equal(t, "1/0x0000000000000000000000000000000000000000000000000000000000000123", certificate.ID()) - - var certNil *CertificateHeader - require.Equal(t, "nil", certNil.ID()) -} - -func TestCertificateHeaderString(t *testing.T) { - t.Parallel() - - certificate := CertificateHeader{ - Height: 1, - CertificateID: common.HexToHash("0x123"), - } - require.Equal(t, "Height: 1, CertificateID: 0x0000000000000000000000000000000000000000000000000000000000000123, PreviousLocalExitRoot: nil, NewLocalExitRoot: 0x0000000000000000000000000000000000000000000000000000000000000000. Status: Pending. Errors: []", - certificate.String()) - - var certNil *CertificateHeader - require.Equal(t, "nil", certNil.String()) -} - -func TestMarshalJSON(t *testing.T) { - t.Parallel() - - t.Run("MarshalJSON with empty proofs", func(t *testing.T) { - t.Parallel() - - cert := SignedCertificate{ - Certificate: &Certificate{ - NetworkID: 1, - Height: 1, - PrevLocalExitRoot: common.Hash{}, - NewLocalExitRoot: common.Hash{}, - BridgeExits: []*BridgeExit{ - { - LeafType: LeafTypeAsset, - DestinationAddress: common.Address{}, - Amount: big.NewInt(1), - }, - }, - ImportedBridgeExits: []*ImportedBridgeExit{ - { - BridgeExit: &BridgeExit{ - LeafType: LeafTypeAsset, - DestinationAddress: common.Address{}, - Amount: big.NewInt(1), - Metadata: []byte{}, - }, - ClaimData: nil, - GlobalIndex: &GlobalIndex{ - MainnetFlag: false, - RollupIndex: 1, - LeafIndex: 1, - }, - }, - }, - }, - - Signature: &Signature{ - R: common.Hash{}, - S: common.Hash{}, - OddParity: false, - }, - } - data, err := json.Marshal(cert) - require.NoError(t, err) - log.Info(string(data)) - require.Equal(t, expectedSignedCertificateEmptyMetadataJSON, string(data)) - - cert.BridgeExits[0].Metadata = []byte{1, 2, 3} - data, err = json.Marshal(cert) - require.NoError(t, err) - log.Info(string(data)) - require.Equal(t, expectedSignedCertificateMetadataJSON, string(data)) - }) - - t.Run("MarshalJSON with proofs", func(t *testing.T) { - t.Parallel() - - cert := SignedCertificate{ - Certificate: &Certificate{ - NetworkID: 11, - Height: 111, - PrevLocalExitRoot: common.HexToHash("0x111"), - NewLocalExitRoot: common.HexToHash("0x222"), - BridgeExits: []*BridgeExit{ - { - LeafType: LeafTypeAsset, - TokenInfo: &TokenInfo{OriginNetwork: 1, OriginTokenAddress: common.HexToAddress("0x123")}, - DestinationNetwork: 2, - DestinationAddress: common.HexToAddress("0x456"), - Amount: big.NewInt(1000), - Metadata: []byte{}, // we leave it empty on 
purpose to see when marshaled it will be null - }, - }, - ImportedBridgeExits: []*ImportedBridgeExit{ - { - BridgeExit: &BridgeExit{ - LeafType: LeafTypeMessage, - TokenInfo: &TokenInfo{OriginNetwork: 1, OriginTokenAddress: common.HexToAddress("0x789")}, - DestinationNetwork: 2, - DestinationAddress: common.HexToAddress("0xabc"), - Amount: big.NewInt(2000), - Metadata: []byte{0x03, 0x04}, - }, - GlobalIndex: &GlobalIndex{ - MainnetFlag: true, - RollupIndex: 0, - LeafIndex: 1, - }, - ClaimData: &ClaimFromMainnnet{ - ProofLeafMER: &MerkleProof{ - Root: common.HexToHash("0x333"), - Proof: createDummyProof(t), - }, - ProofGERToL1Root: &MerkleProof{ - Root: common.HexToHash("0x444"), - Proof: createDummyProof(t), - }, - L1Leaf: &L1InfoTreeLeaf{ - L1InfoTreeIndex: 1, - RollupExitRoot: common.HexToHash("0x555"), - MainnetExitRoot: common.HexToHash("0x123456"), - Inner: &L1InfoTreeLeafInner{ - GlobalExitRoot: common.HexToHash("0x777"), - BlockHash: common.HexToHash("0x888"), - Timestamp: 12345678, - }, - }, - }, - }, - { - BridgeExit: &BridgeExit{ - LeafType: LeafTypeAsset, - TokenInfo: &TokenInfo{OriginNetwork: 1, OriginTokenAddress: common.HexToAddress("0x789")}, - DestinationNetwork: 2, - DestinationAddress: common.HexToAddress("0xabcdef"), - Amount: big.NewInt(2201), - Metadata: []byte{0x05, 0x08}, - }, - GlobalIndex: &GlobalIndex{ - MainnetFlag: false, - RollupIndex: 1, - LeafIndex: 2, - }, - ClaimData: &ClaimFromRollup{ - ProofLeafLER: &MerkleProof{ - Root: common.HexToHash("0x333"), - Proof: createDummyProof(t), - }, - ProofLERToRER: &MerkleProof{ - Root: common.HexToHash("0x444"), - Proof: createDummyProof(t), - }, - ProofGERToL1Root: &MerkleProof{ - Root: common.HexToHash("0x555"), - Proof: createDummyProof(t), - }, - L1Leaf: &L1InfoTreeLeaf{ - L1InfoTreeIndex: 2, - RollupExitRoot: common.HexToHash("0x532"), - MainnetExitRoot: common.HexToHash("0x654321"), - Inner: &L1InfoTreeLeafInner{ - GlobalExitRoot: common.HexToHash("0x777"), - BlockHash: common.HexToHash("0x888"), - Timestamp: 12345678, - }, - }, - }, - }, - }, - Metadata: common.HexToHash("0xdef"), - }, - Signature: &Signature{ - R: common.HexToHash("0x111"), - S: common.HexToHash("0x222"), - OddParity: true, - }, - } - - expectedJSON := 
`{"network_id":11,"height":111,"prev_local_exit_root":"0x0000000000000000000000000000000000000000000000000000000000000111","new_local_exit_root":"0x0000000000000000000000000000000000000000000000000000000000000222","bridge_exits":[{"leaf_type":"Transfer","token_info":{"origin_network":1,"origin_token_address":"0x0000000000000000000000000000000000000123"},"dest_network":2,"dest_address":"0x0000000000000000000000000000000000000456","amount":"1000","metadata":null}],"imported_bridge_exits":[{"bridge_exit":{"leaf_type":"Message","token_info":{"origin_network":1,"origin_token_address":"0x0000000000000000000000000000000000000789"},"dest_network":2,"dest_address":"0x0000000000000000000000000000000000000abc","amount":"2000","metadata":[3,4]},"claim_data":{"Mainnet":{"l1_leaf":{"l1_info_tree_index":1,"rer":"0x0000000000000000000000000000000000000000000000000000000000000555","mer":"0x0000000000000000000000000000000000000000000000000000000000123456","inner":{"global_exit_root":"0x0000000000000000000000000000000000000000000000000000000000000777","block_hash":"0x0000000000000000000000000000000000000000000000000000000000000888","timestamp":12345678}},"proof_ger_l1root":{"root":"0x0000000000000000000000000000000000000000000000000000000000000444","proof":{"siblings":["0x0000000000000000000000000000000000000000000000000000000000000000","0x0000000000000000000000000000000000000000000000000000000000000001","0x0000000000000000000000000000000000000000000000000000000000000002","0x0000000000000000000000000000000000000000000000000000000000000003","0x0000000000000000000000000000000000000000000000000000000000000004","0x0000000000000000000000000000000000000000000000000000000000000005","0x0000000000000000000000000000000000000000000000000000000000000006","0x0000000000000000000000000000000000000000000000000000000000000007","0x0000000000000000000000000000000000000000000000000000000000000008","0x0000000000000000000000000000000000000000000000000000000000000009","0x000000000000000000000000000000000000000000000000000000000000000a","0x000000000000000000000000000000000000000000000000000000000000000b","0x000000000000000000000000000000000000000000000000000000000000000c","0x000000000000000000000000000000000000000000000000000000000000000d","0x000000000000000000000000000000000000000000000000000000000000000e","0x000000000000000000000000000000000000000000000000000000000000000f","0x0000000000000000000000000000000000000000000000000000000000000010","0x0000000000000000000000000000000000000000000000000000000000000011","0x0000000000000000000000000000000000000000000000000000000000000012","0x0000000000000000000000000000000000000000000000000000000000000013","0x0000000000000000000000000000000000000000000000000000000000000014","0x0000000000000000000000000000000000000000000000000000000000000015","0x0000000000000000000000000000000000000000000000000000000000000016","0x0000000000000000000000000000000000000000000000000000000000000017","0x0000000000000000000000000000000000000000000000000000000000000018","0x0000000000000000000000000000000000000000000000000000000000000019","0x000000000000000000000000000000000000000000000000000000000000001a","0x000000000000000000000000000000000000000000000000000000000000001b","0x000000000000000000000000000000000000000000000000000000000000001c","0x000000000000000000000000000000000000000000000000000000000000001d","0x000000000000000000000000000000000000000000000000000000000000001e","0x000000000000000000000000000000000000000000000000000000000000001f"]}},"proof_leaf_mer":{"root":"0x000000000000000000000000000000000000000000000
0000000000000000333","proof":{"siblings":["0x0000000000000000000000000000000000000000000000000000000000000000","0x0000000000000000000000000000000000000000000000000000000000000001","0x0000000000000000000000000000000000000000000000000000000000000002","0x0000000000000000000000000000000000000000000000000000000000000003","0x0000000000000000000000000000000000000000000000000000000000000004","0x0000000000000000000000000000000000000000000000000000000000000005","0x0000000000000000000000000000000000000000000000000000000000000006","0x0000000000000000000000000000000000000000000000000000000000000007","0x0000000000000000000000000000000000000000000000000000000000000008","0x0000000000000000000000000000000000000000000000000000000000000009","0x000000000000000000000000000000000000000000000000000000000000000a","0x000000000000000000000000000000000000000000000000000000000000000b","0x000000000000000000000000000000000000000000000000000000000000000c","0x000000000000000000000000000000000000000000000000000000000000000d","0x000000000000000000000000000000000000000000000000000000000000000e","0x000000000000000000000000000000000000000000000000000000000000000f","0x0000000000000000000000000000000000000000000000000000000000000010","0x0000000000000000000000000000000000000000000000000000000000000011","0x0000000000000000000000000000000000000000000000000000000000000012","0x0000000000000000000000000000000000000000000000000000000000000013","0x0000000000000000000000000000000000000000000000000000000000000014","0x0000000000000000000000000000000000000000000000000000000000000015","0x0000000000000000000000000000000000000000000000000000000000000016","0x0000000000000000000000000000000000000000000000000000000000000017","0x0000000000000000000000000000000000000000000000000000000000000018","0x0000000000000000000000000000000000000000000000000000000000000019","0x000000000000000000000000000000000000000000000000000000000000001a","0x000000000000000000000000000000000000000000000000000000000000001b","0x000000000000000000000000000000000000000000000000000000000000001c","0x000000000000000000000000000000000000000000000000000000000000001d","0x000000000000000000000000000000000000000000000000000000000000001e","0x000000000000000000000000000000000000000000000000000000000000001f"]}}}},"global_index":{"mainnet_flag":true,"rollup_index":0,"leaf_index":1}},{"bridge_exit":{"leaf_type":"Transfer","token_info":{"origin_network":1,"origin_token_address":"0x0000000000000000000000000000000000000789"},"dest_network":2,"dest_address":"0x0000000000000000000000000000000000abcdef","amount":"2201","metadata":[5,8]},"claim_data":{"Rollup":{"l1_leaf":{"l1_info_tree_index":2,"rer":"0x0000000000000000000000000000000000000000000000000000000000000532","mer":"0x0000000000000000000000000000000000000000000000000000000000654321","inner":{"global_exit_root":"0x0000000000000000000000000000000000000000000000000000000000000777","block_hash":"0x0000000000000000000000000000000000000000000000000000000000000888","timestamp":12345678}},"proof_ger_l1root":{"root":"0x0000000000000000000000000000000000000000000000000000000000000555","proof":{"siblings":["0x0000000000000000000000000000000000000000000000000000000000000000","0x0000000000000000000000000000000000000000000000000000000000000001","0x0000000000000000000000000000000000000000000000000000000000000002","0x0000000000000000000000000000000000000000000000000000000000000003","0x0000000000000000000000000000000000000000000000000000000000000004","0x0000000000000000000000000000000000000000000000000000000000000005","0x0000000000000000000000000000000000
000000000000000000000000000006","0x0000000000000000000000000000000000000000000000000000000000000007","0x0000000000000000000000000000000000000000000000000000000000000008","0x0000000000000000000000000000000000000000000000000000000000000009","0x000000000000000000000000000000000000000000000000000000000000000a","0x000000000000000000000000000000000000000000000000000000000000000b","0x000000000000000000000000000000000000000000000000000000000000000c","0x000000000000000000000000000000000000000000000000000000000000000d","0x000000000000000000000000000000000000000000000000000000000000000e","0x000000000000000000000000000000000000000000000000000000000000000f","0x0000000000000000000000000000000000000000000000000000000000000010","0x0000000000000000000000000000000000000000000000000000000000000011","0x0000000000000000000000000000000000000000000000000000000000000012","0x0000000000000000000000000000000000000000000000000000000000000013","0x0000000000000000000000000000000000000000000000000000000000000014","0x0000000000000000000000000000000000000000000000000000000000000015","0x0000000000000000000000000000000000000000000000000000000000000016","0x0000000000000000000000000000000000000000000000000000000000000017","0x0000000000000000000000000000000000000000000000000000000000000018","0x0000000000000000000000000000000000000000000000000000000000000019","0x000000000000000000000000000000000000000000000000000000000000001a","0x000000000000000000000000000000000000000000000000000000000000001b","0x000000000000000000000000000000000000000000000000000000000000001c","0x000000000000000000000000000000000000000000000000000000000000001d","0x000000000000000000000000000000000000000000000000000000000000001e","0x000000000000000000000000000000000000000000000000000000000000001f"]}},"proof_leaf_ler":{"root":"0x0000000000000000000000000000000000000000000000000000000000000333","proof":{"siblings":["0x0000000000000000000000000000000000000000000000000000000000000000","0x0000000000000000000000000000000000000000000000000000000000000001","0x0000000000000000000000000000000000000000000000000000000000000002","0x0000000000000000000000000000000000000000000000000000000000000003","0x0000000000000000000000000000000000000000000000000000000000000004","0x0000000000000000000000000000000000000000000000000000000000000005","0x0000000000000000000000000000000000000000000000000000000000000006","0x0000000000000000000000000000000000000000000000000000000000000007","0x0000000000000000000000000000000000000000000000000000000000000008","0x0000000000000000000000000000000000000000000000000000000000000009","0x000000000000000000000000000000000000000000000000000000000000000a","0x000000000000000000000000000000000000000000000000000000000000000b","0x000000000000000000000000000000000000000000000000000000000000000c","0x000000000000000000000000000000000000000000000000000000000000000d","0x000000000000000000000000000000000000000000000000000000000000000e","0x000000000000000000000000000000000000000000000000000000000000000f","0x0000000000000000000000000000000000000000000000000000000000000010","0x0000000000000000000000000000000000000000000000000000000000000011","0x0000000000000000000000000000000000000000000000000000000000000012","0x0000000000000000000000000000000000000000000000000000000000000013","0x0000000000000000000000000000000000000000000000000000000000000014","0x0000000000000000000000000000000000000000000000000000000000000015","0x0000000000000000000000000000000000000000000000000000000000000016","0x0000000000000000000000000000000000000000000000000000000000000017","0x000000000000000000000
0000000000000000000000000000000000000000018","0x0000000000000000000000000000000000000000000000000000000000000019","0x000000000000000000000000000000000000000000000000000000000000001a","0x000000000000000000000000000000000000000000000000000000000000001b","0x000000000000000000000000000000000000000000000000000000000000001c","0x000000000000000000000000000000000000000000000000000000000000001d","0x000000000000000000000000000000000000000000000000000000000000001e","0x000000000000000000000000000000000000000000000000000000000000001f"]}},"proof_ler_rer":{"root":"0x0000000000000000000000000000000000000000000000000000000000000444","proof":{"siblings":["0x0000000000000000000000000000000000000000000000000000000000000000","0x0000000000000000000000000000000000000000000000000000000000000001","0x0000000000000000000000000000000000000000000000000000000000000002","0x0000000000000000000000000000000000000000000000000000000000000003","0x0000000000000000000000000000000000000000000000000000000000000004","0x0000000000000000000000000000000000000000000000000000000000000005","0x0000000000000000000000000000000000000000000000000000000000000006","0x0000000000000000000000000000000000000000000000000000000000000007","0x0000000000000000000000000000000000000000000000000000000000000008","0x0000000000000000000000000000000000000000000000000000000000000009","0x000000000000000000000000000000000000000000000000000000000000000a","0x000000000000000000000000000000000000000000000000000000000000000b","0x000000000000000000000000000000000000000000000000000000000000000c","0x000000000000000000000000000000000000000000000000000000000000000d","0x000000000000000000000000000000000000000000000000000000000000000e","0x000000000000000000000000000000000000000000000000000000000000000f","0x0000000000000000000000000000000000000000000000000000000000000010","0x0000000000000000000000000000000000000000000000000000000000000011","0x0000000000000000000000000000000000000000000000000000000000000012","0x0000000000000000000000000000000000000000000000000000000000000013","0x0000000000000000000000000000000000000000000000000000000000000014","0x0000000000000000000000000000000000000000000000000000000000000015","0x0000000000000000000000000000000000000000000000000000000000000016","0x0000000000000000000000000000000000000000000000000000000000000017","0x0000000000000000000000000000000000000000000000000000000000000018","0x0000000000000000000000000000000000000000000000000000000000000019","0x000000000000000000000000000000000000000000000000000000000000001a","0x000000000000000000000000000000000000000000000000000000000000001b","0x000000000000000000000000000000000000000000000000000000000000001c","0x000000000000000000000000000000000000000000000000000000000000001d","0x000000000000000000000000000000000000000000000000000000000000001e","0x000000000000000000000000000000000000000000000000000000000000001f"]}}}},"global_index":{"mainnet_flag":false,"rollup_index":1,"leaf_index":2}}],"metadata":"0x0000000000000000000000000000000000000000000000000000000000000def","signature":{"r":"0x0000000000000000000000000000000000000000000000000000000000000111","s":"0x0000000000000000000000000000000000000000000000000000000000000222","odd_y_parity":true}}` - - data, err := json.Marshal(cert) - require.NoError(t, err) - require.Equal(t, expectedJSON, string(data)) - - require.Equal(t, "0xda355a601420351a0c950ebb34b6278580978d7b6a215338531d543a8f03574a", cert.Hash().String()) - require.Equal(t, "0x2f01782930cbf2bc2ab4ec16759a2288ad7df865dea387aadf55f96136269cf4", cert.BridgeExits[0].Hash().String()) - 
require.Equal(t, "0xac83b106ad2ca491828d49613c8356a15e3de298c794e1abd9632dc4d03b7c79", cert.ImportedBridgeExits[0].Hash().String()) - require.Equal(t, "0x6d9dc59396058ef7845fd872a87e77f1a58d010a760957f8814bd3d2ca5914a1", cert.ImportedBridgeExits[1].Hash().String()) - }) -} - -func TestSignedCertificate_Copy(t *testing.T) { - t.Parallel() - - t.Run("copy with non-nil fields", func(t *testing.T) { - t.Parallel() - - original := &SignedCertificate{ - Certificate: &Certificate{ - NetworkID: 1, - Height: 100, - PrevLocalExitRoot: [32]byte{0x01}, - NewLocalExitRoot: [32]byte{0x02}, - BridgeExits: []*BridgeExit{ - { - LeafType: LeafTypeAsset, - TokenInfo: &TokenInfo{OriginNetwork: 1, OriginTokenAddress: common.HexToAddress("0x123")}, - DestinationNetwork: 2, - DestinationAddress: common.HexToAddress("0x456"), - Amount: big.NewInt(1000), - Metadata: []byte{0x01, 0x02}, - }, - }, - ImportedBridgeExits: []*ImportedBridgeExit{ - { - BridgeExit: &BridgeExit{ - LeafType: LeafTypeMessage, - TokenInfo: &TokenInfo{OriginNetwork: 1, OriginTokenAddress: common.HexToAddress("0x789")}, - DestinationNetwork: 3, - DestinationAddress: common.HexToAddress("0xabc"), - Amount: big.NewInt(2000), - Metadata: []byte{0x03, 0x04}, - }, - ClaimData: &ClaimFromMainnnet{}, - GlobalIndex: &GlobalIndex{MainnetFlag: true, RollupIndex: 1, LeafIndex: 2}, - }, - }, - Metadata: common.HexToHash("0xdef"), - }, - Signature: &Signature{ - R: common.HexToHash("0x111"), - S: common.HexToHash("0x222"), - OddParity: true, - }, - } - - certificateCopy := original.CopyWithDefaulting() - - require.NotNil(t, certificateCopy) - require.NotSame(t, original, certificateCopy) - require.NotSame(t, original.Certificate, certificateCopy.Certificate) - require.Same(t, original.Signature, certificateCopy.Signature) - require.Equal(t, original, certificateCopy) - }) - - t.Run("copy with nil BridgeExits, ImportedBridgeExits and Signature", func(t *testing.T) { - t.Parallel() - - original := &SignedCertificate{ - Certificate: &Certificate{ - NetworkID: 1, - Height: 100, - PrevLocalExitRoot: [32]byte{0x01}, - NewLocalExitRoot: [32]byte{0x02}, - BridgeExits: nil, - ImportedBridgeExits: nil, - Metadata: common.HexToHash("0xdef"), - }, - Signature: nil, - } - - certificateCopy := original.CopyWithDefaulting() - - require.NotNil(t, certificateCopy) - require.NotSame(t, original, certificateCopy) - require.NotSame(t, original.Certificate, certificateCopy.Certificate) - require.NotNil(t, certificateCopy.Signature) - require.Equal(t, original.NetworkID, certificateCopy.NetworkID) - require.Equal(t, original.Height, certificateCopy.Height) - require.Equal(t, original.PrevLocalExitRoot, certificateCopy.PrevLocalExitRoot) - require.Equal(t, original.NewLocalExitRoot, certificateCopy.NewLocalExitRoot) - require.Equal(t, original.Metadata, certificateCopy.Metadata) - require.NotNil(t, certificateCopy.BridgeExits) - require.NotNil(t, certificateCopy.ImportedBridgeExits) - require.Empty(t, certificateCopy.BridgeExits) - require.Empty(t, certificateCopy.ImportedBridgeExits) - }) -} - -func TestGlobalIndex_UnmarshalFromMap(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - data map[string]interface{} - want *GlobalIndex - wantErr bool - }{ - { - name: "valid data", - data: map[string]interface{}{ - "rollup_index": uint32(0), - "leaf_index": uint32(2), - "mainnet_flag": true, - }, - want: &GlobalIndex{ - RollupIndex: 0, - LeafIndex: 2, - MainnetFlag: true, - }, - wantErr: false, - }, - { - name: "missing rollup_index", - data: map[string]interface{}{ 
- "leaf_index": uint32(2), - "mainnet_flag": true, - }, - want: &GlobalIndex{}, - wantErr: true, - }, - { - name: "invalid rollup_index type", - data: map[string]interface{}{ - "rollup_index": "invalid", - "leaf_index": uint32(2), - "mainnet_flag": true, - }, - want: &GlobalIndex{}, - wantErr: true, - }, - { - name: "missing leaf_index", - data: map[string]interface{}{ - "rollup_index": uint32(1), - "mainnet_flag": true, - }, - want: &GlobalIndex{}, - wantErr: true, - }, - { - name: "invalid leaf_index type", - data: map[string]interface{}{ - "rollup_index": uint32(1), - "leaf_index": "invalid", - "mainnet_flag": true, - }, - want: &GlobalIndex{}, - wantErr: true, - }, - { - name: "missing mainnet_flag", - data: map[string]interface{}{ - "rollup_index": uint32(1), - "leaf_index": uint32(2), - }, - want: &GlobalIndex{}, - wantErr: true, - }, - { - name: "invalid mainnet_flag type", - data: map[string]interface{}{ - "rollup_index": uint32(1), - "leaf_index": uint32(2), - "mainnet_flag": "invalid", - }, - want: &GlobalIndex{}, - wantErr: true, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - g := &GlobalIndex{} - err := g.UnmarshalFromMap(tt.data) - if tt.wantErr { - require.Error(t, err) - } else { - require.NoError(t, err) - require.Equal(t, tt.want, g) - } - }) - } -} - -func TestUnmarshalCertificateHeader_UnknownError(t *testing.T) { - t.Parallel() - - rawCertificateHeader := `{ - "network_id": 14, - "height": 0, - "epoch_number": null, - "certificate_index": null, - "certificate_id": "0x3af88c9ca106822bd141fdc680dcb888f4e9d4997fad1645ba3d5d747059eb32", - "new_local_exit_root": "0x625e889ced3c31277c6653229096374d396a2fd3564a8894aaad2ff935d2fc8c", - "metadata": "0x0000000000000000000000000000000000000000000000000000000000002f3d", - "status": { - "InError": { - "error": { - "ProofVerificationFailed": { - "Plonk": "the verifying key does not match the inner plonk bn254 proof's committed verifying key" - } - } - } - } - }` - - var result *CertificateHeader - err := json.Unmarshal([]byte(rawCertificateHeader), &result) - require.NoError(t, err) - require.NotNil(t, result) - - expectedErr := &GenericError{ - Key: "ProofVerificationFailed", - Value: "{\"Plonk\":\"the verifying key does not match the inner plonk bn254 proof's committed verifying key\"}", - } - - require.Equal(t, expectedErr, result.Error) -} - -func TestConvertNumeric(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - value float64 - target reflect.Type - expected interface{} - expectedErr error - }{ - // Integer conversions - {"FloatToInt", 42.5, reflect.TypeOf(int(0)), int(42), nil}, - {"FloatToInt8", 127.5, reflect.TypeOf(int8(0)), int8(127), nil}, - {"FloatToInt16", 32767.5, reflect.TypeOf(int16(0)), int16(32767), nil}, - {"FloatToInt32", 2147483647.5, reflect.TypeOf(int32(0)), int32(2147483647), nil}, - {"FloatToInt64", -10000000000000000.9, reflect.TypeOf(int64(0)), int64(-10000000000000000), nil}, - - // Unsigned integer conversions - {"FloatToUint", 42.5, reflect.TypeOf(uint(0)), uint(42), nil}, - {"FloatToUint8", 255.5, reflect.TypeOf(uint8(0)), uint8(255), nil}, - {"FloatToUint16", 65535.5, reflect.TypeOf(uint16(0)), uint16(65535), nil}, - {"FloatToUint32", 4294967295.5, reflect.TypeOf(uint32(0)), uint32(4294967295), nil}, - {"FloatToUint64", 10000000000000000.9, reflect.TypeOf(uint64(0)), uint64(10000000000000000), nil}, - - // Float conversions - {"FloatToFloat32", 3.14, reflect.TypeOf(float32(0)), float32(3.14), nil}, - {"FloatToFloat64", 
3.14, reflect.TypeOf(float64(0)), float64(3.14), nil}, - - // Unsupported type - {"UnsupportedType", 3.14, reflect.TypeOf("string"), nil, errors.New("unsupported target type string")}, - } - - for _, tt := range tests { - tt := tt - - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - result, err := convertNumeric(tt.value, tt.target) - if tt.expectedErr != nil { - require.ErrorContains(t, err, tt.expectedErr.Error()) - } else { - require.NoError(t, err) - } - require.Equal(t, tt.expected, result) - }) - } -} - -func TestCertificate_Hash(t *testing.T) { - t.Parallel() - - // Test inputs - prevLocalExitRoot := [common.HashLength]byte{} - newLocalExitRoot := [common.HashLength]byte{} - copy(prevLocalExitRoot[:], bytes.Repeat([]byte{0x01}, common.HashLength)) - copy(newLocalExitRoot[:], bytes.Repeat([]byte{0x02}, common.HashLength)) - - // Create dummy BridgeExits - bridgeExits := []*BridgeExit{ - { - LeafType: LeafTypeAsset, - TokenInfo: createDummyTokenInfo(t), - DestinationNetwork: 1, - DestinationAddress: common.HexToAddress("0x0000000000000000000000000000000000000001"), - Amount: big.NewInt(100), - Metadata: []byte("metadata1"), - }, - { - LeafType: LeafTypeMessage, - TokenInfo: createDummyTokenInfo(t), - DestinationNetwork: 2, - DestinationAddress: common.HexToAddress("0x0000000000000000000000000000000000000002"), - Amount: big.NewInt(200), - Metadata: []byte("metadata2"), - }, - } - - // Create dummy ImportedBridgeExits - importedBridgeExits := []*ImportedBridgeExit{ - { - BridgeExit: &BridgeExit{ - LeafType: LeafTypeAsset, - TokenInfo: createDummyTokenInfo(t), - DestinationNetwork: 3, - DestinationAddress: common.HexToAddress("0x0000000000000000000000000000000000000003"), - Amount: big.NewInt(300), - Metadata: []byte("metadata3"), - }, - ClaimData: createDummyClaim(t), - GlobalIndex: createDummyGlobalIndex(t), - }, - { - BridgeExit: &BridgeExit{ - LeafType: LeafTypeAsset, - TokenInfo: createDummyTokenInfo(t), - DestinationNetwork: 4, - DestinationAddress: common.HexToAddress("0x0000000000000000000000000000000000000004"), - Amount: big.NewInt(400), - Metadata: []byte("metadata4"), - }, - ClaimData: createDummyClaim(t), - GlobalIndex: createDummyGlobalIndex(t), - }, - } - - metadata := common.HexToHash("0x123456789abcdef123456789abcdef123456789abcdef123456789abcdef1234") - - // Create the certificate - certificate := &Certificate{ - NetworkID: 1, - Height: 100, - PrevLocalExitRoot: prevLocalExitRoot, - NewLocalExitRoot: newLocalExitRoot, - BridgeExits: bridgeExits, - ImportedBridgeExits: importedBridgeExits, - Metadata: metadata, - } - - // Manually calculate the expected hash - bridgeExitsHashes := [][]byte{ - bridgeExits[0].Hash().Bytes(), - bridgeExits[1].Hash().Bytes(), - } - importedBridgeExitsHashes := [][]byte{ - importedBridgeExits[0].Hash().Bytes(), - importedBridgeExits[1].Hash().Bytes(), - } - - bridgeExitsPart := crypto.Keccak256(bridgeExitsHashes...) - importedBridgeExitsPart := crypto.Keccak256(importedBridgeExitsHashes...) 
- - expectedHash := crypto.Keccak256Hash( - cdkcommon.Uint32ToBytes(1), - cdkcommon.Uint64ToBytes(100), - prevLocalExitRoot[:], - newLocalExitRoot[:], - bridgeExitsPart, - importedBridgeExitsPart, - ) - - // Test the certificate hash - calculatedHash := certificate.Hash() - - require.Equal(t, calculatedHash, expectedHash) -} - -func TestCertificate_HashToSign(t *testing.T) { - t.Parallel() - - c := &Certificate{ - NewLocalExitRoot: common.HexToHash("0xabcd"), - ImportedBridgeExits: []*ImportedBridgeExit{ - { - GlobalIndex: &GlobalIndex{ - MainnetFlag: true, - RollupIndex: 23, - LeafIndex: 1, - }, - }, - { - GlobalIndex: &GlobalIndex{ - MainnetFlag: false, - RollupIndex: 15, - LeafIndex: 2, - }, - }, - }, - } - - globalIndexHashes := make([][]byte, len(c.ImportedBridgeExits)) - for i, importedBridgeExit := range c.ImportedBridgeExits { - globalIndexHashes[i] = importedBridgeExit.GlobalIndex.Hash().Bytes() - } - - expectedHash := crypto.Keccak256Hash( - c.NewLocalExitRoot[:], - crypto.Keccak256Hash(globalIndexHashes...).Bytes(), - ) - - certHash := c.HashToSign() - require.Equal(t, expectedHash, certHash) -} - -func TestClaimFromMainnnet_MarshalJSON(t *testing.T) { - t.Parallel() - - // Test data - merkleProof := &MerkleProof{ - Root: common.HexToHash("0x1"), - Proof: [types.DefaultHeight]common.Hash{ - common.HexToHash("0x2"), - common.HexToHash("0x3"), - }, - } - - l1InfoTreeLeaf := &L1InfoTreeLeaf{ - L1InfoTreeIndex: 42, - RollupExitRoot: [common.HashLength]byte{0xaa, 0xbb, 0xcc}, - MainnetExitRoot: [common.HashLength]byte{0xdd, 0xee, 0xff}, - Inner: &L1InfoTreeLeafInner{ - GlobalExitRoot: common.HexToHash("0x1"), - BlockHash: common.HexToHash("0x2"), - Timestamp: 1672531200, // Example timestamp - }, - } - - claim := &ClaimFromMainnnet{ - ProofLeafMER: merkleProof, - ProofGERToL1Root: merkleProof, - L1Leaf: l1InfoTreeLeaf, - } - - // Marshal the ClaimFromMainnnet struct to JSON - expectedJSON, err := claim.MarshalJSON() - require.NoError(t, err) - - var actualClaim ClaimFromMainnnet - err = json.Unmarshal(expectedJSON, &actualClaim) - require.NoError(t, err) -} - -func TestBridgeExit_String(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - bridgeExit *BridgeExit - expectedOutput string - }{ - { - name: "With TokenInfo", - bridgeExit: &BridgeExit{ - LeafType: LeafTypeAsset, - TokenInfo: createDummyTokenInfo(t), - DestinationNetwork: 100, - DestinationAddress: common.HexToAddress("0x2"), - Amount: big.NewInt(1000), - Metadata: []byte{0x01, 0x02, 0x03}, - }, - expectedOutput: "LeafType: Transfer, DestinationNetwork: 100, DestinationAddress: 0x0000000000000000000000000000000000000002, Amount: 1000, Metadata: 010203, TokenInfo: OriginNetwork: 1, OriginTokenAddress: 0x0000000000000000000000000000000000002345", - }, - { - name: "Without TokenInfo", - bridgeExit: &BridgeExit{ - LeafType: LeafTypeMessage, - DestinationNetwork: 200, - DestinationAddress: common.HexToAddress("0x1"), - Amount: big.NewInt(5000), - Metadata: []byte{0xff, 0xee, 0xdd}, - }, - expectedOutput: "LeafType: Message, DestinationNetwork: 200, DestinationAddress: 0x0000000000000000000000000000000000000001, Amount: 5000, Metadata: ffeedd, TokenInfo: nil", - }, - } - - for _, tt := range tests { - tt := tt - - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - actualOutput := tt.bridgeExit.String() - require.Equal(t, tt.expectedOutput, actualOutput) - }) - } -} - -func TestCertificateStatus_UnmarshalJSON(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - input string - expected 
CertificateStatus - expectError bool - }{ - { - name: "Valid status - Pending", - input: `"Pending"`, - expected: Pending, - expectError: false, - }, - { - name: "Valid status - Proven", - input: `"Proven"`, - expected: Proven, - expectError: false, - }, - { - name: "Valid status - Candidate", - input: `"Candidate"`, - expected: Candidate, - expectError: false, - }, - { - name: "Valid status - InError", - input: `"InError"`, - expected: InError, - expectError: false, - }, - { - name: "Valid status - Settled", - input: `"Settled"`, - expected: Settled, - expectError: false, - }, - { - name: "Invalid status", - input: `"InvalidStatus"`, - expected: 0, // Unchanged (default value of CertificateStatus) - expectError: true, - }, - { - name: "Contains 'InError' string", - input: `"SomeStringWithInError"`, - expected: InError, - expectError: false, - }, - } - - for _, tt := range tests { - tt := tt - - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - var status CertificateStatus - err := json.Unmarshal([]byte(tt.input), &status) - - if tt.expectError { - require.Error(t, err) - } else { - require.NoError(t, err) - require.Equal(t, tt.expected, status) - } - }) - } -} - -func TestMerkleProof_String(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - proof MerkleProof - expected string - }{ - { - name: "Empty Root and Empty Proof", - proof: MerkleProof{ - Root: common.Hash{}, - Proof: [types.DefaultHeight]common.Hash{}, - }, - expected: fmt.Sprintf("Root: %s, Proof: %v", common.Hash{}.String(), [types.DefaultHeight]common.Hash{}), - }, - { - name: "Non-Empty Root and Empty Proof", - proof: MerkleProof{ - Root: common.HexToHash("0xabc123"), - Proof: [types.DefaultHeight]common.Hash{}, - }, - expected: fmt.Sprintf("Root: %s, Proof: %v", common.HexToHash("0xabc123").String(), [types.DefaultHeight]common.Hash{}), - }, - { - name: "Non-Empty Root and Partially Populated Proof", - proof: MerkleProof{ - Root: common.HexToHash("0xabc123"), - Proof: [types.DefaultHeight]common.Hash{ - common.HexToHash("0xdef456"), - common.HexToHash("0x123789"), - }, - }, - expected: fmt.Sprintf("Root: %s, Proof: %v", - common.HexToHash("0xabc123").String(), - [types.DefaultHeight]common.Hash{ - common.HexToHash("0xdef456"), - common.HexToHash("0x123789"), - }), - }, - } - - for _, tt := range tests { - tt := tt - - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - result := tt.proof.String() - require.Equal(t, tt.expected, result) - }) - } -} - -func TestGlobalIndexString(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - input GlobalIndex - expected string - }{ - { - name: "All fields zero", - input: GlobalIndex{ - MainnetFlag: false, - RollupIndex: 0, - LeafIndex: 0, - }, - expected: "MainnetFlag: false, RollupIndex: 0, LeafIndex: 0", - }, - { - name: "MainnetFlag true, non-zero indices", - input: GlobalIndex{ - MainnetFlag: true, - RollupIndex: 123, - LeafIndex: 456, - }, - expected: "MainnetFlag: true, RollupIndex: 123, LeafIndex: 456", - }, - { - name: "MainnetFlag false, large indices", - input: GlobalIndex{ - MainnetFlag: false, - RollupIndex: 4294967295, // Maximum value of uint32 - LeafIndex: 2147483647, // Large but within uint32 range - }, - expected: "MainnetFlag: false, RollupIndex: 4294967295, LeafIndex: 2147483647", - }, - } - - for _, tt := range tests { - tt := tt - - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - result := tt.input.String() - require.Equal(t, tt.expected, result) - }) - } -} - -func TestL1InfoTreeLeafString(t *testing.T) { - 
t.Parallel() - - tests := []struct { - name string - input L1InfoTreeLeaf - expected string - }{ - { - name: "With valid Inner", - input: L1InfoTreeLeaf{ - L1InfoTreeIndex: 1, - RollupExitRoot: common.HexToHash("0x01"), - MainnetExitRoot: common.HexToHash("0x02"), - Inner: &L1InfoTreeLeafInner{ - GlobalExitRoot: common.HexToHash("0x03"), - BlockHash: common.HexToHash("0x04"), - Timestamp: 1234567890, - }, - }, - expected: "L1InfoTreeIndex: 1, RollupExitRoot: 0x0000000000000000000000000000000000000000000000000000000000000001, " + - "MainnetExitRoot: 0x0000000000000000000000000000000000000000000000000000000000000002, " + - "Inner: GlobalExitRoot: 0x0000000000000000000000000000000000000000000000000000000000000003, " + - "BlockHash: 0x0000000000000000000000000000000000000000000000000000000000000004, Timestamp: 1234567890", - }, - } - - for _, tt := range tests { - tt := tt - - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - result := tt.input.String() - require.Equal(t, tt.expected, result) - }) - } -} - -func TestClaimType(t *testing.T) { - t.Parallel() - - cases := []struct { - name string - claim Claim - expectedType string - }{ - { - name: "Mainnet claim", - claim: &ClaimFromMainnnet{}, - expectedType: "Mainnet", - }, - { - name: "Rollup claim", - claim: &ClaimFromRollup{}, - expectedType: "Rollup", - }, - } - - for _, c := range cases { - c := c - - t.Run(c.name, func(t *testing.T) { - t.Parallel() - - actualType := c.claim.Type() - require.Equal(t, c.expectedType, actualType) - }) - } -} - -func Test_ProblematicBridgeExitHash(t *testing.T) { - bridgeExit := &BridgeExit{ - LeafType: LeafTypeAsset, - TokenInfo: &TokenInfo{ - OriginNetwork: 0, - OriginTokenAddress: common.HexToAddress("0x0000000000000000000000000000000000000000"), - }, - DestinationNetwork: 1, - DestinationAddress: common.HexToAddress("0xc949254d682d8c9ad5682521675b8f43b102aec4"), - Amount: new(big.Int).SetUint64(10000000000000000000), - IsMetadataHashed: false, - } - - require.Equal(t, "0x22ed288677b4c2afd83a6d7d55f7df7f4eaaf60f7310210c030fd27adacbc5e0", bridgeExit.Hash().Hex()) -} - -func Test_UnmarshalCertificate(t *testing.T) { - var cert SignedCertificate - err := json.Unmarshal([]byte(fullCertificateJSON), &cert) - require.NoError(t, err) - marshalData, err := json.Marshal(cert) - require.NoError(t, err) - require.JSONEq(t, fullCertificateJSON, string(marshalData)) -} - -func Test_UnmarshalImportedBridgeExit(t *testing.T) { - cases := []struct { - name string - importedBridge ImportedBridgeExit - }{ - { - name: "Empty", - importedBridge: ImportedBridgeExit{}, - }, - { - name: "Empty/BridgeExit", - importedBridge: ImportedBridgeExit{ - BridgeExit: &BridgeExit{}, - }, - }, - { - name: "Empty/GlobalIndex", - importedBridge: ImportedBridgeExit{ - GlobalIndex: &GlobalIndex{}, - }, - }, - { - name: "Empty/ClaimFromRollup", - importedBridge: ImportedBridgeExit{ - GlobalIndex: &GlobalIndex{}, - ClaimData: &ClaimFromRollup{}, - }, - }, - { - name: "Empty/ClaimFromMainnnet", - importedBridge: ImportedBridgeExit{ - ClaimData: &ClaimFromMainnnet{ - ProofLeafMER: &MerkleProof{ - Root: common.HexToHash("0x1"), - Proof: [types.DefaultHeight]common.Hash{common.HexToHash("0x2"), common.HexToHash("0x3")}, - }, - ProofGERToL1Root: &MerkleProof{}, - L1Leaf: &L1InfoTreeLeaf{}, - }, - }, - }, - - { - name: "Mainnet claim", - importedBridge: ImportedBridgeExit{ - BridgeExit: &BridgeExit{ - LeafType: LeafTypeAsset, - TokenInfo: &TokenInfo{ - OriginNetwork: 0, - OriginTokenAddress: common.HexToAddress("0x1234"), - }, - 
DestinationNetwork: 1, - DestinationAddress: common.HexToAddress("0x1234"), - Amount: big.NewInt(1000), - IsMetadataHashed: false, - Metadata: []byte{0x01, 0x02, 0x03}, - }, - ClaimData: &ClaimFromMainnnet{}, - GlobalIndex: &GlobalIndex{ - MainnetFlag: true, - RollupIndex: 1, - LeafIndex: 2, - }, - }, - }, - } - for _, tt := range cases { - t.Run(tt.name, func(t *testing.T) { - ser, err := json.Marshal(tt.importedBridge) - require.NoError(t, err) - unmarshallBridge := &ImportedBridgeExit{} - err = json.Unmarshal(ser, unmarshallBridge) - require.NoError(t, err) - require.Equal(t, tt.importedBridge, *unmarshallBridge) - }) - } -} - -func Test_UnmarshalMerkleProof(t *testing.T) { - mp := &MerkleProof{ - Root: common.HexToHash("0x1"), - Proof: [types.DefaultHeight]common.Hash{common.HexToHash("0x2"), common.HexToHash("0x3")}, - } - ser, err := json.Marshal(mp) - require.NoError(t, err) - unmarshallMp := &MerkleProof{} - err = json.Unmarshal(ser, unmarshallMp) - require.NoError(t, err) - require.Equal(t, mp, unmarshallMp) -} - -func Test_UnmarshalL1InfoTreeLeaf(t *testing.T) { - data := L1InfoTreeLeaf{ - L1InfoTreeIndex: 1, - RollupExitRoot: [common.HashLength]byte{0xaa, 0xbb, 0xcc}, - MainnetExitRoot: [common.HashLength]byte{0xdd, 0xee, 0xff}, - Inner: &L1InfoTreeLeafInner{ - GlobalExitRoot: common.HexToHash("0x1"), - BlockHash: common.HexToHash("0x2"), - Timestamp: 1672531200, // Example timestamp - }, - } - ser, err := json.Marshal(data) - require.NoError(t, err) - unmarshalled := &L1InfoTreeLeaf{} - err = json.Unmarshal(ser, unmarshalled) - require.NoError(t, err) - require.Equal(t, data, *unmarshalled) -} - -func Test_UnmarshalBridgeExit(t *testing.T) { - cases := []struct { - name string - data *BridgeExit - }{ - { - name: "metadataHashed", - data: &BridgeExit{ - LeafType: LeafTypeAsset, - IsMetadataHashed: true, - Metadata: []byte{0x01, 0x02, 0x03}, - }, - }, - { - name: "metadata no hashed", - data: &BridgeExit{ - LeafType: LeafTypeAsset, - IsMetadataHashed: false, - Metadata: []byte{0x01, 0x02, 0x03}, - }, - }, - } - - for _, tt := range cases { - t.Run(tt.name, func(t *testing.T) { - ser, err := json.Marshal(tt.data) - require.NoError(t, err) - unmarshalled := &BridgeExit{} - err = json.Unmarshal(ser, unmarshalled) - require.NoError(t, err) - require.Equal(t, tt.data, unmarshalled) - }) - } -} - -func Test_UnmarshalClaimFromMainnnet(t *testing.T) { - claim := &ClaimFromMainnnet{ - ProofLeafMER: &MerkleProof{ - Root: common.HexToHash("0x1"), - Proof: [types.DefaultHeight]common.Hash{common.HexToHash("0x2"), common.HexToHash("0x3")}, - }, - ProofGERToL1Root: &MerkleProof{}, - L1Leaf: &L1InfoTreeLeaf{}, - } - ser, err := json.Marshal(claim) - require.NoError(t, err) - unmarshalled := &ClaimFromMainnnet{} - err = json.Unmarshal(ser, unmarshalled) - require.NoError(t, err) - require.Equal(t, claim, unmarshalled) -} - -func Test_UnmarshalClaimFromRollup(t *testing.T) { - claim := &ClaimFromRollup{ - ProofLeafLER: &MerkleProof{ - Root: common.HexToHash("0x1"), - Proof: [types.DefaultHeight]common.Hash{common.HexToHash("0x2"), common.HexToHash("0x3")}, - }, - ProofLERToRER: &MerkleProof{ - Root: common.HexToHash("0x4"), - }, - ProofGERToL1Root: &MerkleProof{ - Root: common.HexToHash("0x5"), - }, - - L1Leaf: &L1InfoTreeLeaf{ - L1InfoTreeIndex: 1, - }, - } - ser, err := json.Marshal(claim) - require.NoError(t, err) - unmarshalled := &ClaimFromRollup{} - err = json.Unmarshal(ser, unmarshalled) - require.NoError(t, err) - require.Equal(t, claim, unmarshalled) -} diff --git 
a/aggsender/aggsender.go b/aggsender/aggsender.go deleted file mode 100644 index 133fb4fa3..000000000 --- a/aggsender/aggsender.go +++ /dev/null @@ -1,882 +0,0 @@ -package aggsender - -import ( - "context" - "crypto/ecdsa" - "encoding/json" - "errors" - "fmt" - "os" - "time" - - zkevm "github.com/0xPolygon/cdk" - jRPC "github.com/0xPolygon/cdk-rpc/rpc" - "github.com/0xPolygon/cdk/agglayer" - "github.com/0xPolygon/cdk/aggsender/db" - aggsenderrpc "github.com/0xPolygon/cdk/aggsender/rpc" - "github.com/0xPolygon/cdk/aggsender/types" - "github.com/0xPolygon/cdk/bridgesync" - cdkcommon "github.com/0xPolygon/cdk/common" - "github.com/0xPolygon/cdk/l1infotreesync" - "github.com/0xPolygon/cdk/log" - "github.com/0xPolygon/cdk/tree" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" -) - -const signatureSize = 65 - -var ( - errNoBridgesAndClaims = errors.New("no bridges and claims to build certificate") - errInvalidSignatureSize = errors.New("invalid signature size") - - zeroLER = common.HexToHash("0x27ae5ba08d7291c96c8cbddcc148bf48a6d68c7974b94356f53754ef6171d757") -) - -// AggSender is a component that will send certificates to the aggLayer -type AggSender struct { - log types.Logger - - l2Syncer types.L2BridgeSyncer - l1infoTreeSyncer types.L1InfoTreeSyncer - epochNotifier types.EpochNotifier - - storage db.AggSenderStorage - aggLayerClient agglayer.AgglayerClientInterface - - cfg Config - - sequencerKey *ecdsa.PrivateKey - - status types.AggsenderStatus -} - -// New returns a new AggSender -func New( - ctx context.Context, - logger *log.Logger, - cfg Config, - aggLayerClient agglayer.AgglayerClientInterface, - l1InfoTreeSyncer *l1infotreesync.L1InfoTreeSync, - l2Syncer types.L2BridgeSyncer, - epochNotifier types.EpochNotifier) (*AggSender, error) { - storageConfig := db.AggSenderSQLStorageConfig{ - DBPath: cfg.StoragePath, - KeepCertificatesHistory: cfg.KeepCertificatesHistory, - } - storage, err := db.NewAggSenderSQLStorage(logger, storageConfig) - if err != nil { - return nil, err - } - - sequencerPrivateKey, err := cdkcommon.NewKeyFromKeystore(cfg.AggsenderPrivateKey) - if err != nil { - return nil, err - } - - logger.Infof("Aggsender Config: %s.", cfg.String()) - - return &AggSender{ - cfg: cfg, - log: logger, - storage: storage, - l2Syncer: l2Syncer, - aggLayerClient: aggLayerClient, - l1infoTreeSyncer: l1InfoTreeSyncer, - sequencerKey: sequencerPrivateKey, - epochNotifier: epochNotifier, - status: types.AggsenderStatus{Status: types.StatusNone}, - }, nil -} - -func (a *AggSender) Info() types.AggsenderInfo { - res := types.AggsenderInfo{ - AggsenderStatus: a.status, - Version: zkevm.GetVersion(), - EpochNotifierDescription: a.epochNotifier.String(), - NetworkID: a.l2Syncer.OriginNetwork(), - } - return res -} - -// GetRPCServices returns the list of services that the RPC provider exposes -func (a *AggSender) GetRPCServices() []jRPC.Service { - if !a.cfg.EnableRPC { - return []jRPC.Service{} - } - - logger := log.WithFields("aggsender-rpc", cdkcommon.BRIDGE) - return []jRPC.Service{ - { - Name: "aggsender", - Service: aggsenderrpc.NewAggsenderRPC(logger, a.storage, a), - }, - } -} - -// Start starts the AggSender -func (a *AggSender) Start(ctx context.Context) { - a.log.Info("AggSender started") - a.status.Start(time.Now().UTC()) - a.checkInitialStatus(ctx) - a.sendCertificates(ctx) -} - -// checkInitialStatus check local status vs agglayer status -func (a *AggSender) checkInitialStatus(ctx context.Context) { - ticker := 
time.NewTicker(a.cfg.DelayBeetweenRetries.Duration) - defer ticker.Stop() - a.status.Status = types.StatusCheckingInitialStage - for { - err := a.checkLastCertificateFromAgglayer(ctx) - a.status.SetLastError(err) - if err != nil { - a.log.Errorf("error checking initial status: %w, retrying in %s", err, a.cfg.DelayBeetweenRetries.String()) - } else { - a.log.Info("Initial status checked successfully") - return - } - select { - case <-ctx.Done(): - return - case <-ticker.C: - } - } -} - -// sendCertificates sends certificates to the aggLayer -func (a *AggSender) sendCertificates(ctx context.Context) { - chEpoch := a.epochNotifier.Subscribe("aggsender") - a.status.Status = types.StatusCertificateStage - for { - select { - case epoch := <-chEpoch: - a.log.Infof("Epoch received: %s", epoch.String()) - thereArePendingCerts := a.checkPendingCertificatesStatus(ctx) - if !thereArePendingCerts { - _, err := a.sendCertificate(ctx) - a.status.SetLastError(err) - if err != nil { - a.log.Error(err) - } - } else { - log.Infof("Skipping epoch %s because there are pending certificates", - epoch.String()) - } - case <-ctx.Done(): - a.log.Info("AggSender stopped") - return - } - } -} - -// sendCertificate sends certificate for a network -func (a *AggSender) sendCertificate(ctx context.Context) (*agglayer.SignedCertificate, error) { - a.log.Infof("trying to send a new certificate...") - - shouldSend, err := a.shouldSendCertificate() - if err != nil { - return nil, err - } - - if !shouldSend { - a.log.Infof("waiting for pending certificates to be settled") - return nil, nil - } - - lastL2BlockSynced, err := a.l2Syncer.GetLastProcessedBlock(ctx) - if err != nil { - return nil, fmt.Errorf("error getting last processed block from l2: %w", err) - } - - lastSentCertificateInfo, err := a.storage.GetLastSentCertificate() - if err != nil { - return nil, err - } - - previousToBlock, retryCount := getLastSentBlockAndRetryCount(lastSentCertificateInfo) - - if previousToBlock >= lastL2BlockSynced { - a.log.Infof("no new blocks to send a certificate, last certificate block: %d, last L2 block: %d", - previousToBlock, lastL2BlockSynced) - return nil, nil - } - - fromBlock := previousToBlock + 1 - toBlock := lastL2BlockSynced - - bridges, err := a.l2Syncer.GetBridgesPublished(ctx, fromBlock, toBlock) - if err != nil { - return nil, fmt.Errorf("error getting bridges: %w", err) - } - - if len(bridges) == 0 { - a.log.Infof("no bridges consumed, no need to send a certificate from block: %d to block: %d", fromBlock, toBlock) - return nil, nil - } - - claims, err := a.l2Syncer.GetClaims(ctx, fromBlock, toBlock) - if err != nil { - return nil, fmt.Errorf("error getting claims: %w", err) - } - certificateParams := &types.CertificateBuildParams{ - FromBlock: fromBlock, - ToBlock: toBlock, - Bridges: bridges, - Claims: claims, - CreatedAt: uint32(time.Now().UTC().Unix()), - } - - certificateParams, err = a.limitCertSize(certificateParams) - if err != nil { - return nil, fmt.Errorf("error limitCertSize: %w", err) - } - a.log.Infof("building certificate for %s estimatedSize=%d", - certificateParams.String(), certificateParams.EstimatedSize()) - - certificate, err := a.buildCertificate(ctx, certificateParams, lastSentCertificateInfo) - if err != nil { - return nil, fmt.Errorf("error building certificate: %w", err) - } - - signedCertificate, err := a.signCertificate(certificate) - if err != nil { - return nil, fmt.Errorf("error signing certificate: %w", err) - } - - a.saveCertificateToFile(signedCertificate) - a.log.Infof("certificate 
ready to be send to AggLayer: %s", signedCertificate.Brief()) - if a.cfg.DryRun { - a.log.Warn("dry run mode enabled, skipping sending certificate") - return signedCertificate, nil - } - certificateHash, err := a.aggLayerClient.SendCertificate(signedCertificate) - if err != nil { - return nil, fmt.Errorf("error sending certificate: %w", err) - } - - a.log.Debugf("certificate send: Height: %d cert: %s", signedCertificate.Height, signedCertificate.Brief()) - - raw, err := json.Marshal(signedCertificate) - if err != nil { - return nil, fmt.Errorf("error marshalling signed certificate. Cert:%s. Err: %w", signedCertificate.Brief(), err) - } - - prevLER := common.BytesToHash(certificate.PrevLocalExitRoot[:]) - certInfo := types.CertificateInfo{ - Height: certificate.Height, - RetryCount: retryCount, - CertificateID: certificateHash, - NewLocalExitRoot: certificate.NewLocalExitRoot, - PreviousLocalExitRoot: &prevLER, - FromBlock: fromBlock, - ToBlock: toBlock, - CreatedAt: certificateParams.CreatedAt, - UpdatedAt: certificateParams.CreatedAt, - SignedCertificate: string(raw), - } - // TODO: Improve this case, if a cert is not save in the storage, we are going to settle a unknown certificate - err = a.saveCertificateToStorage(ctx, certInfo, a.cfg.MaxRetriesStoreCertificate) - if err != nil { - a.log.Errorf("error saving certificate to storage. Cert:%s Err: %w", certInfo.String(), err) - return nil, fmt.Errorf("error saving last sent certificate %s in db: %w", certInfo.String(), err) - } - - a.log.Infof("certificate: %s sent successfully for range of l2 blocks (from block: %d, to block: %d) cert:%s", - certInfo.ID(), fromBlock, toBlock, signedCertificate.Brief()) - - return signedCertificate, nil -} - -// saveCertificateToStorage saves the certificate to the storage -// it retries if it fails. 
if param retries == 0 it retries indefinitely -func (a *AggSender) saveCertificateToStorage(ctx context.Context, cert types.CertificateInfo, maxRetries int) error { - retries := 1 - err := fmt.Errorf("initial_error") - for err != nil { - if err = a.storage.SaveLastSentCertificate(ctx, cert); err != nil { - // If this happens we can't work as normal, because local DB is outdated, we have to retry - a.log.Errorf("error saving last sent certificate %s in db: %w", cert.String(), err) - if retries == maxRetries { - return fmt.Errorf("error saving last sent certificate %s in db: %w", cert.String(), err) - } else { - retries++ - time.Sleep(a.cfg.DelayBeetweenRetries.Duration) - } - } - } - return nil -} - -func (a *AggSender) limitCertSize(fullCert *types.CertificateBuildParams) (*types.CertificateBuildParams, error) { - currentCert := fullCert - var previousCert *types.CertificateBuildParams - var err error - for { - if currentCert.NumberOfBridges() == 0 { - // We can't reduce more the certificate, so this is the minium size - a.log.Warnf("We reach the minium size of bridge.Certificate size: %d >max size: %d", - previousCert.EstimatedSize(), a.cfg.MaxCertSize) - return previousCert, nil - } - - if a.cfg.MaxCertSize == 0 || currentCert.EstimatedSize() < a.cfg.MaxCertSize { - return currentCert, nil - } - - // Minimum size of the certificate - if currentCert.NumberOfBlocks() <= 1 { - a.log.Warnf("reach the minium num blocks [%d to %d].Certificate size: %d >max size: %d", - currentCert.FromBlock, currentCert.ToBlock, currentCert.EstimatedSize(), a.cfg.MaxCertSize) - return currentCert, nil - } - previousCert = currentCert - currentCert, err = currentCert.Range(currentCert.FromBlock, currentCert.ToBlock-1) - if err != nil { - return nil, fmt.Errorf("error reducing certificate: %w", err) - } - } -} - -// saveCertificate saves the certificate to a tmp file -func (a *AggSender) saveCertificateToFile(signedCertificate *agglayer.SignedCertificate) { - if signedCertificate == nil || a.cfg.SaveCertificatesToFilesPath == "" { - return - } - fn := fmt.Sprintf("%s/certificate_%04d-%07d.json", - a.cfg.SaveCertificatesToFilesPath, signedCertificate.Height, time.Now().Unix()) - a.log.Infof("saving certificate to file: %s", fn) - jsonData, err := json.MarshalIndent(signedCertificate, "", " ") - if err != nil { - a.log.Errorf("error marshalling certificate: %w", err) - } - - if err = os.WriteFile(fn, jsonData, 0644); err != nil { //nolint:gosec,mnd // we are writing to a tmp file - a.log.Errorf("error writing certificate to file: %w", err) - } -} - -// getNextHeightAndPreviousLER returns the height and previous LER for the new certificate -func (a *AggSender) getNextHeightAndPreviousLER( - lastSentCertificateInfo *types.CertificateInfo) (uint64, common.Hash, error) { - if lastSentCertificateInfo == nil { - return 0, zeroLER, nil - } - if !lastSentCertificateInfo.Status.IsClosed() { - return 0, zeroLER, fmt.Errorf("last certificate %s is not closed (status: %s)", - lastSentCertificateInfo.ID(), lastSentCertificateInfo.Status.String()) - } - if lastSentCertificateInfo.Status.IsSettled() { - return lastSentCertificateInfo.Height + 1, lastSentCertificateInfo.NewLocalExitRoot, nil - } - - if lastSentCertificateInfo.Status.IsInError() { - // We can reuse last one of lastCert? 
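The save-with-retry behaviour above (retry until success, sleep between attempts, give up after maxRetries unless it is zero) can be sketched as a small generic helper. retry, fn, maxRetries and delay below are illustrative names, not the deleted implementation, and only the standard library is used:

package main

import (
    "errors"
    "fmt"
    "time"
)

// retry keeps calling fn until it succeeds. If maxRetries > 0 it gives up
// after that many attempts; if maxRetries == 0 it retries indefinitely,
// sleeping delay between attempts (the role DelayBeetweenRetries plays above).
func retry(fn func() error, maxRetries int, delay time.Duration) error {
    for attempt := 1; ; attempt++ {
        err := fn()
        if err == nil {
            return nil
        }
        if maxRetries > 0 && attempt >= maxRetries {
            return fmt.Errorf("giving up after %d attempts: %w", attempt, err)
        }
        time.Sleep(delay)
    }
}

func main() {
    calls := 0
    err := retry(func() error {
        calls++
        if calls < 3 {
            return errors.New("transient failure")
        }
        return nil
    }, 5, 10*time.Millisecond)
    fmt.Println(calls, err) // 3 <nil>
}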
- if lastSentCertificateInfo.PreviousLocalExitRoot != nil { - return lastSentCertificateInfo.Height, *lastSentCertificateInfo.PreviousLocalExitRoot, nil - } - // Is the first one, so we can set the zeroLER - if lastSentCertificateInfo.Height == 0 { - return 0, zeroLER, nil - } - // We get previous certificate that must be settled - a.log.Debugf("last certificate %s is in error, getting previous settled certificate height:%d", - lastSentCertificateInfo.Height-1) - lastSettleCert, err := a.storage.GetCertificateByHeight(lastSentCertificateInfo.Height - 1) - if err != nil { - return 0, common.Hash{}, fmt.Errorf("error getting last settled certificate: %w", err) - } - if lastSettleCert == nil { - return 0, common.Hash{}, fmt.Errorf("none settled certificate: %w", err) - } - if !lastSettleCert.Status.IsSettled() { - return 0, common.Hash{}, fmt.Errorf("last settled certificate %s is not settled (status: %s)", - lastSettleCert.ID(), lastSettleCert.Status.String()) - } - - return lastSentCertificateInfo.Height, lastSettleCert.NewLocalExitRoot, nil - } - return 0, zeroLER, fmt.Errorf("last certificate %s has an unknown status: %s", - lastSentCertificateInfo.ID(), lastSentCertificateInfo.Status.String()) -} - -// buildCertificate builds a certificate from the bridge events -func (a *AggSender) buildCertificate(ctx context.Context, - certParams *types.CertificateBuildParams, - lastSentCertificateInfo *types.CertificateInfo) (*agglayer.Certificate, error) { - if certParams.IsEmpty() { - return nil, errNoBridgesAndClaims - } - - bridgeExits := a.getBridgeExits(certParams.Bridges) - importedBridgeExits, err := a.getImportedBridgeExits(ctx, certParams.Claims) - if err != nil { - return nil, fmt.Errorf("error getting imported bridge exits: %w", err) - } - - depositCount := certParams.MaxDepositCount() - - exitRoot, err := a.l2Syncer.GetExitRootByIndex(ctx, depositCount) - if err != nil { - return nil, fmt.Errorf("error getting exit root by index: %d. 
Error: %w", depositCount, err) - } - - height, previousLER, err := a.getNextHeightAndPreviousLER(lastSentCertificateInfo) - if err != nil { - return nil, fmt.Errorf("error getting next height and previous LER: %w", err) - } - - meta := types.NewCertificateMetadata( - certParams.FromBlock, - uint32(certParams.ToBlock-certParams.FromBlock), - certParams.CreatedAt, - ) - - return &agglayer.Certificate{ - NetworkID: a.l2Syncer.OriginNetwork(), - PrevLocalExitRoot: previousLER, - NewLocalExitRoot: exitRoot.Hash, - BridgeExits: bridgeExits, - ImportedBridgeExits: importedBridgeExits, - Height: height, - Metadata: meta.ToHash(), - }, nil -} - -// createCertificateMetadata creates the metadata for the certificate -// it returns: newMetadata + bool if the metadata is hashed or not -func convertBridgeMetadata(metadata []byte, importedBridgeMetadataAsHash bool) ([]byte, bool) { - var metaData []byte - var isMetadataHashed bool - if importedBridgeMetadataAsHash && len(metadata) > 0 { - metaData = crypto.Keccak256(metadata) - isMetadataHashed = true - } else { - metaData = metadata - isMetadataHashed = false - } - return metaData, isMetadataHashed -} - -// convertClaimToImportedBridgeExit converts a claim to an ImportedBridgeExit object -func (a *AggSender) convertClaimToImportedBridgeExit(claim bridgesync.Claim) (*agglayer.ImportedBridgeExit, error) { - leafType := agglayer.LeafTypeAsset - if claim.IsMessage { - leafType = agglayer.LeafTypeMessage - } - metaData, isMetadataIsHashed := convertBridgeMetadata(claim.Metadata, a.cfg.BridgeMetadataAsHash) - - bridgeExit := &agglayer.BridgeExit{ - LeafType: leafType, - TokenInfo: &agglayer.TokenInfo{ - OriginNetwork: claim.OriginNetwork, - OriginTokenAddress: claim.OriginAddress, - }, - DestinationNetwork: claim.DestinationNetwork, - DestinationAddress: claim.DestinationAddress, - Amount: claim.Amount, - IsMetadataHashed: isMetadataIsHashed, - Metadata: metaData, - } - - mainnetFlag, rollupIndex, leafIndex, err := bridgesync.DecodeGlobalIndex(claim.GlobalIndex) - if err != nil { - return nil, fmt.Errorf("error decoding global index: %w", err) - } - - return &agglayer.ImportedBridgeExit{ - BridgeExit: bridgeExit, - GlobalIndex: &agglayer.GlobalIndex{ - MainnetFlag: mainnetFlag, - RollupIndex: rollupIndex, - LeafIndex: leafIndex, - }, - }, nil -} - -// getBridgeExits converts bridges to agglayer.BridgeExit objects -func (a *AggSender) getBridgeExits(bridges []bridgesync.Bridge) []*agglayer.BridgeExit { - bridgeExits := make([]*agglayer.BridgeExit, 0, len(bridges)) - - for _, bridge := range bridges { - metaData, isMetadataHashed := convertBridgeMetadata(bridge.Metadata, a.cfg.BridgeMetadataAsHash) - bridgeExits = append(bridgeExits, &agglayer.BridgeExit{ - LeafType: agglayer.LeafType(bridge.LeafType), - TokenInfo: &agglayer.TokenInfo{ - OriginNetwork: bridge.OriginNetwork, - OriginTokenAddress: bridge.OriginAddress, - }, - DestinationNetwork: bridge.DestinationNetwork, - DestinationAddress: bridge.DestinationAddress, - Amount: bridge.Amount, - IsMetadataHashed: isMetadataHashed, - Metadata: metaData, - }) - } - - return bridgeExits -} - -// getImportedBridgeExits converts claims to agglayer.ImportedBridgeExit objects and calculates necessary proofs -func (a *AggSender) getImportedBridgeExits( - ctx context.Context, claims []bridgesync.Claim, -) ([]*agglayer.ImportedBridgeExit, error) { - if len(claims) == 0 { - // no claims to convert - return []*agglayer.ImportedBridgeExit{}, nil - } - - var ( - greatestL1InfoTreeIndexUsed uint32 - importedBridgeExits = 
make([]*agglayer.ImportedBridgeExit, 0, len(claims)) - claimL1Info = make([]*l1infotreesync.L1InfoTreeLeaf, 0, len(claims)) - ) - - for _, claim := range claims { - info, err := a.l1infoTreeSyncer.GetInfoByGlobalExitRoot(claim.GlobalExitRoot) - if err != nil { - return nil, fmt.Errorf("error getting info by global exit root: %w", err) - } - - claimL1Info = append(claimL1Info, info) - - if info.L1InfoTreeIndex > greatestL1InfoTreeIndexUsed { - greatestL1InfoTreeIndexUsed = info.L1InfoTreeIndex - } - } - - rootToProve, err := a.l1infoTreeSyncer.GetL1InfoTreeRootByIndex(ctx, greatestL1InfoTreeIndexUsed) - if err != nil { - return nil, fmt.Errorf("error getting L1 Info tree root by index: %d. Error: %w", greatestL1InfoTreeIndexUsed, err) - } - - for i, claim := range claims { - l1Info := claimL1Info[i] - - a.log.Debugf("claim[%d]: destAddr: %s GER: %s Block: %d Pos: %d GlobalIndex: 0x%x", - i, claim.DestinationAddress.String(), claim.GlobalExitRoot.String(), - claim.BlockNum, claim.BlockPos, claim.GlobalIndex) - ibe, err := a.convertClaimToImportedBridgeExit(claim) - if err != nil { - return nil, fmt.Errorf("error converting claim to imported bridge exit: %w", err) - } - - importedBridgeExits = append(importedBridgeExits, ibe) - - gerToL1Proof, err := a.l1infoTreeSyncer.GetL1InfoTreeMerkleProofFromIndexToRoot( - ctx, l1Info.L1InfoTreeIndex, rootToProve.Hash, - ) - if err != nil { - return nil, fmt.Errorf( - "error getting L1 Info tree merkle proof for leaf index: %d and root: %s. Error: %w", - l1Info.L1InfoTreeIndex, rootToProve.Hash, err, - ) - } - - claim := claims[i] - if ibe.GlobalIndex.MainnetFlag { - ibe.ClaimData = &agglayer.ClaimFromMainnnet{ - L1Leaf: &agglayer.L1InfoTreeLeaf{ - L1InfoTreeIndex: l1Info.L1InfoTreeIndex, - RollupExitRoot: claim.RollupExitRoot, - MainnetExitRoot: claim.MainnetExitRoot, - Inner: &agglayer.L1InfoTreeLeafInner{ - GlobalExitRoot: l1Info.GlobalExitRoot, - Timestamp: l1Info.Timestamp, - BlockHash: l1Info.PreviousBlockHash, - }, - }, - ProofLeafMER: &agglayer.MerkleProof{ - Root: claim.MainnetExitRoot, - Proof: claim.ProofLocalExitRoot, - }, - ProofGERToL1Root: &agglayer.MerkleProof{ - Root: rootToProve.Hash, - Proof: gerToL1Proof, - }, - } - } else { - ibe.ClaimData = &agglayer.ClaimFromRollup{ - L1Leaf: &agglayer.L1InfoTreeLeaf{ - L1InfoTreeIndex: l1Info.L1InfoTreeIndex, - RollupExitRoot: claim.RollupExitRoot, - MainnetExitRoot: claim.MainnetExitRoot, - Inner: &agglayer.L1InfoTreeLeafInner{ - GlobalExitRoot: l1Info.GlobalExitRoot, - Timestamp: l1Info.Timestamp, - BlockHash: l1Info.PreviousBlockHash, - }, - }, - ProofLeafLER: &agglayer.MerkleProof{ - Root: tree.CalculateRoot(ibe.BridgeExit.Hash(), - claim.ProofLocalExitRoot, ibe.GlobalIndex.LeafIndex), - Proof: claim.ProofLocalExitRoot, - }, - ProofLERToRER: &agglayer.MerkleProof{ - Root: claim.RollupExitRoot, - Proof: claim.ProofRollupExitRoot, - }, - ProofGERToL1Root: &agglayer.MerkleProof{ - Root: rootToProve.Hash, - Proof: gerToL1Proof, - }, - } - } - } - - return importedBridgeExits, nil -} - -// signCertificate signs a certificate with the sequencer key -func (a *AggSender) signCertificate(certificate *agglayer.Certificate) (*agglayer.SignedCertificate, error) { - hashToSign := certificate.HashToSign() - - sig, err := crypto.Sign(hashToSign.Bytes(), a.sequencerKey) - if err != nil { - return nil, err - } - - a.log.Infof("Signed certificate. sequencer address: %s. 
New local exit root: %s Hash signed: %s", - crypto.PubkeyToAddress(a.sequencerKey.PublicKey).String(), - common.BytesToHash(certificate.NewLocalExitRoot[:]).String(), - hashToSign.String(), - ) - - r, s, isOddParity, err := extractSignatureData(sig) - if err != nil { - return nil, err - } - - return &agglayer.SignedCertificate{ - Certificate: certificate, - Signature: &agglayer.Signature{ - R: r, - S: s, - OddParity: isOddParity, - }, - }, nil -} - -// checkPendingCertificatesStatus checks the status of pending certificates -// and updates in the storage if it changed on agglayer -// It returns: -// bool -> if there are pending certificates -func (a *AggSender) checkPendingCertificatesStatus(ctx context.Context) bool { - pendingCertificates, err := a.storage.GetCertificatesByStatus(agglayer.NonSettledStatuses) - if err != nil { - a.log.Errorf("error getting pending certificates: %w", err) - return true - } - - a.log.Debugf("checkPendingCertificatesStatus num of pendingCertificates: %d", len(pendingCertificates)) - thereArePendingCerts := false - - for _, certificate := range pendingCertificates { - certificateHeader, err := a.aggLayerClient.GetCertificateHeader(certificate.CertificateID) - if err != nil { - a.log.Errorf("error getting certificate header of %s from agglayer: %w", - certificate.ID(), err) - return true - } - - a.log.Debugf("aggLayerClient.GetCertificateHeader status [%s] of certificate %s elapsed time:%s", - certificateHeader.Status, - certificateHeader.ID(), - certificate.ElapsedTimeSinceCreation()) - - if err := a.updateCertificateStatus(ctx, certificate, certificateHeader); err != nil { - a.log.Errorf("error updating certificate %s status in storage: %w", certificateHeader.String(), err) - return true - } - - if !certificate.IsClosed() { - a.log.Infof("certificate %s is still pending, elapsed time:%s ", - certificateHeader.ID(), certificate.ElapsedTimeSinceCreation()) - thereArePendingCerts = true - } - } - return thereArePendingCerts -} - -// updateCertificate updates the certificate status in the storage -func (a *AggSender) updateCertificateStatus(ctx context.Context, - localCert *types.CertificateInfo, - agglayerCert *agglayer.CertificateHeader) error { - if localCert.Status == agglayerCert.Status { - return nil - } - a.log.Infof("certificate %s changed status from [%s] to [%s] elapsed time: %s full_cert (agglayer): %s", - localCert.ID(), localCert.Status, agglayerCert.Status, localCert.ElapsedTimeSinceCreation(), - agglayerCert.String()) - - // That is a strange situation - if agglayerCert.Status.IsOpen() && localCert.Status.IsClosed() { - a.log.Warnf("certificate %s is reopened! from [%s] to [%s]", - localCert.ID(), localCert.Status, agglayerCert.Status) - } - - localCert.Status = agglayerCert.Status - localCert.UpdatedAt = uint32(time.Now().UTC().Unix()) - if err := a.storage.UpdateCertificate(ctx, *localCert); err != nil { - a.log.Errorf("error updating certificate %s status in storage: %w", agglayerCert.ID(), err) - return fmt.Errorf("error updating certificate. 
Err: %w", err) - } - return nil -} - -// shouldSendCertificate checks if a certificate should be sent at given time -// if we have pending certificates, then we wait until they are settled -func (a *AggSender) shouldSendCertificate() (bool, error) { - pendingCertificates, err := a.storage.GetCertificatesByStatus(agglayer.NonSettledStatuses) - if err != nil { - return false, fmt.Errorf("error getting pending certificates: %w", err) - } - - return len(pendingCertificates) == 0, nil -} - -// checkLastCertificateFromAgglayer checks the last certificate from agglayer -func (a *AggSender) checkLastCertificateFromAgglayer(ctx context.Context) error { - networkID := a.l2Syncer.OriginNetwork() - a.log.Infof("recovery: checking last certificate from AggLayer for network %d", networkID) - aggLayerLastCert, err := a.aggLayerClient.GetLatestKnownCertificateHeader(networkID) - if err != nil { - return fmt.Errorf("recovery: error getting latest known certificate header from agglayer: %w", err) - } - a.log.Infof("recovery: last certificate from AggLayer: %s", aggLayerLastCert.String()) - localLastCert, err := a.storage.GetLastSentCertificate() - if err != nil { - return fmt.Errorf("recovery: error getting last sent certificate from local storage: %w", err) - } - a.log.Infof("recovery: last certificate in storage: %s", localLastCert.String()) - - // CASE 1: No certificates in local storage and agglayer - if localLastCert == nil && aggLayerLastCert == nil { - a.log.Info("recovery: No certificates in local storage and agglayer: initial state") - return nil - } - // CASE 2: No certificates in local storage but agglayer has one - if localLastCert == nil && aggLayerLastCert != nil { - a.log.Info("recovery: No certificates in local storage but agglayer have one: recovery aggSender cert: %s", - aggLayerLastCert.String()) - if _, err := a.updateLocalStorageWithAggLayerCert(ctx, aggLayerLastCert); err != nil { - return fmt.Errorf("recovery: error updating local storage with agglayer certificate: %w", err) - } - return nil - } - // CASE 2.1: certificate in storage but not in agglayer - // this is a non-sense, so throw an error - if localLastCert != nil && aggLayerLastCert == nil { - return fmt.Errorf("recovery: certificate exists in storage but not in agglayer. Inconsistency") - } - // CASE 3.1: the certificate on the agglayer has less height than the one stored in the local storage - if aggLayerLastCert.Height < localLastCert.Height { - return fmt.Errorf("recovery: the last certificate in the agglayer has less height (%d) "+ - "than the one in the local storage (%d)", aggLayerLastCert.Height, localLastCert.Height) - } - // CASE 3.2: aggsender stopped between sending to agglayer and storing to the local storage - if aggLayerLastCert.Height == localLastCert.Height+1 { - a.log.Infof("recovery: AggLayer has the next cert (height: %d), so is a recovery case: storing cert: %s", - aggLayerLastCert.Height, aggLayerLastCert.String()) - // we need to store the certificate in the local storage. 
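The recovery cases above mostly reduce to comparing the height of the last locally stored certificate with the one the AggLayer reports. A simplified, hypothetical decision function (heights only; the real code also compares certificate IDs and statuses) is sketched below:

package main

import "fmt"

// recoveryAction is a hypothetical summary of the CASE 1..5 branches above.
// local and remote are the heights of the last certificate known locally and
// by the AggLayer; a negative value means "no certificate known".
func recoveryAction(local, remote int64) string {
    switch {
    case local < 0 && remote < 0:
        return "initial state: nothing to do"
    case local < 0:
        return "recover: store the AggLayer certificate locally"
    case remote < 0:
        return "error: local certificate unknown to the AggLayer"
    case remote < local:
        return "error: AggLayer is behind local storage"
    case remote == local+1:
        return "recover: stopped after sending, store the missing certificate"
    default:
        return "compare certificate IDs: equal means in sync, different means mismatch"
    }
}

func main() {
    fmt.Println(recoveryAction(-1, -1)) // initial state: nothing to do
    fmt.Println(recoveryAction(9, 10))  // recover: stopped after sending, ...
    fmt.Println(recoveryAction(10, 10)) // compare certificate IDs: ...
}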
- localLastCert, err = a.updateLocalStorageWithAggLayerCert(ctx, aggLayerLastCert) - if err != nil { - log.Errorf("recovery: error updating certificate: %s, reason: %w", aggLayerLastCert.String(), err) - return fmt.Errorf("recovery: error updating certificate: %w", err) - } - } - // CASE 4: AggSender and AggLayer are not on the same page - // note: we don't need to check individual fields of the certificate - // because CertificateID is a hash of all the fields - if localLastCert.CertificateID != aggLayerLastCert.CertificateID { - a.log.Errorf("recovery: Local certificate:\n %s \n is different from agglayer certificate:\n %s", - localLastCert.String(), aggLayerLastCert.String()) - return fmt.Errorf("recovery: mismatch between local and agglayer certificates") - } - // CASE 5: AggSender and AggLayer are at same page - // just update status - err = a.updateCertificateStatus(ctx, localLastCert, aggLayerLastCert) - if err != nil { - a.log.Errorf("recovery: error updating status certificate: %s status: %w", aggLayerLastCert.String(), err) - return fmt.Errorf("recovery: error updating certificate status: %w", err) - } - - a.log.Infof("recovery: successfully checked last certificate from AggLayer for network %d", networkID) - return nil -} - -// updateLocalStorageWithAggLayerCert updates the local storage with the certificate from the AggLayer -func (a *AggSender) updateLocalStorageWithAggLayerCert(ctx context.Context, - aggLayerCert *agglayer.CertificateHeader) (*types.CertificateInfo, error) { - certInfo := NewCertificateInfoFromAgglayerCertHeader(aggLayerCert) - a.log.Infof("setting initial certificate from AggLayer: %s", certInfo.String()) - return certInfo, a.storage.SaveLastSentCertificate(ctx, *certInfo) -} - -// extractSignatureData extracts the R, S, and V from a 65-byte signature -func extractSignatureData(signature []byte) (r, s common.Hash, isOddParity bool, err error) { - if len(signature) != signatureSize { - err = errInvalidSignatureSize - return - } - - r = common.BytesToHash(signature[:32]) // First 32 bytes are R - s = common.BytesToHash(signature[32:64]) // Next 32 bytes are S - isOddParity = signature[64]%2 == 1 //nolint:mnd // Last byte is V - - return -} - -func NewCertificateInfoFromAgglayerCertHeader(c *agglayer.CertificateHeader) *types.CertificateInfo { - if c == nil { - return nil - } - now := uint32(time.Now().UTC().Unix()) - meta := types.NewCertificateMetadataFromHash(c.Metadata) - toBlock := meta.FromBlock + uint64(meta.Offset) - createdAt := meta.CreatedAt - - if meta.Version < 1 { - toBlock = meta.ToBlock - createdAt = now - } - - res := &types.CertificateInfo{ - Height: c.Height, - CertificateID: c.CertificateID, - NewLocalExitRoot: c.NewLocalExitRoot, - FromBlock: meta.FromBlock, - ToBlock: toBlock, - Status: c.Status, - CreatedAt: createdAt, - UpdatedAt: now, - SignedCertificate: "na/agglayer header", - } - if c.PreviousLocalExitRoot != nil { - res.PreviousLocalExitRoot = c.PreviousLocalExitRoot - } - return res -} - -// getLastSentBlockAndRetryCount returns the last sent block of the last sent certificate -// if there is no previosly sent certificate, it returns 0 and 0 -func getLastSentBlockAndRetryCount(lastSentCertificateInfo *types.CertificateInfo) (uint64, int) { - if lastSentCertificateInfo == nil { - return 0, 0 - } - - retryCount := 0 - lastSentBlock := lastSentCertificateInfo.ToBlock - - if lastSentCertificateInfo.Status == agglayer.InError { - // if the last certificate was in error, we need to resend it - // from the block before the error - if 
lastSentCertificateInfo.FromBlock > 0 { - lastSentBlock = lastSentCertificateInfo.FromBlock - 1 - } - - retryCount = lastSentCertificateInfo.RetryCount + 1 - } - - return lastSentBlock, retryCount -} diff --git a/aggsender/aggsender_test.go b/aggsender/aggsender_test.go deleted file mode 100644 index 02efccf17..000000000 --- a/aggsender/aggsender_test.go +++ /dev/null @@ -1,2151 +0,0 @@ -package aggsender - -import ( - "context" - "crypto/ecdsa" - "encoding/json" - "errors" - "fmt" - "math/big" - "os" - "path" - "testing" - "time" - - "github.com/0xPolygon/cdk/agglayer" - "github.com/0xPolygon/cdk/aggsender/db" - "github.com/0xPolygon/cdk/aggsender/mocks" - aggsendertypes "github.com/0xPolygon/cdk/aggsender/types" - "github.com/0xPolygon/cdk/bridgesync" - "github.com/0xPolygon/cdk/config/types" - "github.com/0xPolygon/cdk/l1infotreesync" - "github.com/0xPolygon/cdk/log" - treeTypes "github.com/0xPolygon/cdk/tree/types" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" -) - -const ( - networkIDTest = uint32(1234) -) - -var ( - errTest = errors.New("unitest error") - ler1 = common.HexToHash("0x123") -) - -func TestConfigString(t *testing.T) { - config := Config{ - StoragePath: "/path/to/storage", - AggLayerURL: "http://agglayer.url", - AggsenderPrivateKey: types.KeystoreFileConfig{Path: "/path/to/key", Password: "password"}, - URLRPCL2: "http://l2.rpc.url", - BlockFinality: "latestBlock", - EpochNotificationPercentage: 50, - SaveCertificatesToFilesPath: "/path/to/certificates", - } - - expected := "StoragePath: /path/to/storage\n" + - "AggLayerURL: http://agglayer.url\n" + - "AggsenderPrivateKeyPath: /path/to/key\n" + - "URLRPCL2: http://l2.rpc.url\n" + - "BlockFinality: latestBlock\n" + - "EpochNotificationPercentage: 50\n" + - "SaveCertificatesToFilesPath: /path/to/certificates\n" - - require.Equal(t, expected, config.String()) -} - -func TestConvertClaimToImportedBridgeExit(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - claim bridgesync.Claim - expectedError bool - expectedExit *agglayer.ImportedBridgeExit - }{ - { - name: "Asset claim", - claim: bridgesync.Claim{ - IsMessage: false, - OriginNetwork: 1, - OriginAddress: common.HexToAddress("0x123"), - DestinationNetwork: 2, - DestinationAddress: common.HexToAddress("0x456"), - Amount: big.NewInt(100), - Metadata: []byte("metadata"), - GlobalIndex: big.NewInt(1), - }, - expectedError: false, - expectedExit: &agglayer.ImportedBridgeExit{ - BridgeExit: &agglayer.BridgeExit{ - LeafType: agglayer.LeafTypeAsset, - TokenInfo: &agglayer.TokenInfo{ - OriginNetwork: 1, - OriginTokenAddress: common.HexToAddress("0x123"), - }, - DestinationNetwork: 2, - DestinationAddress: common.HexToAddress("0x456"), - Amount: big.NewInt(100), - Metadata: []byte("metadata"), - }, - GlobalIndex: &agglayer.GlobalIndex{ - MainnetFlag: false, - RollupIndex: 0, - LeafIndex: 1, - }, - }, - }, - { - name: "Message claim", - claim: bridgesync.Claim{ - IsMessage: true, - OriginNetwork: 1, - OriginAddress: common.HexToAddress("0x123"), - DestinationNetwork: 2, - DestinationAddress: common.HexToAddress("0x456"), - Amount: big.NewInt(100), - Metadata: []byte("metadata"), - GlobalIndex: big.NewInt(2), - }, - expectedError: false, - expectedExit: &agglayer.ImportedBridgeExit{ - BridgeExit: &agglayer.BridgeExit{ - LeafType: agglayer.LeafTypeMessage, - TokenInfo: &agglayer.TokenInfo{ - OriginNetwork: 1, - OriginTokenAddress: 
common.HexToAddress("0x123"), - }, - DestinationNetwork: 2, - DestinationAddress: common.HexToAddress("0x456"), - Amount: big.NewInt(100), - Metadata: []byte("metadata"), - }, - GlobalIndex: &agglayer.GlobalIndex{ - MainnetFlag: false, - RollupIndex: 0, - LeafIndex: 2, - }, - }, - }, - { - name: "Invalid global index", - claim: bridgesync.Claim{ - IsMessage: false, - OriginNetwork: 1, - OriginAddress: common.HexToAddress("0x123"), - DestinationNetwork: 2, - DestinationAddress: common.HexToAddress("0x456"), - Amount: big.NewInt(100), - Metadata: []byte("metadata"), - GlobalIndex: new(big.Int).SetBytes([]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}), - }, - expectedError: true, - expectedExit: nil, - }, - } - - for _, tt := range tests { - tt := tt - - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - aggSender := &AggSender{} - exit, err := aggSender.convertClaimToImportedBridgeExit(tt.claim) - - if tt.expectedError { - require.Error(t, err) - } else { - require.NoError(t, err) - require.Equal(t, tt.expectedExit, exit) - } - }) - } -} - -func TestGetBridgeExits(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - bridges []bridgesync.Bridge - expectedExits []*agglayer.BridgeExit - }{ - { - name: "Single bridge", - bridges: []bridgesync.Bridge{ - { - LeafType: agglayer.LeafTypeAsset.Uint8(), - OriginNetwork: 1, - OriginAddress: common.HexToAddress("0x123"), - DestinationNetwork: 2, - DestinationAddress: common.HexToAddress("0x456"), - Amount: big.NewInt(100), - Metadata: []byte("metadata"), - }, - }, - expectedExits: []*agglayer.BridgeExit{ - { - LeafType: agglayer.LeafTypeAsset, - TokenInfo: &agglayer.TokenInfo{ - OriginNetwork: 1, - OriginTokenAddress: common.HexToAddress("0x123"), - }, - DestinationNetwork: 2, - DestinationAddress: common.HexToAddress("0x456"), - Amount: big.NewInt(100), - Metadata: []byte("metadata"), - }, - }, - }, - { - name: "Multiple bridges", - bridges: []bridgesync.Bridge{ - { - LeafType: agglayer.LeafTypeAsset.Uint8(), - OriginNetwork: 1, - OriginAddress: common.HexToAddress("0x123"), - DestinationNetwork: 2, - DestinationAddress: common.HexToAddress("0x456"), - Amount: big.NewInt(100), - Metadata: []byte("metadata"), - }, - { - LeafType: agglayer.LeafTypeMessage.Uint8(), - OriginNetwork: 3, - OriginAddress: common.HexToAddress("0x789"), - DestinationNetwork: 4, - DestinationAddress: common.HexToAddress("0xabc"), - Amount: big.NewInt(200), - Metadata: []byte("data"), - }, - }, - expectedExits: []*agglayer.BridgeExit{ - { - LeafType: agglayer.LeafTypeAsset, - TokenInfo: &agglayer.TokenInfo{ - OriginNetwork: 1, - OriginTokenAddress: common.HexToAddress("0x123"), - }, - DestinationNetwork: 2, - DestinationAddress: common.HexToAddress("0x456"), - Amount: big.NewInt(100), - Metadata: []byte("metadata"), - }, - { - LeafType: agglayer.LeafTypeMessage, - TokenInfo: &agglayer.TokenInfo{ - OriginNetwork: 3, - OriginTokenAddress: common.HexToAddress("0x789"), - }, - DestinationNetwork: 4, - DestinationAddress: common.HexToAddress("0xabc"), - Amount: big.NewInt(200), - Metadata: []byte("data"), - }, - }, - }, - { - name: "No bridges", - bridges: []bridgesync.Bridge{}, - expectedExits: []*agglayer.BridgeExit{}, - }, - } - - for _, tt := range tests { - tt := tt - - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - aggSender := &AggSender{} - exits := aggSender.getBridgeExits(tt.bridges) - - require.Equal(t, tt.expectedExits, exits) - }) - } -} - -func TestAggSenderStart(t *testing.T) { - aggLayerMock := agglayer.NewAgglayerClientMock(t) - 
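TestConvertClaimToImportedBridgeExit and TestGetBridgeExits above use Go's standard table-driven test layout. A bare-bones version of that layout, with a hypothetical double function standing in for the code under test, could be:

package main

import "testing"

func double(x int) int { return x * 2 }

// TestDouble shows the structure used by the deleted tests: a slice of named
// cases, tt := tt to pin the loop variable, and t.Run with t.Parallel() in
// each subtest.
func TestDouble(t *testing.T) {
    t.Parallel()

    tests := []struct {
        name string
        in   int
        want int
    }{
        {name: "zero", in: 0, want: 0},
        {name: "positive", in: 21, want: 42},
        {name: "negative", in: -3, want: -6},
    }

    for _, tt := range tests {
        tt := tt
        t.Run(tt.name, func(t *testing.T) {
            t.Parallel()
            if got := double(tt.in); got != tt.want {
                t.Errorf("double(%d) = %d, want %d", tt.in, got, tt.want)
            }
        })
    }
}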
epochNotifierMock := mocks.NewEpochNotifier(t) - bridgeL2SyncerMock := mocks.NewL2BridgeSyncer(t) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - aggSender, err := New( - ctx, - log.WithFields("test", "unittest"), - Config{ - StoragePath: path.Join(t.TempDir(), "aggsenderTestAggSenderStart.sqlite"), - DelayBeetweenRetries: types.Duration{Duration: 1 * time.Microsecond}, - }, - aggLayerMock, - nil, - bridgeL2SyncerMock, - epochNotifierMock) - require.NoError(t, err) - require.NotNil(t, aggSender) - ch := make(chan aggsendertypes.EpochEvent) - epochNotifierMock.EXPECT().Subscribe("aggsender").Return(ch) - bridgeL2SyncerMock.EXPECT().OriginNetwork().Return(uint32(1)) - bridgeL2SyncerMock.EXPECT().GetLastProcessedBlock(mock.Anything).Return(uint64(0), nil) - aggLayerMock.EXPECT().GetLatestKnownCertificateHeader(mock.Anything).Return(nil, nil) - - go aggSender.Start(ctx) - ch <- aggsendertypes.EpochEvent{ - Epoch: 1, - } - time.Sleep(200 * time.Millisecond) -} - -func TestAggSenderSendCertificates(t *testing.T) { - AggLayerMock := agglayer.NewAgglayerClientMock(t) - epochNotifierMock := mocks.NewEpochNotifier(t) - bridgeL2SyncerMock := mocks.NewL2BridgeSyncer(t) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - aggSender, err := New( - ctx, - log.WithFields("test", "unittest"), - Config{ - StoragePath: path.Join(t.TempDir(), "aggsenderTestAggSenderSendCertificates.sqlite"), - }, - AggLayerMock, - nil, - bridgeL2SyncerMock, - epochNotifierMock) - require.NoError(t, err) - require.NotNil(t, aggSender) - ch := make(chan aggsendertypes.EpochEvent, 2) - epochNotifierMock.EXPECT().Subscribe("aggsender").Return(ch) - err = aggSender.storage.SaveLastSentCertificate(ctx, aggsendertypes.CertificateInfo{ - Height: 1, - Status: agglayer.Pending, - }) - AggLayerMock.EXPECT().GetCertificateHeader(mock.Anything).Return(&agglayer.CertificateHeader{ - Status: agglayer.Pending, - }, nil) - require.NoError(t, err) - ch <- aggsendertypes.EpochEvent{ - Epoch: 1, - } - go aggSender.sendCertificates(ctx) - time.Sleep(200 * time.Millisecond) -} - -//nolint:dupl -func TestGetImportedBridgeExits(t *testing.T) { - t.Parallel() - - mockProof := generateTestProof(t) - - mockL1InfoTreeSyncer := mocks.NewL1InfoTreeSyncer(t) - mockL1InfoTreeSyncer.On("GetInfoByGlobalExitRoot", mock.Anything).Return(&l1infotreesync.L1InfoTreeLeaf{ - L1InfoTreeIndex: 1, - Timestamp: 123456789, - PreviousBlockHash: common.HexToHash("0xabc"), - GlobalExitRoot: common.HexToHash("0x7891"), - }, nil) - mockL1InfoTreeSyncer.On("GetL1InfoTreeRootByIndex", mock.Anything, mock.Anything).Return( - treeTypes.Root{Hash: common.HexToHash("0x7891")}, nil) - mockL1InfoTreeSyncer.On("GetL1InfoTreeMerkleProofFromIndexToRoot", mock.Anything, - mock.Anything, mock.Anything).Return(mockProof, nil) - - tests := []struct { - name string - claims []bridgesync.Claim - expectedError bool - expectedExits []*agglayer.ImportedBridgeExit - }{ - { - name: "Single claim", - claims: []bridgesync.Claim{ - { - IsMessage: false, - OriginNetwork: 1, - OriginAddress: common.HexToAddress("0x1234"), - DestinationNetwork: 2, - DestinationAddress: common.HexToAddress("0x4567"), - Amount: big.NewInt(111), - Metadata: []byte("metadata1"), - GlobalIndex: bridgesync.GenerateGlobalIndex(false, 1, 1), - GlobalExitRoot: common.HexToHash("0x7891"), - RollupExitRoot: common.HexToHash("0xaaab"), - MainnetExitRoot: common.HexToHash("0xbbba"), - ProofLocalExitRoot: mockProof, - ProofRollupExitRoot: mockProof, - }, - }, - expectedError: 
false, - expectedExits: []*agglayer.ImportedBridgeExit{ - { - BridgeExit: &agglayer.BridgeExit{ - LeafType: agglayer.LeafTypeAsset, - TokenInfo: &agglayer.TokenInfo{ - OriginNetwork: 1, - OriginTokenAddress: common.HexToAddress("0x1234"), - }, - DestinationNetwork: 2, - DestinationAddress: common.HexToAddress("0x4567"), - Amount: big.NewInt(111), - Metadata: []byte("metadata1"), - }, - GlobalIndex: &agglayer.GlobalIndex{ - MainnetFlag: false, - RollupIndex: 1, - LeafIndex: 1, - }, - ClaimData: &agglayer.ClaimFromRollup{ - L1Leaf: &agglayer.L1InfoTreeLeaf{ - L1InfoTreeIndex: 1, - RollupExitRoot: common.HexToHash("0xaaab"), - MainnetExitRoot: common.HexToHash("0xbbba"), - Inner: &agglayer.L1InfoTreeLeafInner{ - GlobalExitRoot: common.HexToHash("0x7891"), - Timestamp: 123456789, - BlockHash: common.HexToHash("0xabc"), - }, - }, - ProofLeafLER: &agglayer.MerkleProof{ - Root: common.HexToHash("0xc52019815b51acf67a715cae6794a20083d63fd9af45783b7adf69123dae92c8"), - Proof: mockProof, - }, - ProofLERToRER: &agglayer.MerkleProof{ - Root: common.HexToHash("0xaaab"), - Proof: mockProof, - }, - ProofGERToL1Root: &agglayer.MerkleProof{ - Root: common.HexToHash("0x7891"), - Proof: mockProof, - }, - }, - }, - }, - }, - { - name: "Multiple claims", - claims: []bridgesync.Claim{ - { - IsMessage: false, - OriginNetwork: 1, - OriginAddress: common.HexToAddress("0x123"), - DestinationNetwork: 2, - DestinationAddress: common.HexToAddress("0x456"), - Amount: big.NewInt(100), - Metadata: []byte("metadata"), - GlobalIndex: big.NewInt(1), - GlobalExitRoot: common.HexToHash("0x7891"), - RollupExitRoot: common.HexToHash("0xaaa"), - MainnetExitRoot: common.HexToHash("0xbbb"), - ProofLocalExitRoot: mockProof, - ProofRollupExitRoot: mockProof, - }, - { - IsMessage: true, - OriginNetwork: 3, - OriginAddress: common.HexToAddress("0x789"), - DestinationNetwork: 4, - DestinationAddress: common.HexToAddress("0xabc"), - Amount: big.NewInt(200), - Metadata: []byte("data"), - GlobalIndex: bridgesync.GenerateGlobalIndex(true, 0, 2), - GlobalExitRoot: common.HexToHash("0x7891"), - RollupExitRoot: common.HexToHash("0xbbb"), - MainnetExitRoot: common.HexToHash("0xccc"), - ProofLocalExitRoot: mockProof, - ProofRollupExitRoot: mockProof, - }, - }, - expectedError: false, - expectedExits: []*agglayer.ImportedBridgeExit{ - { - BridgeExit: &agglayer.BridgeExit{ - LeafType: agglayer.LeafTypeAsset, - TokenInfo: &agglayer.TokenInfo{ - OriginNetwork: 1, - OriginTokenAddress: common.HexToAddress("0x123"), - }, - DestinationNetwork: 2, - DestinationAddress: common.HexToAddress("0x456"), - Amount: big.NewInt(100), - Metadata: []byte("metadata"), - }, - GlobalIndex: &agglayer.GlobalIndex{ - MainnetFlag: false, - RollupIndex: 0, - LeafIndex: 1, - }, - ClaimData: &agglayer.ClaimFromRollup{ - L1Leaf: &agglayer.L1InfoTreeLeaf{ - L1InfoTreeIndex: 1, - RollupExitRoot: common.HexToHash("0xaaa"), - MainnetExitRoot: common.HexToHash("0xbbb"), - Inner: &agglayer.L1InfoTreeLeafInner{ - GlobalExitRoot: common.HexToHash("0x7891"), - Timestamp: 123456789, - BlockHash: common.HexToHash("0xabc"), - }, - }, - ProofLeafLER: &agglayer.MerkleProof{ - Root: common.HexToHash("0x105e0f1144e57f6fb63f1dfc5083b1f59be3512be7cf5e63523779ad14a4d987"), - Proof: mockProof, - }, - ProofLERToRER: &agglayer.MerkleProof{ - Root: common.HexToHash("0xaaa"), - Proof: mockProof, - }, - ProofGERToL1Root: &agglayer.MerkleProof{ - Root: common.HexToHash("0x7891"), - Proof: mockProof, - }, - }, - }, - { - BridgeExit: &agglayer.BridgeExit{ - LeafType: agglayer.LeafTypeMessage, - 
TokenInfo: &agglayer.TokenInfo{ - OriginNetwork: 3, - OriginTokenAddress: common.HexToAddress("0x789"), - }, - DestinationNetwork: 4, - DestinationAddress: common.HexToAddress("0xabc"), - Amount: big.NewInt(200), - Metadata: []byte("data"), - }, - GlobalIndex: &agglayer.GlobalIndex{ - MainnetFlag: true, - RollupIndex: 0, - LeafIndex: 2, - }, - ClaimData: &agglayer.ClaimFromMainnnet{ - L1Leaf: &agglayer.L1InfoTreeLeaf{ - L1InfoTreeIndex: 1, - RollupExitRoot: common.HexToHash("0xbbb"), - MainnetExitRoot: common.HexToHash("0xccc"), - Inner: &agglayer.L1InfoTreeLeafInner{ - GlobalExitRoot: common.HexToHash("0x7891"), - Timestamp: 123456789, - BlockHash: common.HexToHash("0xabc"), - }, - }, - ProofLeafMER: &agglayer.MerkleProof{ - Root: common.HexToHash("0xccc"), - Proof: mockProof, - }, - ProofGERToL1Root: &agglayer.MerkleProof{ - Root: common.HexToHash("0x7891"), - Proof: mockProof, - }, - }, - }, - }, - }, - { - name: "No claims", - claims: []bridgesync.Claim{}, - expectedError: false, - expectedExits: []*agglayer.ImportedBridgeExit{}, - }, - } - - for _, tt := range tests { - tt := tt - - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - aggSender := &AggSender{ - l1infoTreeSyncer: mockL1InfoTreeSyncer, - log: log.WithFields("test", "unittest"), - } - exits, err := aggSender.getImportedBridgeExits(context.Background(), tt.claims) - - if tt.expectedError { - require.Error(t, err) - require.Nil(t, exits) - } else { - require.NoError(t, err) - require.Equal(t, tt.expectedExits, exits) - } - }) - } -} - -func TestBuildCertificate(t *testing.T) { - mockL2BridgeSyncer := mocks.NewL2BridgeSyncer(t) - mockL1InfoTreeSyncer := mocks.NewL1InfoTreeSyncer(t) - mockProof := generateTestProof(t) - - tests := []struct { - name string - bridges []bridgesync.Bridge - claims []bridgesync.Claim - lastSentCertificateInfo aggsendertypes.CertificateInfo - fromBlock uint64 - toBlock uint64 - mockFn func() - expectedCert *agglayer.Certificate - expectedError bool - }{ - { - name: "Valid certificate with bridges and claims", - bridges: []bridgesync.Bridge{ - { - LeafType: agglayer.LeafTypeAsset.Uint8(), - OriginNetwork: 1, - OriginAddress: common.HexToAddress("0x123"), - DestinationNetwork: 2, - DestinationAddress: common.HexToAddress("0x456"), - Amount: big.NewInt(100), - Metadata: []byte("metadata"), - DepositCount: 1, - }, - }, - claims: []bridgesync.Claim{ - { - IsMessage: false, - OriginNetwork: 1, - OriginAddress: common.HexToAddress("0x1234"), - DestinationNetwork: 2, - DestinationAddress: common.HexToAddress("0x4567"), - Amount: big.NewInt(111), - Metadata: []byte("metadata1"), - GlobalIndex: big.NewInt(1), - GlobalExitRoot: common.HexToHash("0x7891"), - RollupExitRoot: common.HexToHash("0xaaab"), - MainnetExitRoot: common.HexToHash("0xbbba"), - ProofLocalExitRoot: mockProof, - ProofRollupExitRoot: mockProof, - }, - }, - lastSentCertificateInfo: aggsendertypes.CertificateInfo{ - NewLocalExitRoot: common.HexToHash("0x123"), - Height: 1, - Status: agglayer.Settled, - }, - fromBlock: 0, - toBlock: 10, - expectedCert: &agglayer.Certificate{ - NetworkID: 1, - PrevLocalExitRoot: common.HexToHash("0x123"), - NewLocalExitRoot: common.HexToHash("0x789"), - Metadata: aggsendertypes.NewCertificateMetadata(0, 10, 0).ToHash(), - BridgeExits: []*agglayer.BridgeExit{ - { - LeafType: agglayer.LeafTypeAsset, - TokenInfo: &agglayer.TokenInfo{ - OriginNetwork: 1, - OriginTokenAddress: common.HexToAddress("0x123"), - }, - DestinationNetwork: 2, - DestinationAddress: common.HexToAddress("0x456"), - Amount: big.NewInt(100), 
- Metadata: []byte("metadata"), - }, - }, - ImportedBridgeExits: []*agglayer.ImportedBridgeExit{ - { - BridgeExit: &agglayer.BridgeExit{ - LeafType: agglayer.LeafTypeAsset, - TokenInfo: &agglayer.TokenInfo{ - OriginNetwork: 1, - OriginTokenAddress: common.HexToAddress("0x1234"), - }, - DestinationNetwork: 2, - DestinationAddress: common.HexToAddress("0x4567"), - Amount: big.NewInt(111), - Metadata: []byte("metadata1"), - }, - GlobalIndex: &agglayer.GlobalIndex{ - MainnetFlag: false, - RollupIndex: 0, - LeafIndex: 1, - }, - ClaimData: &agglayer.ClaimFromRollup{ - L1Leaf: &agglayer.L1InfoTreeLeaf{ - L1InfoTreeIndex: 1, - RollupExitRoot: common.HexToHash("0xaaab"), - MainnetExitRoot: common.HexToHash("0xbbba"), - Inner: &agglayer.L1InfoTreeLeafInner{ - GlobalExitRoot: common.HexToHash("0x7891"), - Timestamp: 123456789, - BlockHash: common.HexToHash("0xabc"), - }, - }, - ProofLeafLER: &agglayer.MerkleProof{ - Root: common.HexToHash("0xc52019815b51acf67a715cae6794a20083d63fd9af45783b7adf69123dae92c8"), - Proof: mockProof, - }, - ProofLERToRER: &agglayer.MerkleProof{ - Root: common.HexToHash("0xaaab"), - Proof: mockProof, - }, - ProofGERToL1Root: &agglayer.MerkleProof{ - Root: common.HexToHash("0x7891"), - Proof: mockProof, - }, - }, - }, - }, - Height: 2, - }, - mockFn: func() { - mockL2BridgeSyncer.On("OriginNetwork").Return(uint32(1)) - mockL2BridgeSyncer.On("GetExitRootByIndex", mock.Anything, mock.Anything).Return(treeTypes.Root{Hash: common.HexToHash("0x789")}, nil) - - mockL1InfoTreeSyncer.On("GetInfoByGlobalExitRoot", mock.Anything).Return(&l1infotreesync.L1InfoTreeLeaf{ - L1InfoTreeIndex: 1, - Timestamp: 123456789, - PreviousBlockHash: common.HexToHash("0xabc"), - GlobalExitRoot: common.HexToHash("0x7891"), - }, nil) - mockL1InfoTreeSyncer.On("GetL1InfoTreeRootByIndex", mock.Anything, mock.Anything).Return(treeTypes.Root{Hash: common.HexToHash("0x7891")}, nil) - mockL1InfoTreeSyncer.On("GetL1InfoTreeMerkleProofFromIndexToRoot", mock.Anything, mock.Anything, mock.Anything).Return(mockProof, nil) - }, - expectedError: false, - }, - { - name: "No bridges or claims", - bridges: []bridgesync.Bridge{}, - claims: []bridgesync.Claim{}, - lastSentCertificateInfo: aggsendertypes.CertificateInfo{ - NewLocalExitRoot: common.HexToHash("0x123"), - Height: 1, - }, - expectedCert: nil, - expectedError: true, - }, - { - name: "Error getting imported bridge exits", - bridges: []bridgesync.Bridge{ - { - LeafType: agglayer.LeafTypeAsset.Uint8(), - OriginNetwork: 1, - OriginAddress: common.HexToAddress("0x123"), - DestinationNetwork: 2, - DestinationAddress: common.HexToAddress("0x456"), - Amount: big.NewInt(100), - Metadata: []byte("metadata"), - DepositCount: 1, - }, - }, - claims: []bridgesync.Claim{ - { - IsMessage: false, - OriginNetwork: 1, - OriginAddress: common.HexToAddress("0x1234"), - DestinationNetwork: 2, - DestinationAddress: common.HexToAddress("0x4567"), - Amount: big.NewInt(111), - Metadata: []byte("metadata1"), - GlobalIndex: new(big.Int).SetBytes([]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}), - GlobalExitRoot: common.HexToHash("0x7891"), - RollupExitRoot: common.HexToHash("0xaaab"), - MainnetExitRoot: common.HexToHash("0xbbba"), - ProofLocalExitRoot: mockProof, - }, - }, - lastSentCertificateInfo: aggsendertypes.CertificateInfo{ - NewLocalExitRoot: common.HexToHash("0x123"), - Height: 1, - }, - mockFn: func() { - mockL1InfoTreeSyncer.On("GetInfoByGlobalExitRoot", mock.Anything).Return(&l1infotreesync.L1InfoTreeLeaf{ - L1InfoTreeIndex: 1, - Timestamp: 123456789, - PreviousBlockHash: 
common.HexToHash("0xabc"), - GlobalExitRoot: common.HexToHash("0x7891"), - }, nil) - mockL1InfoTreeSyncer.On("GetL1InfoTreeRootByIndex", mock.Anything, mock.Anything).Return( - treeTypes.Root{Hash: common.HexToHash("0x7891")}, nil) - }, - expectedCert: nil, - expectedError: true, - }, - } - - for _, tt := range tests { - tt := tt - - t.Run(tt.name, func(t *testing.T) { - mockL1InfoTreeSyncer.ExpectedCalls = nil - mockL2BridgeSyncer.ExpectedCalls = nil - - if tt.mockFn != nil { - tt.mockFn() - } - - aggSender := &AggSender{ - l2Syncer: mockL2BridgeSyncer, - l1infoTreeSyncer: mockL1InfoTreeSyncer, - log: log.WithFields("test", "unittest"), - } - - certParam := &aggsendertypes.CertificateBuildParams{ - ToBlock: tt.toBlock, - Bridges: tt.bridges, - Claims: tt.claims, - } - cert, err := aggSender.buildCertificate(context.Background(), certParam, &tt.lastSentCertificateInfo) - - if tt.expectedError { - require.Error(t, err) - require.Nil(t, cert) - } else { - require.NoError(t, err) - require.Equal(t, tt.expectedCert, cert) - } - }) - } -} - -func generateTestProof(t *testing.T) treeTypes.Proof { - t.Helper() - - proof := treeTypes.Proof{} - - for i := 0; i < int(treeTypes.DefaultHeight) && i < 10; i++ { - proof[i] = common.HexToHash(fmt.Sprintf("0x%d", i)) - } - - return proof -} - -func TestCheckIfCertificatesAreSettled(t *testing.T) { - tests := []struct { - name string - pendingCertificates []*aggsendertypes.CertificateInfo - certificateHeaders map[common.Hash]*agglayer.CertificateHeader - getFromDBError error - clientError error - updateDBError error - expectedErrorLogMessages []string - expectedInfoMessages []string - expectedError bool - }{ - { - name: "All certificates settled - update successful", - pendingCertificates: []*aggsendertypes.CertificateInfo{ - {CertificateID: common.HexToHash("0x1"), Height: 1}, - {CertificateID: common.HexToHash("0x2"), Height: 2}, - }, - certificateHeaders: map[common.Hash]*agglayer.CertificateHeader{ - common.HexToHash("0x1"): {Status: agglayer.Settled}, - common.HexToHash("0x2"): {Status: agglayer.Settled}, - }, - expectedInfoMessages: []string{ - "certificate %s changed status to %s", - }, - }, - { - name: "Some certificates in error - update successful", - pendingCertificates: []*aggsendertypes.CertificateInfo{ - {CertificateID: common.HexToHash("0x1"), Height: 1}, - {CertificateID: common.HexToHash("0x2"), Height: 2}, - }, - certificateHeaders: map[common.Hash]*agglayer.CertificateHeader{ - common.HexToHash("0x1"): {Status: agglayer.InError}, - common.HexToHash("0x2"): {Status: agglayer.Settled}, - }, - expectedInfoMessages: []string{ - "certificate %s changed status to %s", - }, - }, - { - name: "Error getting pending certificates", - getFromDBError: fmt.Errorf("storage error"), - expectedErrorLogMessages: []string{ - "error getting pending certificates: %w", - }, - expectedError: true, - }, - { - name: "Error getting certificate header", - pendingCertificates: []*aggsendertypes.CertificateInfo{ - {CertificateID: common.HexToHash("0x1"), Height: 1}, - }, - certificateHeaders: map[common.Hash]*agglayer.CertificateHeader{ - common.HexToHash("0x1"): {Status: agglayer.InError}, - }, - clientError: fmt.Errorf("client error"), - expectedErrorLogMessages: []string{ - "error getting header of certificate %s with height: %d from agglayer: %w", - }, - expectedError: true, - }, - { - name: "Error updating certificate status", - pendingCertificates: []*aggsendertypes.CertificateInfo{ - {CertificateID: common.HexToHash("0x1"), Height: 1}, - }, - 
certificateHeaders: map[common.Hash]*agglayer.CertificateHeader{ - common.HexToHash("0x1"): {Status: agglayer.Settled}, - }, - updateDBError: fmt.Errorf("update error"), - expectedErrorLogMessages: []string{ - "error updating certificate status in storage: %w", - }, - expectedInfoMessages: []string{ - "certificate %s changed status to %s", - }, - expectedError: true, - }, - } - - for _, tt := range tests { - tt := tt - - t.Run(tt.name, func(t *testing.T) { - mockStorage := mocks.NewAggSenderStorage(t) - mockAggLayerClient := agglayer.NewAgglayerClientMock(t) - mockLogger := log.WithFields("test", "unittest") - - mockStorage.On("GetCertificatesByStatus", agglayer.NonSettledStatuses).Return( - tt.pendingCertificates, tt.getFromDBError) - for certID, header := range tt.certificateHeaders { - mockAggLayerClient.On("GetCertificateHeader", certID).Return(header, tt.clientError) - } - if tt.updateDBError != nil { - mockStorage.On("UpdateCertificate", mock.Anything, mock.Anything).Return(tt.updateDBError) - } else if tt.clientError == nil && tt.getFromDBError == nil { - mockStorage.On("UpdateCertificate", mock.Anything, mock.Anything).Return(nil) - } - - aggSender := &AggSender{ - log: mockLogger, - storage: mockStorage, - aggLayerClient: mockAggLayerClient, - cfg: Config{}, - } - - ctx := context.TODO() - thereArePendingCerts := aggSender.checkPendingCertificatesStatus(ctx) - require.Equal(t, tt.expectedError, thereArePendingCerts) - mockAggLayerClient.AssertExpectations(t) - mockStorage.AssertExpectations(t) - }) - } -} - -func TestSendCertificate(t *testing.T) { - t.Parallel() - - privateKey, err := crypto.GenerateKey() - require.NoError(t, err) - - type testCfg struct { - name string - sequencerKey *ecdsa.PrivateKey - shouldSendCertificate []interface{} - getLastSentCertificate []interface{} - lastL2BlockProcessed []interface{} - getBridges []interface{} - getClaims []interface{} - getInfoByGlobalExitRoot []interface{} - getL1InfoTreeRootByIndex []interface{} - getL1InfoTreeMerkleProofFromIndexToRoot []interface{} - getExitRootByIndex []interface{} - originNetwork []interface{} - sendCertificate []interface{} - saveLastSentCertificate []interface{} - expectedError string - } - - setupTest := func(cfg testCfg) (*AggSender, *mocks.AggSenderStorage, *mocks.L2BridgeSyncer, - *agglayer.AgglayerClientMock, *mocks.L1InfoTreeSyncer) { - var ( - aggsender = &AggSender{ - log: log.WithFields("aggsender", 1), - cfg: Config{MaxRetriesStoreCertificate: 1}, - sequencerKey: cfg.sequencerKey, - } - mockStorage *mocks.AggSenderStorage - mockL2Syncer *mocks.L2BridgeSyncer - mockAggLayerClient *agglayer.AgglayerClientMock - mockL1InfoTreeSyncer *mocks.L1InfoTreeSyncer - ) - - if cfg.shouldSendCertificate != nil || cfg.getLastSentCertificate != nil || - cfg.saveLastSentCertificate != nil { - mockStorage = mocks.NewAggSenderStorage(t) - mockStorage.On("GetCertificatesByStatus", agglayer.NonSettledStatuses). - Return(cfg.shouldSendCertificate...) - - aggsender.storage = mockStorage - - if cfg.getLastSentCertificate != nil { - mockStorage.On("GetLastSentCertificate").Return(cfg.getLastSentCertificate...).Once() - } - - if cfg.saveLastSentCertificate != nil { - mockStorage.On("SaveLastSentCertificate", mock.Anything, mock.Anything).Return(cfg.saveLastSentCertificate...) 
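The generated mocks used above (mocks.NewAggSenderStorage, agglayer.NewAgglayerClientMock, and so on) follow testify's mock package conventions. A minimal hand-written equivalent for a hypothetical one-method certStore interface, showing the .On(...).Return(...) and AssertExpectations flow, might look like:

package main

import (
    "testing"

    "github.com/stretchr/testify/mock"
    "github.com/stretchr/testify/require"
)

// certStore is a hypothetical, trimmed-down storage interface.
type certStore interface {
    GetHeight(id string) (uint64, error)
}

// mockCertStore is a hand-written testify mock implementing certStore.
type mockCertStore struct {
    mock.Mock
}

func (m *mockCertStore) GetHeight(id string) (uint64, error) {
    args := m.Called(id)
    return args.Get(0).(uint64), args.Error(1)
}

func TestMockUsage(t *testing.T) {
    store := &mockCertStore{}
    // Program the expectation, as the deleted tests do with .On(...).Return(...).
    store.On("GetHeight", "0x1").Return(uint64(7), nil).Once()

    h, err := store.GetHeight("0x1")
    require.NoError(t, err)
    require.Equal(t, uint64(7), h)

    // Fails the test if any programmed expectation was not met.
    store.AssertExpectations(t)
}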
- } - } - - if cfg.lastL2BlockProcessed != nil || cfg.originNetwork != nil || - cfg.getBridges != nil || cfg.getClaims != nil || cfg.getInfoByGlobalExitRoot != nil { - mockL2Syncer = mocks.NewL2BridgeSyncer(t) - - mockL2Syncer.On("GetLastProcessedBlock", mock.Anything).Return(cfg.lastL2BlockProcessed...).Once() - - if cfg.getBridges != nil { - mockL2Syncer.On("GetBridgesPublished", mock.Anything, mock.Anything, mock.Anything).Return(cfg.getBridges...) - } - - if cfg.getClaims != nil { - mockL2Syncer.On("GetClaims", mock.Anything, mock.Anything, mock.Anything).Return(cfg.getClaims...).Once() - } - - if cfg.getExitRootByIndex != nil { - mockL2Syncer.On("GetExitRootByIndex", mock.Anything, mock.Anything).Return(cfg.getExitRootByIndex...).Once() - } - - if cfg.originNetwork != nil { - mockL2Syncer.On("OriginNetwork").Return(cfg.originNetwork...).Once() - } - - aggsender.l2Syncer = mockL2Syncer - } - - if cfg.sendCertificate != nil { - mockAggLayerClient = agglayer.NewAgglayerClientMock(t) - mockAggLayerClient.On("SendCertificate", mock.Anything).Return(cfg.sendCertificate...).Once() - - aggsender.aggLayerClient = mockAggLayerClient - } - - if cfg.getInfoByGlobalExitRoot != nil || - cfg.getL1InfoTreeRootByIndex != nil || cfg.getL1InfoTreeMerkleProofFromIndexToRoot != nil { - mockL1InfoTreeSyncer = mocks.NewL1InfoTreeSyncer(t) - mockL1InfoTreeSyncer.On("GetInfoByGlobalExitRoot", mock.Anything).Return(cfg.getInfoByGlobalExitRoot...).Once() - - if cfg.getL1InfoTreeRootByIndex != nil { - mockL1InfoTreeSyncer.On("GetL1InfoTreeRootByIndex", mock.Anything, mock.Anything).Return(cfg.getL1InfoTreeRootByIndex...).Once() - } - - if cfg.getL1InfoTreeMerkleProofFromIndexToRoot != nil { - mockL1InfoTreeSyncer.On("GetL1InfoTreeMerkleProofFromIndexToRoot", mock.Anything, mock.Anything, mock.Anything). 
- Return(cfg.getL1InfoTreeMerkleProofFromIndexToRoot...).Once() - } - - aggsender.l1infoTreeSyncer = mockL1InfoTreeSyncer - } - - return aggsender, mockStorage, mockL2Syncer, mockAggLayerClient, mockL1InfoTreeSyncer - } - - tests := []testCfg{ - { - name: "error getting pending certificates", - shouldSendCertificate: []interface{}{nil, errors.New("error getting pending")}, - expectedError: "error getting pending", - }, - { - name: "should not send certificate", - shouldSendCertificate: []interface{}{[]*aggsendertypes.CertificateInfo{ - {Status: agglayer.Pending}, - }, nil}, - }, - { - name: "error getting last sent certificate", - shouldSendCertificate: []interface{}{[]*aggsendertypes.CertificateInfo{}, nil}, - lastL2BlockProcessed: []interface{}{uint64(8), nil}, - getLastSentCertificate: []interface{}{&aggsendertypes.CertificateInfo{}, errors.New("error getting last sent certificate")}, - expectedError: "error getting last sent certificate", - }, - { - name: "no new blocks to send certificate", - shouldSendCertificate: []interface{}{[]*aggsendertypes.CertificateInfo{}, nil}, - lastL2BlockProcessed: []interface{}{uint64(41), nil}, - getLastSentCertificate: []interface{}{&aggsendertypes.CertificateInfo{ - Height: 41, - CertificateID: common.HexToHash("0x111"), - NewLocalExitRoot: common.HexToHash("0x13223"), - FromBlock: 31, - ToBlock: 41, - }, nil}, - }, - { - name: "get bridges error", - shouldSendCertificate: []interface{}{[]*aggsendertypes.CertificateInfo{}, nil}, - lastL2BlockProcessed: []interface{}{uint64(59), nil}, - getLastSentCertificate: []interface{}{&aggsendertypes.CertificateInfo{ - Height: 50, - CertificateID: common.HexToHash("0x1111"), - NewLocalExitRoot: common.HexToHash("0x132233"), - FromBlock: 40, - ToBlock: 41, - }, nil}, - getBridges: []interface{}{nil, errors.New("error getting bridges")}, - expectedError: "error getting bridges", - }, - { - name: "no bridges", - shouldSendCertificate: []interface{}{[]*aggsendertypes.CertificateInfo{}, nil}, - lastL2BlockProcessed: []interface{}{uint64(69), nil}, - getLastSentCertificate: []interface{}{&aggsendertypes.CertificateInfo{ - Height: 60, - CertificateID: common.HexToHash("0x11111"), - NewLocalExitRoot: common.HexToHash("0x1322233"), - FromBlock: 50, - ToBlock: 51, - }, nil}, - getBridges: []interface{}{[]bridgesync.Bridge{}, nil}, - }, - { - name: "get claims error", - shouldSendCertificate: []interface{}{[]*aggsendertypes.CertificateInfo{}, nil}, - lastL2BlockProcessed: []interface{}{uint64(79), nil}, - getLastSentCertificate: []interface{}{&aggsendertypes.CertificateInfo{ - Height: 70, - CertificateID: common.HexToHash("0x121111"), - NewLocalExitRoot: common.HexToHash("0x13122233"), - FromBlock: 60, - ToBlock: 61, - }, nil}, - getBridges: []interface{}{[]bridgesync.Bridge{ - { - BlockNum: 61, - BlockPos: 0, - LeafType: agglayer.LeafTypeAsset.Uint8(), - OriginNetwork: 1, - }, - }, nil}, - getClaims: []interface{}{nil, errors.New("error getting claims")}, - expectedError: "error getting claims", - }, - { - name: "error getting info by global exit root", - shouldSendCertificate: []interface{}{[]*aggsendertypes.CertificateInfo{}, nil}, - lastL2BlockProcessed: []interface{}{uint64(89), nil}, - getLastSentCertificate: []interface{}{&aggsendertypes.CertificateInfo{ - Height: 80, - CertificateID: common.HexToHash("0x1321111"), - NewLocalExitRoot: common.HexToHash("0x131122233"), - FromBlock: 70, - ToBlock: 71, - }, nil}, - getBridges: []interface{}{[]bridgesync.Bridge{ - { - BlockNum: 71, - BlockPos: 0, - LeafType: 
agglayer.LeafTypeAsset.Uint8(), - OriginNetwork: 1, - }, - }, nil}, - getClaims: []interface{}{[]bridgesync.Claim{ - { - IsMessage: false, - }, - }, nil}, - getInfoByGlobalExitRoot: []interface{}{nil, errors.New("error getting info by global exit root")}, - expectedError: "error getting info by global exit root", - }, - { - name: "error getting L1 Info tree root by index", - shouldSendCertificate: []interface{}{[]*aggsendertypes.CertificateInfo{}, nil}, - lastL2BlockProcessed: []interface{}{uint64(89), nil}, - getLastSentCertificate: []interface{}{&aggsendertypes.CertificateInfo{ - Height: 80, - CertificateID: common.HexToHash("0x1321111"), - NewLocalExitRoot: common.HexToHash("0x131122233"), - FromBlock: 70, - ToBlock: 71, - }, nil}, - getBridges: []interface{}{[]bridgesync.Bridge{ - { - BlockNum: 71, - BlockPos: 0, - LeafType: agglayer.LeafTypeAsset.Uint8(), - OriginNetwork: 1, - }, - }, nil}, - getClaims: []interface{}{[]bridgesync.Claim{ - { - IsMessage: false, - }, - }, nil}, - getInfoByGlobalExitRoot: []interface{}{&l1infotreesync.L1InfoTreeLeaf{ - L1InfoTreeIndex: 1, - BlockNumber: 1, - BlockPosition: 0, - PreviousBlockHash: common.HexToHash("0x123"), - Timestamp: 123456789, - MainnetExitRoot: common.HexToHash("0xccc"), - RollupExitRoot: common.HexToHash("0xddd"), - GlobalExitRoot: common.HexToHash("0xeee"), - }, nil}, - getL1InfoTreeRootByIndex: []interface{}{treeTypes.Root{}, errors.New("error getting L1 Info tree root by index")}, - expectedError: "error getting L1 Info tree root by index", - }, - { - name: "error getting L1 Info tree merkle proof from index to root", - shouldSendCertificate: []interface{}{[]*aggsendertypes.CertificateInfo{}, nil}, - lastL2BlockProcessed: []interface{}{uint64(89), nil}, - getLastSentCertificate: []interface{}{&aggsendertypes.CertificateInfo{ - Height: 80, - CertificateID: common.HexToHash("0x1321111"), - NewLocalExitRoot: common.HexToHash("0x131122233"), - FromBlock: 70, - ToBlock: 71, - }, nil}, - getBridges: []interface{}{[]bridgesync.Bridge{ - { - BlockNum: 71, - BlockPos: 0, - LeafType: agglayer.LeafTypeAsset.Uint8(), - OriginNetwork: 1, - }, - }, nil}, - getClaims: []interface{}{[]bridgesync.Claim{ - { - IsMessage: false, - GlobalIndex: big.NewInt(1), - }, - }, nil}, - getInfoByGlobalExitRoot: []interface{}{&l1infotreesync.L1InfoTreeLeaf{ - L1InfoTreeIndex: 1, - BlockNumber: 1, - BlockPosition: 0, - PreviousBlockHash: common.HexToHash("0x123"), - Timestamp: 123456789, - MainnetExitRoot: common.HexToHash("0xccc"), - RollupExitRoot: common.HexToHash("0xddd"), - GlobalExitRoot: common.HexToHash("0xeee"), - }, nil}, - getL1InfoTreeRootByIndex: []interface{}{treeTypes.Root{Hash: common.HexToHash("0xeee")}, nil}, - getL1InfoTreeMerkleProofFromIndexToRoot: []interface{}{treeTypes.Proof{}, errors.New("error getting L1 Info tree merkle proof")}, - expectedError: "error getting L1 Info tree merkle proof for leaf index", - }, - { - name: "send certificate error", - shouldSendCertificate: []interface{}{[]*aggsendertypes.CertificateInfo{}, nil}, - lastL2BlockProcessed: []interface{}{uint64(99), nil}, - getLastSentCertificate: []interface{}{&aggsendertypes.CertificateInfo{ - Height: 90, - CertificateID: common.HexToHash("0x1121111"), - NewLocalExitRoot: common.HexToHash("0x111122211"), - PreviousLocalExitRoot: &ler1, - FromBlock: 80, - ToBlock: 81, - Status: agglayer.Settled, - }, nil}, - getBridges: []interface{}{[]bridgesync.Bridge{ - { - BlockNum: 81, - BlockPos: 0, - LeafType: agglayer.LeafTypeAsset.Uint8(), - OriginNetwork: 1, - DepositCount: 1, - }, 
- }, nil}, - getClaims: []interface{}{[]bridgesync.Claim{}, nil}, - getExitRootByIndex: []interface{}{treeTypes.Root{}, nil}, - originNetwork: []interface{}{uint32(1), nil}, - sendCertificate: []interface{}{common.Hash{}, errors.New("error sending certificate")}, - sequencerKey: privateKey, - expectedError: "error sending certificate", - }, - { - name: "store last sent certificate error", - shouldSendCertificate: []interface{}{[]*aggsendertypes.CertificateInfo{}, nil}, - lastL2BlockProcessed: []interface{}{uint64(109), nil}, - getLastSentCertificate: []interface{}{&aggsendertypes.CertificateInfo{ - Height: 100, - CertificateID: common.HexToHash("0x11121111"), - NewLocalExitRoot: common.HexToHash("0x1211122211"), - FromBlock: 90, - ToBlock: 91, - Status: agglayer.Settled, - }, nil}, - getBridges: []interface{}{[]bridgesync.Bridge{ - { - BlockNum: 91, - BlockPos: 0, - LeafType: agglayer.LeafTypeAsset.Uint8(), - OriginNetwork: 1, - DepositCount: 1, - }, - }, nil}, - getClaims: []interface{}{[]bridgesync.Claim{}, nil}, - getExitRootByIndex: []interface{}{treeTypes.Root{}, nil}, - originNetwork: []interface{}{uint32(1), nil}, - sendCertificate: []interface{}{common.Hash{}, nil}, - saveLastSentCertificate: []interface{}{errors.New("error saving last sent certificate in db")}, - sequencerKey: privateKey, - expectedError: "error saving last sent certificate in db", - }, - { - name: "successful sending of certificate", - shouldSendCertificate: []interface{}{[]*aggsendertypes.CertificateInfo{}, nil}, - lastL2BlockProcessed: []interface{}{uint64(119), nil}, - getLastSentCertificate: []interface{}{&aggsendertypes.CertificateInfo{ - Height: 110, - CertificateID: common.HexToHash("0x12121111"), - NewLocalExitRoot: common.HexToHash("0x1221122211"), - FromBlock: 100, - ToBlock: 101, - Status: agglayer.Settled, - }, nil}, - getBridges: []interface{}{[]bridgesync.Bridge{ - { - BlockNum: 101, - BlockPos: 0, - LeafType: agglayer.LeafTypeAsset.Uint8(), - OriginNetwork: 1, - DepositCount: 1, - }, - }, nil}, - getClaims: []interface{}{[]bridgesync.Claim{}, nil}, - getExitRootByIndex: []interface{}{treeTypes.Root{}, nil}, - originNetwork: []interface{}{uint32(1), nil}, - sendCertificate: []interface{}{common.Hash{}, nil}, - saveLastSentCertificate: []interface{}{nil}, - sequencerKey: privateKey, - }, - } - - for _, tt := range tests { - tt := tt - - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - aggsender, mockStorage, mockL2Syncer, - mockAggLayerClient, mockL1InfoTreeSyncer := setupTest(tt) - - _, err := aggsender.sendCertificate(context.Background()) - - if tt.expectedError != "" { - require.ErrorContains(t, err, tt.expectedError) - } else { - require.NoError(t, err) - } - - if mockStorage != nil { - mockStorage.AssertExpectations(t) - } - - if mockL2Syncer != nil { - mockL2Syncer.AssertExpectations(t) - } - - if mockAggLayerClient != nil { - mockAggLayerClient.AssertExpectations(t) - } - - if mockL1InfoTreeSyncer != nil { - mockL1InfoTreeSyncer.AssertExpectations(t) - } - }) - } -} - -func TestExtractSignatureData(t *testing.T) { - t.Parallel() - - testR := common.HexToHash("0x1") - testV := common.HexToHash("0x2") - - tests := []struct { - name string - signature []byte - expectedR common.Hash - expectedS common.Hash - expectedOddParity bool - expectedError error - }{ - { - name: "Valid signature - odd parity", - signature: append(append(testR.Bytes(), testV.Bytes()...), 1), - expectedR: testR, - expectedS: testV, - expectedOddParity: true, - expectedError: nil, - }, - { - name: "Valid signature - 
even parity", - signature: append(append(testR.Bytes(), testV.Bytes()...), 2), - expectedR: testR, - expectedS: testV, - expectedOddParity: false, - expectedError: nil, - }, - { - name: "Invalid signature size", - signature: make([]byte, 64), // Invalid size - expectedError: errInvalidSignatureSize, - }, - } - - for _, tt := range tests { - tt := tt - - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - r, s, isOddParity, err := extractSignatureData(tt.signature) - - if tt.expectedError != nil { - require.Error(t, err) - require.Equal(t, tt.expectedError, err) - } else { - require.NoError(t, err) - require.Equal(t, tt.expectedR, r) - require.Equal(t, tt.expectedS, s) - require.Equal(t, tt.expectedOddParity, isOddParity) - } - }) - } -} - -func TestExploratoryGenerateCert(t *testing.T) { - t.Skip("This test is only for exploratory purposes, to generate json format of the certificate") - - key, err := crypto.GenerateKey() - require.NoError(t, err) - - signature, err := crypto.Sign(common.HexToHash("0x1").Bytes(), key) - require.NoError(t, err) - - r, s, v, err := extractSignatureData(signature) - require.NoError(t, err) - - certificate := &agglayer.SignedCertificate{ - Certificate: &agglayer.Certificate{ - NetworkID: 1, - Height: 1, - PrevLocalExitRoot: common.HexToHash("0x1"), - NewLocalExitRoot: common.HexToHash("0x2"), - BridgeExits: []*agglayer.BridgeExit{ - { - LeafType: agglayer.LeafTypeAsset, - TokenInfo: &agglayer.TokenInfo{ - OriginNetwork: 1, - OriginTokenAddress: common.HexToAddress("0x11"), - }, - DestinationNetwork: 2, - DestinationAddress: common.HexToAddress("0x22"), - Amount: big.NewInt(100), - Metadata: []byte("metadata"), - }, - }, - ImportedBridgeExits: []*agglayer.ImportedBridgeExit{ - { - GlobalIndex: &agglayer.GlobalIndex{ - MainnetFlag: false, - RollupIndex: 1, - LeafIndex: 11, - }, - BridgeExit: &agglayer.BridgeExit{ - LeafType: agglayer.LeafTypeAsset, - TokenInfo: &agglayer.TokenInfo{ - OriginNetwork: 1, - OriginTokenAddress: common.HexToAddress("0x11"), - }, - DestinationNetwork: 2, - DestinationAddress: common.HexToAddress("0x22"), - Amount: big.NewInt(100), - Metadata: []byte("metadata"), - }, - ClaimData: &agglayer.ClaimFromMainnnet{ - ProofLeafMER: &agglayer.MerkleProof{ - Root: common.HexToHash("0x1"), - Proof: [32]common.Hash{}, - }, - ProofGERToL1Root: &agglayer.MerkleProof{ - Root: common.HexToHash("0x3"), - Proof: [32]common.Hash{}, - }, - L1Leaf: &agglayer.L1InfoTreeLeaf{ - L1InfoTreeIndex: 1, - RollupExitRoot: common.HexToHash("0x4"), - MainnetExitRoot: common.HexToHash("0x5"), - Inner: &agglayer.L1InfoTreeLeafInner{ - GlobalExitRoot: common.HexToHash("0x6"), - BlockHash: common.HexToHash("0x7"), - Timestamp: 1231, - }, - }, - }, - }, - }, - }, - Signature: &agglayer.Signature{ - R: r, - S: s, - OddParity: v, - }, - } - - file, err := os.Create("test.json") - require.NoError(t, err) - - defer file.Close() - - encoder := json.NewEncoder(file) - encoder.SetIndent("", " ") - require.NoError(t, encoder.Encode(certificate)) -} - -func TestGetNextHeightAndPreviousLER(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - lastSentCertificateInfo *aggsendertypes.CertificateInfo - lastSettleCertificateInfoCall bool - lastSettleCertificateInfo *aggsendertypes.CertificateInfo - lastSettleCertificateInfoError error - expectedHeight uint64 - expectedPreviousLER common.Hash - expectedError bool - }{ - { - name: "Normal case", - lastSentCertificateInfo: &aggsendertypes.CertificateInfo{ - Height: 10, - NewLocalExitRoot: common.HexToHash("0x123"), - 
Status: agglayer.Settled, - }, - expectedHeight: 11, - expectedPreviousLER: common.HexToHash("0x123"), - }, - { - name: "First certificate", - lastSentCertificateInfo: nil, - expectedHeight: 0, - expectedPreviousLER: zeroLER, - }, - { - name: "First certificate error, with prevLER", - lastSentCertificateInfo: &aggsendertypes.CertificateInfo{ - Height: 0, - NewLocalExitRoot: common.HexToHash("0x123"), - Status: agglayer.InError, - PreviousLocalExitRoot: &ler1, - }, - expectedHeight: 0, - expectedPreviousLER: ler1, - }, - { - name: "First certificate error, no prevLER", - lastSentCertificateInfo: &aggsendertypes.CertificateInfo{ - Height: 0, - NewLocalExitRoot: common.HexToHash("0x123"), - Status: agglayer.InError, - }, - expectedHeight: 0, - expectedPreviousLER: zeroLER, - }, - { - name: "n certificate error, prevLER", - lastSentCertificateInfo: &aggsendertypes.CertificateInfo{ - Height: 10, - NewLocalExitRoot: common.HexToHash("0x123"), - PreviousLocalExitRoot: &ler1, - Status: agglayer.InError, - }, - expectedHeight: 10, - expectedPreviousLER: ler1, - }, - { - name: "last cert not closed, error", - lastSentCertificateInfo: &aggsendertypes.CertificateInfo{ - Height: 10, - NewLocalExitRoot: common.HexToHash("0x123"), - PreviousLocalExitRoot: &ler1, - Status: agglayer.Pending, - }, - expectedHeight: 10, - expectedPreviousLER: ler1, - expectedError: true, - }, - { - name: "Previous certificate in error, no prevLER", - lastSentCertificateInfo: &aggsendertypes.CertificateInfo{ - Height: 10, - NewLocalExitRoot: common.HexToHash("0x123"), - Status: agglayer.InError, - }, - lastSettleCertificateInfo: &aggsendertypes.CertificateInfo{ - Height: 9, - NewLocalExitRoot: common.HexToHash("0x3456"), - Status: agglayer.Settled, - }, - expectedHeight: 10, - expectedPreviousLER: common.HexToHash("0x3456"), - }, - { - name: "Previous certificate in error, no prevLER. Error getting previous cert", - lastSentCertificateInfo: &aggsendertypes.CertificateInfo{ - Height: 10, - NewLocalExitRoot: common.HexToHash("0x123"), - Status: agglayer.InError, - }, - lastSettleCertificateInfo: nil, - lastSettleCertificateInfoError: errors.New("error getting last settle certificate"), - expectedError: true, - }, - { - name: "Previous certificate in error, no prevLER. prev cert not available on storage", - lastSentCertificateInfo: &aggsendertypes.CertificateInfo{ - Height: 10, - NewLocalExitRoot: common.HexToHash("0x123"), - Status: agglayer.InError, - }, - lastSettleCertificateInfoCall: true, - lastSettleCertificateInfo: nil, - lastSettleCertificateInfoError: nil, - expectedError: true, - }, - { - name: "Previous certificate in error, no prevLER. 
prev cert not available on storage", - lastSentCertificateInfo: &aggsendertypes.CertificateInfo{ - Height: 10, - NewLocalExitRoot: common.HexToHash("0x123"), - Status: agglayer.InError, - }, - lastSettleCertificateInfo: &aggsendertypes.CertificateInfo{ - Height: 9, - NewLocalExitRoot: common.HexToHash("0x3456"), - Status: agglayer.InError, - }, - lastSettleCertificateInfoError: nil, - expectedError: true, - }, - } - - for _, tt := range tests { - tt := tt - - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - storageMock := mocks.NewAggSenderStorage(t) - aggSender := &AggSender{log: log.WithFields("aggsender-test", "getNextHeightAndPreviousLER"), storage: storageMock} - if tt.lastSettleCertificateInfoCall || tt.lastSettleCertificateInfo != nil || tt.lastSettleCertificateInfoError != nil { - storageMock.EXPECT().GetCertificateByHeight(mock.Anything).Return(tt.lastSettleCertificateInfo, tt.lastSettleCertificateInfoError).Once() - } - height, previousLER, err := aggSender.getNextHeightAndPreviousLER(tt.lastSentCertificateInfo) - if tt.expectedError { - require.Error(t, err) - } else { - require.NoError(t, err) - require.Equal(t, tt.expectedHeight, height) - require.Equal(t, tt.expectedPreviousLER, previousLER) - } - }) - } -} - -func TestSendCertificate_NoClaims(t *testing.T) { - privateKey, err := crypto.GenerateKey() - require.NoError(t, err) - - ctx := context.Background() - mockStorage := mocks.NewAggSenderStorage(t) - mockL2Syncer := mocks.NewL2BridgeSyncer(t) - mockAggLayerClient := agglayer.NewAgglayerClientMock(t) - mockL1InfoTreeSyncer := mocks.NewL1InfoTreeSyncer(t) - - aggSender := &AggSender{ - log: log.WithFields("aggsender-test", "no claims test"), - storage: mockStorage, - l2Syncer: mockL2Syncer, - aggLayerClient: mockAggLayerClient, - l1infoTreeSyncer: mockL1InfoTreeSyncer, - sequencerKey: privateKey, - cfg: Config{}, - } - - mockStorage.On("GetCertificatesByStatus", agglayer.NonSettledStatuses).Return([]*aggsendertypes.CertificateInfo{}, nil).Once() - mockStorage.On("GetLastSentCertificate").Return(&aggsendertypes.CertificateInfo{ - NewLocalExitRoot: common.HexToHash("0x123"), - Height: 1, - FromBlock: 0, - ToBlock: 10, - Status: agglayer.Settled, - }, nil).Once() - mockStorage.On("SaveLastSentCertificate", mock.Anything, mock.Anything).Return(nil).Once() - mockL2Syncer.On("GetLastProcessedBlock", mock.Anything).Return(uint64(50), nil) - mockL2Syncer.On("GetBridgesPublished", mock.Anything, uint64(11), uint64(50)).Return([]bridgesync.Bridge{ - { - BlockNum: 30, - BlockPos: 0, - LeafType: agglayer.LeafTypeAsset.Uint8(), - OriginNetwork: 1, - OriginAddress: common.HexToAddress("0x1"), - DestinationNetwork: 2, - DestinationAddress: common.HexToAddress("0x2"), - Amount: big.NewInt(100), - Metadata: []byte("metadata"), - DepositCount: 1, - }, - }, nil) - mockL2Syncer.On("GetClaims", mock.Anything, uint64(11), uint64(50)).Return([]bridgesync.Claim{}, nil) - mockL2Syncer.On("GetExitRootByIndex", mock.Anything, uint32(1)).Return(treeTypes.Root{}, nil).Once() - mockL2Syncer.On("OriginNetwork").Return(uint32(1), nil).Once() - mockAggLayerClient.On("SendCertificate", mock.Anything).Return(common.Hash{}, nil).Once() - - signedCertificate, err := aggSender.sendCertificate(ctx) - require.NoError(t, err) - require.NotNil(t, signedCertificate) - require.NotNil(t, signedCertificate.Signature) - require.NotNil(t, signedCertificate.Certificate) - require.NotNil(t, signedCertificate.Certificate.ImportedBridgeExits) - require.Len(t, signedCertificate.Certificate.BridgeExits, 1) - - 
mockStorage.AssertExpectations(t) - mockL2Syncer.AssertExpectations(t) - mockAggLayerClient.AssertExpectations(t) - mockL1InfoTreeSyncer.AssertExpectations(t) -} - -func TestExtractFromCertificateMetadataToBlock(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - metadata common.Hash - expected aggsendertypes.CertificateMetadata - }{ - { - name: "Valid metadata", - metadata: aggsendertypes.NewCertificateMetadata(0, 1000, 123567890).ToHash(), - expected: aggsendertypes.CertificateMetadata{ - Version: 1, - FromBlock: 0, - Offset: 1000, - CreatedAt: 123567890, - }, - }, - { - name: "Zero metadata", - metadata: aggsendertypes.NewCertificateMetadata(0, 0, 0).ToHash(), - expected: aggsendertypes.CertificateMetadata{ - Version: 1, - FromBlock: 0, - Offset: 0, - CreatedAt: 0, - }, - }, - } - - for _, tt := range tests { - tt := tt - - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - result := *aggsendertypes.NewCertificateMetadataFromHash(tt.metadata) - require.Equal(t, tt.expected, result) - }) - } -} - -func TestCheckLastCertificateFromAgglayer_ErrorAggLayer(t *testing.T) { - testData := newAggsenderTestData(t, testDataFlagMockStorage) - testData.l2syncerMock.EXPECT().OriginNetwork().Return(networkIDTest).Once() - testData.agglayerClientMock.EXPECT().GetLatestKnownCertificateHeader(networkIDTest).Return(nil, fmt.Errorf("unittest error")).Once() - - err := testData.sut.checkLastCertificateFromAgglayer(testData.ctx) - - require.Error(t, err) -} - -func TestCheckLastCertificateFromAgglayer_ErrorStorageGetLastSentCertificate(t *testing.T) { - testData := newAggsenderTestData(t, testDataFlagMockStorage) - testData.l2syncerMock.EXPECT().OriginNetwork().Return(networkIDTest).Once() - testData.agglayerClientMock.EXPECT().GetLatestKnownCertificateHeader(networkIDTest).Return(nil, nil).Once() - testData.storageMock.EXPECT().GetLastSentCertificate().Return(nil, fmt.Errorf("unittest error")) - - err := testData.sut.checkLastCertificateFromAgglayer(testData.ctx) - - require.Error(t, err) -} - -// TestCheckLastCertificateFromAgglayer_Case1NoCerts -// CASE 1: No certificates in local storage and agglayer -// Aggsender and agglayer are empty so it's ok -func TestCheckLastCertificateFromAgglayer_Case1NoCerts(t *testing.T) { - testData := newAggsenderTestData(t, testDataFlagNone) - testData.l2syncerMock.EXPECT().OriginNetwork().Return(networkIDTest).Once() - testData.agglayerClientMock.EXPECT().GetLatestKnownCertificateHeader(networkIDTest).Return(nil, nil).Once() - - err := testData.sut.checkLastCertificateFromAgglayer(testData.ctx) - - require.NoError(t, err) -} - -// TestCheckLastCertificateFromAgglayer_Case2NoCertLocalCertRemote -// CASE 2: No certificates in local storage but agglayer has one -// The local DB is empty and we set the lastCert reported by AggLayer -func TestCheckLastCertificateFromAgglayer_Case2NoCertLocalCertRemote(t *testing.T) { - testData := newAggsenderTestData(t, testDataFlagNone) - testData.l2syncerMock.EXPECT().OriginNetwork().Return(networkIDTest).Once() - testData.agglayerClientMock.EXPECT().GetLatestKnownCertificateHeader(networkIDTest). 
- Return(certInfoToCertHeader(t, &testData.testCerts[0], networkIDTest), nil).Once() - - err := testData.sut.checkLastCertificateFromAgglayer(testData.ctx) - - require.NoError(t, err) - localCert, err := testData.sut.storage.GetLastSentCertificate() - require.NoError(t, err) - require.Equal(t, testData.testCerts[0].CertificateID, localCert.CertificateID) -} - -// TestCheckLastCertificateFromAgglayer_Case2NoCertLocalCertRemoteErrorStorage -// sub case of previous one that fails to update local storage -func TestCheckLastCertificateFromAgglayer_Case2NoCertLocalCertRemoteErrorStorage(t *testing.T) { - testData := newAggsenderTestData(t, testDataFlagMockStorage) - testData.l2syncerMock.EXPECT().OriginNetwork().Return(networkIDTest).Once() - testData.agglayerClientMock.EXPECT().GetLatestKnownCertificateHeader(networkIDTest). - Return(certInfoToCertHeader(t, &testData.testCerts[0], networkIDTest), nil).Once() - testData.storageMock.EXPECT().GetLastSentCertificate().Return(nil, nil) - testData.storageMock.EXPECT().SaveLastSentCertificate(mock.Anything, mock.Anything).Return(errTest).Once() - err := testData.sut.checkLastCertificateFromAgglayer(testData.ctx) - - require.Error(t, err) -} - -// CASE 2.1: certificate in storage but not in agglayer -// sub case of previous one that fails to update local storage -func TestCheckLastCertificateFromAgglayer_Case2_1NoCertRemoteButCertLocal(t *testing.T) { - testData := newAggsenderTestData(t, testDataFlagMockStorage) - testData.l2syncerMock.EXPECT().OriginNetwork().Return(networkIDTest).Once() - testData.agglayerClientMock.EXPECT().GetLatestKnownCertificateHeader(networkIDTest). - Return(nil, nil).Once() - testData.storageMock.EXPECT().GetLastSentCertificate().Return(&testData.testCerts[0], nil) - err := testData.sut.checkLastCertificateFromAgglayer(testData.ctx) - - require.Error(t, err) -} - -// CASE 3.1: the certificate on the agglayer has less height than the one stored in the local storage -func TestCheckLastCertificateFromAgglayer_Case3_1LessHeight(t *testing.T) { - testData := newAggsenderTestData(t, testDataFlagMockStorage) - testData.l2syncerMock.EXPECT().OriginNetwork().Return(networkIDTest).Once() - testData.agglayerClientMock.EXPECT().GetLatestKnownCertificateHeader(networkIDTest). - Return(certInfoToCertHeader(t, &testData.testCerts[0], networkIDTest), nil).Once() - testData.storageMock.EXPECT().GetLastSentCertificate().Return(&testData.testCerts[1], nil) - - err := testData.sut.checkLastCertificateFromAgglayer(testData.ctx) - - require.ErrorContains(t, err, "recovery: the last certificate in the agglayer has less height (1) than the one in the local storage (2)") -} - -// CASE 3.2: AggSender and AggLayer not same height. AggLayer has a new certificate -func TestCheckLastCertificateFromAgglayer_Case3_2Mismatch(t *testing.T) { - testData := newAggsenderTestData(t, testDataFlagMockStorage) - testData.l2syncerMock.EXPECT().OriginNetwork().Return(networkIDTest).Once() - testData.agglayerClientMock.EXPECT().GetLatestKnownCertificateHeader(networkIDTest). 
- Return(certInfoToCertHeader(t, &testData.testCerts[1], networkIDTest), nil).Once() - testData.storageMock.EXPECT().GetLastSentCertificate().Return(&testData.testCerts[0], nil) - testData.storageMock.EXPECT().SaveLastSentCertificate(mock.Anything, mock.Anything).Return(nil).Once() - - err := testData.sut.checkLastCertificateFromAgglayer(testData.ctx) - - require.NoError(t, err) -} - -// CASE 4: AggSender and AggLayer not same certificateID -func TestCheckLastCertificateFromAgglayer_Case4Mismatch(t *testing.T) { - testData := newAggsenderTestData(t, testDataFlagMockStorage) - testData.l2syncerMock.EXPECT().OriginNetwork().Return(networkIDTest).Once() - testData.agglayerClientMock.EXPECT().GetLatestKnownCertificateHeader(networkIDTest). - Return(certInfoToCertHeader(t, &testData.testCerts[0], networkIDTest), nil).Once() - testData.storageMock.EXPECT().GetLastSentCertificate().Return(&testData.testCerts[1], nil) - - err := testData.sut.checkLastCertificateFromAgglayer(testData.ctx) - - require.Error(t, err) -} - -// CASE 5: AggSender and AggLayer same certificateID and same status -func TestCheckLastCertificateFromAgglayer_Case5SameStatus(t *testing.T) { - testData := newAggsenderTestData(t, testDataFlagMockStorage) - testData.l2syncerMock.EXPECT().OriginNetwork().Return(networkIDTest).Once() - testData.agglayerClientMock.EXPECT().GetLatestKnownCertificateHeader(networkIDTest). - Return(certInfoToCertHeader(t, &testData.testCerts[0], networkIDTest), nil).Once() - testData.storageMock.EXPECT().GetLastSentCertificate().Return(&testData.testCerts[0], nil) - - err := testData.sut.checkLastCertificateFromAgglayer(testData.ctx) - - require.NoError(t, err) -} - -// CASE 5: AggSender and AggLayer same certificateID and differ on status -func TestCheckLastCertificateFromAgglayer_Case5UpdateStatus(t *testing.T) { - testData := newAggsenderTestData(t, testDataFlagMockStorage) - testData.l2syncerMock.EXPECT().OriginNetwork().Return(networkIDTest).Once() - aggLayerCert := certInfoToCertHeader(t, &testData.testCerts[0], networkIDTest) - aggLayerCert.Status = agglayer.Settled - testData.agglayerClientMock.EXPECT().GetLatestKnownCertificateHeader(networkIDTest). - Return(aggLayerCert, nil).Once() - testData.storageMock.EXPECT().GetLastSentCertificate().Return(&testData.testCerts[0], nil) - testData.storageMock.EXPECT().UpdateCertificate(mock.Anything, mock.Anything).Return(nil).Once() - - err := testData.sut.checkLastCertificateFromAgglayer(testData.ctx) - - require.NoError(t, err) -} - -// CASE 4: AggSender and AggLayer same certificateID and differ on status but fails update -func TestCheckLastCertificateFromAgglayer_Case4ErrorUpdateStatus(t *testing.T) { - testData := newAggsenderTestData(t, testDataFlagMockStorage) - testData.l2syncerMock.EXPECT().OriginNetwork().Return(networkIDTest).Once() - aggLayerCert := certInfoToCertHeader(t, &testData.testCerts[0], networkIDTest) - aggLayerCert.Status = agglayer.Settled - testData.agglayerClientMock.EXPECT().GetLatestKnownCertificateHeader(networkIDTest). 
- Return(aggLayerCert, nil).Once() - testData.storageMock.EXPECT().GetLastSentCertificate().Return(&testData.testCerts[0], nil) - testData.storageMock.EXPECT().UpdateCertificate(mock.Anything, mock.Anything).Return(errTest).Once() - - err := testData.sut.checkLastCertificateFromAgglayer(testData.ctx) - - require.Error(t, err) -} - -func TestLimitSize_FirstOneFit(t *testing.T) { - testData := newAggsenderTestData(t, testDataFlagMockStorage) - certParams := &aggsendertypes.CertificateBuildParams{ - FromBlock: uint64(1), - ToBlock: uint64(20), - Bridges: NewBridgesData(t, 1, []uint64{1}), - } - newCert, err := testData.sut.limitCertSize(certParams) - require.NoError(t, err) - require.Equal(t, certParams, newCert) -} - -func TestLimitSize_FirstMinusOneFit(t *testing.T) { - testData := newAggsenderTestData(t, testDataFlagMockStorage) - testData.sut.cfg.MaxCertSize = (aggsendertypes.EstimatedSizeBridgeExit * 3) + 1 - certParams := &aggsendertypes.CertificateBuildParams{ - FromBlock: uint64(1), - ToBlock: uint64(20), - Bridges: NewBridgesData(t, 0, []uint64{19, 19, 19, 20}), - } - newCert, err := testData.sut.limitCertSize(certParams) - require.NoError(t, err) - require.Equal(t, uint64(19), newCert.ToBlock) -} - -func TestLimitSize_NoWayToFitInMaxSize(t *testing.T) { - testData := newAggsenderTestData(t, testDataFlagMockStorage) - testData.sut.cfg.MaxCertSize = (aggsendertypes.EstimatedSizeBridgeExit * 2) + 1 - certParams := &aggsendertypes.CertificateBuildParams{ - FromBlock: uint64(1), - ToBlock: uint64(20), - Bridges: NewBridgesData(t, 0, []uint64{19, 19, 19, 20}), - } - newCert, err := testData.sut.limitCertSize(certParams) - require.NoError(t, err) - require.Equal(t, uint64(19), newCert.ToBlock) -} - -func TestLimitSize_MinNumBlocks(t *testing.T) { - testData := newAggsenderTestData(t, testDataFlagMockStorage) - testData.sut.cfg.MaxCertSize = (aggsendertypes.EstimatedSizeBridgeExit * 2) + 1 - certParams := &aggsendertypes.CertificateBuildParams{ - FromBlock: uint64(1), - ToBlock: uint64(2), - Bridges: NewBridgesData(t, 0, []uint64{1, 1, 1, 2, 2, 2}), - } - newCert, err := testData.sut.limitCertSize(certParams) - require.NoError(t, err) - require.Equal(t, uint64(1), newCert.ToBlock) -} - -func TestGetLastSentBlockAndRetryCount(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - lastSentCertificateInfo *aggsendertypes.CertificateInfo - expectedBlock uint64 - expectedRetryCount int - }{ - { - name: "No last sent certificate", - lastSentCertificateInfo: nil, - expectedBlock: 0, - expectedRetryCount: 0, - }, - { - name: "Last sent certificate with no error", - lastSentCertificateInfo: &aggsendertypes.CertificateInfo{ - ToBlock: 10, - Status: agglayer.Settled, - }, - expectedBlock: 10, - expectedRetryCount: 0, - }, - { - name: "Last sent certificate with error and non-zero FromBlock", - lastSentCertificateInfo: &aggsendertypes.CertificateInfo{ - FromBlock: 5, - ToBlock: 10, - Status: agglayer.InError, - RetryCount: 1, - }, - expectedBlock: 4, - expectedRetryCount: 2, - }, - { - name: "Last sent certificate with error and zero FromBlock", - lastSentCertificateInfo: &aggsendertypes.CertificateInfo{ - FromBlock: 0, - ToBlock: 10, - Status: agglayer.InError, - RetryCount: 1, - }, - expectedBlock: 10, - expectedRetryCount: 2, - }, - } - - for _, tt := range tests { - tt := tt - - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - block, retryCount := getLastSentBlockAndRetryCount(tt.lastSentCertificateInfo) - - require.Equal(t, tt.expectedBlock, block) - require.Equal(t, 
tt.expectedRetryCount, retryCount) - }) - } -} - -type testDataFlags = int - -const ( - testDataFlagNone testDataFlags = 0 - testDataFlagMockStorage testDataFlags = 1 -) - -type aggsenderTestData struct { - ctx context.Context - agglayerClientMock *agglayer.AgglayerClientMock - l2syncerMock *mocks.L2BridgeSyncer - l1InfoTreeSyncerMock *mocks.L1InfoTreeSyncer - storageMock *mocks.AggSenderStorage - sut *AggSender - testCerts []aggsendertypes.CertificateInfo -} - -func NewBridgesData(t *testing.T, num int, blockNum []uint64) []bridgesync.Bridge { - t.Helper() - if num == 0 { - num = len(blockNum) - } - res := make([]bridgesync.Bridge, 0) - for i := 0; i < num; i++ { - res = append(res, bridgesync.Bridge{ - BlockNum: blockNum[i%len(blockNum)], - BlockPos: 0, - LeafType: agglayer.LeafTypeAsset.Uint8(), - OriginNetwork: 1, - }) - } - return res -} - -func NewClaimData(t *testing.T, num int, blockNum []uint64) []bridgesync.Claim { - t.Helper() - if num == 0 { - num = len(blockNum) - } - res := make([]bridgesync.Claim, 0) - for i := 0; i < num; i++ { - res = append(res, bridgesync.Claim{ - BlockNum: blockNum[i%len(blockNum)], - BlockPos: 0, - }) - } - return res -} - -func certInfoToCertHeader(t *testing.T, certInfo *aggsendertypes.CertificateInfo, networkID uint32) *agglayer.CertificateHeader { - t.Helper() - if certInfo == nil { - return nil - } - return &agglayer.CertificateHeader{ - Height: certInfo.Height, - NetworkID: networkID, - CertificateID: certInfo.CertificateID, - NewLocalExitRoot: certInfo.NewLocalExitRoot, - Status: agglayer.Pending, - Metadata: aggsendertypes.NewCertificateMetadata( - certInfo.FromBlock, - uint32(certInfo.FromBlock-certInfo.ToBlock), - certInfo.CreatedAt, - ).ToHash(), - } -} - -func newAggsenderTestData(t *testing.T, creationFlags testDataFlags) *aggsenderTestData { - t.Helper() - l2syncerMock := mocks.NewL2BridgeSyncer(t) - agglayerClientMock := agglayer.NewAgglayerClientMock(t) - l1InfoTreeSyncerMock := mocks.NewL1InfoTreeSyncer(t) - logger := log.WithFields("aggsender-test", "checkLastCertificateFromAgglayer") - var storageMock *mocks.AggSenderStorage - var storage db.AggSenderStorage - var err error - if creationFlags&testDataFlagMockStorage != 0 { - storageMock = mocks.NewAggSenderStorage(t) - storage = storageMock - } else { - dbPath := path.Join(t.TempDir(), "newAggsenderTestData.sqlite") - storageConfig := db.AggSenderSQLStorageConfig{ - DBPath: dbPath, - KeepCertificatesHistory: true, - } - storage, err = db.NewAggSenderSQLStorage(logger, storageConfig) - require.NoError(t, err) - } - - ctx := context.TODO() - sut := &AggSender{ - log: logger, - l2Syncer: l2syncerMock, - aggLayerClient: agglayerClientMock, - storage: storage, - l1infoTreeSyncer: l1InfoTreeSyncerMock, - cfg: Config{ - MaxCertSize: 1024 * 1024, - }, - } - testCerts := []aggsendertypes.CertificateInfo{ - { - Height: 1, - CertificateID: common.HexToHash("0x1"), - NewLocalExitRoot: common.HexToHash("0x2"), - Status: agglayer.Pending, - }, - { - Height: 2, - CertificateID: common.HexToHash("0x1a111"), - NewLocalExitRoot: common.HexToHash("0x2a2"), - Status: agglayer.Pending, - }, - } - - return &aggsenderTestData{ - ctx: ctx, - agglayerClientMock: agglayerClientMock, - l2syncerMock: l2syncerMock, - l1InfoTreeSyncerMock: l1InfoTreeSyncerMock, - storageMock: storageMock, - sut: sut, - testCerts: testCerts, - } -} diff --git a/aggsender/block_notifier_polling.go b/aggsender/block_notifier_polling.go deleted file mode 100644 index dce860e85..000000000 --- a/aggsender/block_notifier_polling.go +++ 
/dev/null @@ -1,228 +0,0 @@ -package aggsender - -import ( - "context" - "fmt" - "math/big" - "sync" - "time" - - "github.com/0xPolygon/cdk/aggsender/types" - "github.com/0xPolygon/cdk/etherman" -) - -var ( - timeNowFunc = time.Now -) - -const ( - AutomaticBlockInterval = time.Second * 0 - // minBlockInterval is the minimum interval at which the AggSender will check for new blocks - minBlockInterval = time.Second - // maxBlockInterval is the maximum interval at which the AggSender will check for new blocks - maxBlockInterval = time.Minute - // percentForNextBlock is the percentage of the remaining time to the expected next block used as the polling delay - percentForNextBlock = 80 -) - -type ConfigBlockNotifierPolling struct { - // BlockFinalityType is the finality of the block to be notified - BlockFinalityType etherman.BlockNumberFinality - // CheckNewBlockInterval is the interval at which the AggSender will check for new blocks - // if it is 0 it will be calculated automatically - CheckNewBlockInterval time.Duration -} - -type BlockNotifierPolling struct { - ethClient types.EthClient - blockFinality *big.Int - logger types.Logger - config ConfigBlockNotifierPolling - mu sync.Mutex - lastStatus *blockNotifierPollingInternalStatus - types.GenericSubscriber[types.EventNewBlock] -} - -// NewBlockNotifierPolling creates a new BlockNotifierPolling. -// If param `subscriber` is nil, a new GenericSubscriberImpl[types.EventNewBlock] will be created. -// To use this type you need to subscribe; each time a new block appears the subscriber -// will be notified through the channel (see the unit test TestExploratoryBlockNotifierPolling -// for more information) -func NewBlockNotifierPolling(ethClient types.EthClient, - config ConfigBlockNotifierPolling, - logger types.Logger, - subscriber types.GenericSubscriber[types.EventNewBlock]) (*BlockNotifierPolling, error) { - if subscriber == nil { - subscriber = NewGenericSubscriberImpl[types.EventNewBlock]() - } - finality, err := config.BlockFinalityType.ToBlockNum() - if err != nil { - return nil, fmt.Errorf("failed to convert block finality type to block number: %w", err) - } - - return &BlockNotifierPolling{ - ethClient: ethClient, - blockFinality: finality, - logger: logger, - config: config, - GenericSubscriber: subscriber, - }, nil -} - -func (b *BlockNotifierPolling) String() string { - status := b.getGlobalStatus() - res := fmt.Sprintf("BlockNotifierPolling: finality=%s", b.config.BlockFinalityType) - if status != nil { - res += fmt.Sprintf(" lastBlockSeen=%d", status.lastBlockSeen) - } else { - res += " lastBlockSeen=none" - } - return res -} - -// Start starts the BlockNotifierPolling, blocking the current goroutine -func (b *BlockNotifierPolling) Start(ctx context.Context) { - ticker := time.NewTimer(b.config.CheckNewBlockInterval) - defer ticker.Stop() - - var status *blockNotifierPollingInternalStatus = nil - - for { - select { - case <-ctx.Done(): - return - case <-ticker.C: - delay, newStatus, event := b.step(ctx, status) - status = newStatus - b.setGlobalStatus(status) - if event != nil { - b.Publish(*event) - } - ticker.Reset(delay) - } - } -} - -func (b *BlockNotifierPolling) setGlobalStatus(status *blockNotifierPollingInternalStatus) { - b.mu.Lock() - defer b.mu.Unlock() - b.lastStatus = status -} - -func (b *BlockNotifierPolling) getGlobalStatus() *blockNotifierPollingInternalStatus { - b.mu.Lock() - defer b.mu.Unlock() - if b.lastStatus == nil { - return nil - } - copyStatus := *b.lastStatus - return &copyStatus -} - -// step is the main function of the BlockNotifierPolling; it checks if there is a new
block -// it returns: -// - the delay for the next check -// - the new status -// - the new event to emit, or nil -func (b *BlockNotifierPolling) step(ctx context.Context, - previousState *blockNotifierPollingInternalStatus) (time.Duration, - *blockNotifierPollingInternalStatus, *types.EventNewBlock) { - currentBlock, err := b.ethClient.HeaderByNumber(ctx, b.blockFinality) - if err == nil && currentBlock == nil { - err = fmt.Errorf("failed to get block number: return a nil block") - } - if err != nil { - b.logger.Errorf("Failed to get block number: %v", err) - newState := previousState.clear() - return b.nextBlockRequestDelay(nil, err), newState, nil - } - if previousState == nil { - newState := previousState.intialBlock(currentBlock.Number.Uint64()) - return b.nextBlockRequestDelay(previousState, nil), newState, nil - } - if currentBlock.Number.Uint64() == previousState.lastBlockSeen { - // No new block, so no changes to the state - return b.nextBlockRequestDelay(previousState, nil), previousState, nil - } - // New blockNumber! - eventToEmit := &types.EventNewBlock{ - BlockNumber: currentBlock.Number.Uint64(), - BlockFinalityType: b.config.BlockFinalityType, - } - if previousState.lastBlockSeen > currentBlock.Number.Uint64() { - b.logger.Warnf("Block number decreased [finality:%s]: %d -> %d", - b.config.BlockFinalityType, previousState.lastBlockSeen, currentBlock.Number.Uint64()) - // It starts from scratch because something went wrong in the calculation of the block period - newState := previousState.intialBlock(currentBlock.Number.Uint64()) - return b.nextBlockRequestDelay(nil, nil), newState, eventToEmit - } - - if currentBlock.Number.Uint64()-previousState.lastBlockSeen != 1 { - b.logger.Warnf("Missed block(s) [finality:%s]: %d -> %d", - b.config.BlockFinalityType, previousState.lastBlockSeen, currentBlock.Number.Uint64()) - // It starts from scratch because something went wrong in the calculation of the block period - newState := previousState.intialBlock(currentBlock.Number.Uint64()) - return b.nextBlockRequestDelay(nil, nil), newState, eventToEmit - } - newState := previousState.incommingNewBlock(currentBlock.Number.Uint64()) - b.logger.Debugf("New block seen [finality:%s]: %d.
blockRate:%s", - b.config.BlockFinalityType, currentBlock.Number.Uint64(), newState.previousBlockTime) - eventToEmit.BlockRate = *newState.previousBlockTime - return b.nextBlockRequestDelay(newState, nil), newState, eventToEmit -} - -func (b *BlockNotifierPolling) nextBlockRequestDelay(status *blockNotifierPollingInternalStatus, - err error) time.Duration { - if b.config.CheckNewBlockInterval != AutomaticBlockInterval { - return b.config.CheckNewBlockInterval - } - // Initial stages wait the minimum interval to increas accuracy - if status == nil || status.previousBlockTime == nil { - return minBlockInterval - } - if err != nil { - // If error we wait twice the min interval - return minBlockInterval * 2 //nolint:mnd // 2 times the interval - } - // we have a previous block time so we can calculate the interval - now := timeNowFunc() - expectedTimeNextBlock := status.lastBlockTime.Add(*status.previousBlockTime) - distanceToNextBlock := expectedTimeNextBlock.Sub(now) - interval := distanceToNextBlock * percentForNextBlock / 100 //nolint:mnd // percent period for reach the next block - return max(minBlockInterval, min(maxBlockInterval, interval)) -} - -type blockNotifierPollingInternalStatus struct { - lastBlockSeen uint64 - lastBlockTime time.Time // first appear of block lastBlockSeen - previousBlockTime *time.Duration // time of the previous block to appear -} - -func (s *blockNotifierPollingInternalStatus) String() string { - if s == nil { - return "nil" - } - return fmt.Sprintf("lastBlockSeen=%d lastBlockTime=%s previousBlockTime=%s", - s.lastBlockSeen, s.lastBlockTime, s.previousBlockTime) -} - -func (s *blockNotifierPollingInternalStatus) clear() *blockNotifierPollingInternalStatus { - return &blockNotifierPollingInternalStatus{} -} - -func (s *blockNotifierPollingInternalStatus) intialBlock(block uint64) *blockNotifierPollingInternalStatus { - return &blockNotifierPollingInternalStatus{ - lastBlockSeen: block, - lastBlockTime: timeNowFunc(), - } -} - -func (s *blockNotifierPollingInternalStatus) incommingNewBlock(block uint64) *blockNotifierPollingInternalStatus { - now := timeNowFunc() - timePreviousBlock := now.Sub(s.lastBlockTime) - return &blockNotifierPollingInternalStatus{ - lastBlockSeen: block, - lastBlockTime: now, - previousBlockTime: &timePreviousBlock, - } -} diff --git a/aggsender/block_notifier_polling_test.go b/aggsender/block_notifier_polling_test.go deleted file mode 100644 index b4c4e6296..000000000 --- a/aggsender/block_notifier_polling_test.go +++ /dev/null @@ -1,208 +0,0 @@ -package aggsender - -import ( - "context" - "fmt" - "math/big" - "os" - "testing" - "time" - - "github.com/0xPolygon/cdk/aggsender/mocks" - aggsendertypes "github.com/0xPolygon/cdk/aggsender/types" - "github.com/0xPolygon/cdk/etherman" - "github.com/0xPolygon/cdk/log" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/ethclient" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" -) - -func TestExploratoryBlockNotifierPolling(t *testing.T) { - t.Skip() - urlRPCL1 := os.Getenv("L1URL") - fmt.Println("URL=", urlRPCL1) - ethClient, err := ethclient.Dial(urlRPCL1) - require.NoError(t, err) - - sut, errSut := NewBlockNotifierPolling(ethClient, - ConfigBlockNotifierPolling{ - BlockFinalityType: etherman.LatestBlock, - }, log.WithFields("test", "test"), nil) - require.NoError(t, errSut) - go sut.Start(context.Background()) - ch := sut.Subscribe("test") - for block := range ch { - fmt.Println(block) - } -} - -func TestBlockNotifierPollingStep(t 
*testing.T) { - time0 := time.Unix(1731322117, 0) - period0 := time.Second * 10 - period0_80percent := time.Second * 8 - time1 := time0.Add(period0) - tests := []struct { - name string - previousStatus *blockNotifierPollingInternalStatus - HeaderByNumberError bool - HeaderByNumberErrorNumber uint64 - forcedTime time.Time - expectedStatus *blockNotifierPollingInternalStatus - expectedDelay time.Duration - expectedEvent *aggsendertypes.EventNewBlock - }{ - { - name: "initial->receive block", - previousStatus: nil, - HeaderByNumberError: false, - HeaderByNumberErrorNumber: 100, - forcedTime: time0, - expectedStatus: &blockNotifierPollingInternalStatus{ - lastBlockSeen: 100, - lastBlockTime: time0, - }, - expectedDelay: minBlockInterval, - expectedEvent: nil, - }, - { - name: "received block->error", - previousStatus: nil, - HeaderByNumberError: true, - forcedTime: time0, - expectedStatus: &blockNotifierPollingInternalStatus{}, - expectedDelay: minBlockInterval, - expectedEvent: nil, - }, - - { - name: "have block period->receive new block", - previousStatus: &blockNotifierPollingInternalStatus{ - lastBlockSeen: 100, - lastBlockTime: time0, - previousBlockTime: &period0, - }, - HeaderByNumberError: false, - HeaderByNumberErrorNumber: 101, - forcedTime: time1, - expectedStatus: &blockNotifierPollingInternalStatus{ - lastBlockSeen: 101, - lastBlockTime: time1, - previousBlockTime: &period0, - }, - expectedDelay: period0_80percent, - expectedEvent: &aggsendertypes.EventNewBlock{ - BlockNumber: 101, - }, - }, - } - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - testData := newBlockNotifierPollingTestData(t, nil) - - timeNowFunc = func() time.Time { - return tt.forcedTime - } - - if tt.HeaderByNumberError == false { - hdr1 := &types.Header{ - Number: big.NewInt(int64(tt.HeaderByNumberErrorNumber)), - } - testData.ethClientMock.EXPECT().HeaderByNumber(mock.Anything, mock.Anything).Return(hdr1, nil).Once() - } else { - testData.ethClientMock.EXPECT().HeaderByNumber(mock.Anything, mock.Anything).Return(nil, fmt.Errorf("error")).Once() - } - delay, newStatus, event := testData.sut.step(context.TODO(), tt.previousStatus) - require.Equal(t, tt.expectedDelay, delay, "delay") - require.Equal(t, tt.expectedStatus, newStatus, "new_status") - if tt.expectedEvent == nil { - require.Nil(t, event, "send_event") - } else { - require.Equal(t, tt.expectedEvent.BlockNumber, event.BlockNumber, "send_event") - } - }) - } -} - -func TestDelayNoPreviousBLock(t *testing.T) { - testData := newBlockNotifierPollingTestData(t, nil) - status := blockNotifierPollingInternalStatus{ - lastBlockSeen: 100, - } - delay := testData.sut.nextBlockRequestDelay(&status, nil) - require.Equal(t, minBlockInterval, delay) -} - -func TestDelayBLock(t *testing.T) { - testData := newBlockNotifierPollingTestData(t, nil) - pt := time.Second * 10 - status := blockNotifierPollingInternalStatus{ - lastBlockSeen: 100, - previousBlockTime: &pt, - } - delay := testData.sut.nextBlockRequestDelay(&status, nil) - require.Equal(t, minBlockInterval, delay) -} - -func TestNewBlockNotifierPolling(t *testing.T) { - testData := newBlockNotifierPollingTestData(t, nil) - require.NotNil(t, testData.sut) - _, err := NewBlockNotifierPolling(testData.ethClientMock, ConfigBlockNotifierPolling{ - BlockFinalityType: etherman.BlockNumberFinality("invalid"), - }, log.WithFields("test", "test"), nil) - require.Error(t, err) -} - -func TestBlockNotifierPollingString(t *testing.T) { - testData := newBlockNotifierPollingTestData(t, nil) - 
require.NotEmpty(t, testData.sut.String()) - testData.sut.lastStatus = &blockNotifierPollingInternalStatus{ - lastBlockSeen: 100, - } - require.NotEmpty(t, testData.sut.String()) -} - -func TestBlockNotifierPollingStart(t *testing.T) { - testData := newBlockNotifierPollingTestData(t, nil) - ch := testData.sut.Subscribe("test") - hdr1 := &types.Header{ - Number: big.NewInt(100), - } - testData.ethClientMock.EXPECT().HeaderByNumber(mock.Anything, mock.Anything).Return(hdr1, nil).Once() - hdr2 := &types.Header{ - Number: big.NewInt(101), - } - testData.ethClientMock.EXPECT().HeaderByNumber(mock.Anything, mock.Anything).Return(hdr2, nil).Once() - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - go testData.sut.Start(ctx) - block := <-ch - require.NotNil(t, block) - require.Equal(t, uint64(101), block.BlockNumber) -} - -type blockNotifierPollingTestData struct { - sut *BlockNotifierPolling - ethClientMock *mocks.EthClient - ctx context.Context -} - -func newBlockNotifierPollingTestData(t *testing.T, config *ConfigBlockNotifierPolling) blockNotifierPollingTestData { - t.Helper() - if config == nil { - config = &ConfigBlockNotifierPolling{ - BlockFinalityType: etherman.LatestBlock, - CheckNewBlockInterval: 0, - } - } - ethClientMock := mocks.NewEthClient(t) - logger := log.WithFields("test", "BlockNotifierPolling") - sut, err := NewBlockNotifierPolling(ethClientMock, *config, logger, nil) - require.NoError(t, err) - return blockNotifierPollingTestData{ - sut: sut, - ethClientMock: ethClientMock, - ctx: context.TODO(), - } -} diff --git a/aggsender/config.go b/aggsender/config.go deleted file mode 100644 index a81c12990..000000000 --- a/aggsender/config.go +++ /dev/null @@ -1,58 +0,0 @@ -package aggsender - -import ( - "fmt" - - "github.com/0xPolygon/cdk/config/types" -) - -// Config is the configuration for the AggSender -type Config struct { - // StoragePath is the path of the sqlite db on which the AggSender will store the data - StoragePath string `mapstructure:"StoragePath"` - // AggLayerURL is the URL of the AggLayer - AggLayerURL string `mapstructure:"AggLayerURL"` - // AggsenderPrivateKey is the private key which is used to sign certificates - AggsenderPrivateKey types.KeystoreFileConfig `mapstructure:"AggsenderPrivateKey"` - // URLRPCL2 is the URL of the L2 RPC node - URLRPCL2 string `mapstructure:"URLRPCL2"` - // BlockFinality indicates which finality follows AggLayer - BlockFinality string `jsonschema:"enum=LatestBlock, enum=SafeBlock, enum=PendingBlock, enum=FinalizedBlock, enum=EarliestBlock" mapstructure:"BlockFinality"` //nolint:lll - // EpochNotificationPercentage indicates the percentage of the epoch - // the AggSender should send the certificate - // 0 -> Begin - // 50 -> Middle - EpochNotificationPercentage uint `mapstructure:"EpochNotificationPercentage"` - // SaveCertificatesToFilesPath if != "" tells the AggSender to save the certificates to a file in this path - SaveCertificatesToFilesPath string `mapstructure:"SaveCertificatesToFilesPath"` - - // MaxRetriesStoreCertificate is the maximum number of retries to store a certificate - // 0 is infinite - MaxRetriesStoreCertificate int `mapstructure:"MaxRetriesStoreCertificate"` - // DelayBeetweenRetries is the delay between retries: - // is used on store Certificate and also in initial check - DelayBeetweenRetries types.Duration `mapstructure:"DelayBeetweenRetries"` - // KeepCertificatesHistory is a flag to keep the certificates history on storage - KeepCertificatesHistory bool 
`mapstructure:"KeepCertificatesHistory"` - // MaxCertSize is the maximum size of the certificate (the emitted certificate can be bigger that this size) - // 0 is infinite - MaxCertSize uint `mapstructure:"MaxCertSize"` - // BridgeMetadataAsHash is a flag to import the bridge metadata as hash - BridgeMetadataAsHash bool `mapstructure:"BridgeMetadataAsHash"` - // DryRun is a flag to enable the dry run mode - // in this mode the AggSender will not send the certificates to Agglayer - DryRun bool `mapstructure:"DryRun"` - // EnableRPC is a flag to enable the RPC for aggsender - EnableRPC bool `mapstructure:"EnableRPC"` -} - -// String returns a string representation of the Config -func (c Config) String() string { - return "StoragePath: " + c.StoragePath + "\n" + - "AggLayerURL: " + c.AggLayerURL + "\n" + - "AggsenderPrivateKeyPath: " + c.AggsenderPrivateKey.Path + "\n" + - "URLRPCL2: " + c.URLRPCL2 + "\n" + - "BlockFinality: " + c.BlockFinality + "\n" + - "EpochNotificationPercentage: " + fmt.Sprintf("%d", c.EpochNotificationPercentage) + "\n" + - "SaveCertificatesToFilesPath: " + c.SaveCertificatesToFilesPath + "\n" -} diff --git a/aggsender/db/aggsender_db_storage.go b/aggsender/db/aggsender_db_storage.go deleted file mode 100644 index 3a9a9f12d..000000000 --- a/aggsender/db/aggsender_db_storage.go +++ /dev/null @@ -1,260 +0,0 @@ -package db - -import ( - "context" - "database/sql" - "errors" - "fmt" - "strings" - - "github.com/0xPolygon/cdk/agglayer" - "github.com/0xPolygon/cdk/aggsender/db/migrations" - "github.com/0xPolygon/cdk/aggsender/types" - "github.com/0xPolygon/cdk/db" - "github.com/0xPolygon/cdk/log" - "github.com/ethereum/go-ethereum/common" - "github.com/russross/meddler" -) - -const errWhileRollbackFormat = "error while rolling back tx: %w" - -// AggSenderStorage is the interface that defines the methods to interact with the storage -type AggSenderStorage interface { - // GetCertificateByHeight returns a certificate by its height - GetCertificateByHeight(height uint64) (*types.CertificateInfo, error) - // GetLastSentCertificate returns the last certificate sent to the aggLayer - GetLastSentCertificate() (*types.CertificateInfo, error) - // SaveLastSentCertificate saves the last certificate sent to the aggLayer - SaveLastSentCertificate(ctx context.Context, certificate types.CertificateInfo) error - // DeleteCertificate deletes a certificate from the storage - DeleteCertificate(ctx context.Context, certificateID common.Hash) error - // GetCertificatesByStatus returns a list of certificates by their status - GetCertificatesByStatus(status []agglayer.CertificateStatus) ([]*types.CertificateInfo, error) - // UpdateCertificate updates certificate in db - UpdateCertificate(ctx context.Context, certificate types.CertificateInfo) error -} - -var _ AggSenderStorage = (*AggSenderSQLStorage)(nil) - -// AggSenderSQLStorageConfig is the configuration for the AggSenderSQLStorage -type AggSenderSQLStorageConfig struct { - DBPath string - KeepCertificatesHistory bool -} - -// AggSenderSQLStorage is the struct that implements the AggSenderStorage interface -type AggSenderSQLStorage struct { - logger *log.Logger - db *sql.DB - cfg AggSenderSQLStorageConfig -} - -// NewAggSenderSQLStorage creates a new AggSenderSQLStorage -func NewAggSenderSQLStorage(logger *log.Logger, cfg AggSenderSQLStorageConfig) (*AggSenderSQLStorage, error) { - db, err := db.NewSQLiteDB(cfg.DBPath) - if err != nil { - return nil, err - } - if err := migrations.RunMigrations(logger, db); err != nil { - return nil, err - } - 
- return &AggSenderSQLStorage{ - db: db, - logger: logger, - cfg: cfg, - }, nil -} - -func (a *AggSenderSQLStorage) GetCertificatesByStatus( - statuses []agglayer.CertificateStatus) ([]*types.CertificateInfo, error) { - query := "SELECT * FROM certificate_info" - args := make([]interface{}, len(statuses)) - - if len(statuses) > 0 { - placeholders := make([]string, len(statuses)) - // Build the WHERE clause for status filtering - for i := range statuses { - placeholders[i] = fmt.Sprintf("$%d", i+1) - args[i] = statuses[i] - } - - // Build the WHERE clause with the joined placeholders - query += " WHERE status IN (" + strings.Join(placeholders, ", ") + ")" - } - - // Add ordering by creation date (oldest first) - query += " ORDER BY height ASC" - - var certificates []*types.CertificateInfo - if err := meddler.QueryAll(a.db, &certificates, query, args...); err != nil { - return nil, err - } - - return certificates, nil -} - -// GetCertificateByHeight returns a certificate by its height -func (a *AggSenderSQLStorage) GetCertificateByHeight(height uint64) (*types.CertificateInfo, error) { - return getCertificateByHeight(a.db, height) -} - -// getCertificateByHeight returns a certificate by its height using the provided db -func getCertificateByHeight(db db.Querier, - height uint64) (*types.CertificateInfo, error) { - var certificateInfo types.CertificateInfo - if err := meddler.QueryRow(db, &certificateInfo, - "SELECT * FROM certificate_info WHERE height = $1;", height); err != nil { - return nil, getSelectQueryError(height, err) - } - - return &certificateInfo, nil -} - -// GetLastSentCertificate returns the last certificate sent to the aggLayer -func (a *AggSenderSQLStorage) GetLastSentCertificate() (*types.CertificateInfo, error) { - var certificateInfo types.CertificateInfo - if err := meddler.QueryRow(a.db, &certificateInfo, - "SELECT * FROM certificate_info ORDER BY height DESC LIMIT 1;"); err != nil { - return nil, getSelectQueryError(0, err) - } - - return &certificateInfo, nil -} - -// SaveLastSentCertificate saves the last certificate sent to the aggLayer -func (a *AggSenderSQLStorage) SaveLastSentCertificate(ctx context.Context, certificate types.CertificateInfo) error { - tx, err := db.NewTx(ctx, a.db) - if err != nil { - return fmt.Errorf("saveLastSentCertificate NewTx. Err: %w", err) - } - shouldRollback := true - defer func() { - if shouldRollback { - if errRllbck := tx.Rollback(); errRllbck != nil { - a.logger.Errorf(errWhileRollbackFormat, errRllbck) - } - } - }() - - cert, err := getCertificateByHeight(tx, certificate.Height) - if err != nil && !errors.Is(err, db.ErrNotFound) { - return fmt.Errorf("saveLastSentCertificate getCertificateByHeight. Err: %w", err) - } - - if cert != nil { - // we already have a certificate with this height - // we need to delete it before inserting the new one - if err = a.moveCertificateToHistoryOrDelete(tx, cert); err != nil { - return fmt.Errorf("saveLastSentCertificate moveCertificateToHistory Err: %w", err) - } - } - - if err = meddler.Insert(tx, "certificate_info", &certificate); err != nil { - return fmt.Errorf("error inserting certificate info: %w", err) - } - - if err = tx.Commit(); err != nil { - return fmt.Errorf("saveLastSentCertificate commit. Err: %w", err) - } - shouldRollback = false - - a.logger.Debugf("inserted certificate - Height: %d. 
Hash: %s", certificate.Height, certificate.CertificateID) - - return nil -} - -func (a *AggSenderSQLStorage) moveCertificateToHistoryOrDelete(tx db.Querier, - certificate *types.CertificateInfo) error { - if a.cfg.KeepCertificatesHistory { - a.logger.Debugf("moving certificate to history - new CertificateID: %s", certificate.ID()) - if _, err := tx.Exec(`INSERT INTO certificate_info_history SELECT * FROM certificate_info WHERE height = $1;`, - certificate.Height); err != nil { - return fmt.Errorf("error moving certificate to history: %w", err) - } - } - a.logger.Debugf("deleting certificate - CertificateID: %s", certificate.ID()) - if err := deleteCertificate(tx, certificate.CertificateID); err != nil { - return fmt.Errorf("deleteCertificate %s . Error: %w", certificate.ID(), err) - } - - return nil -} - -// DeleteCertificate deletes a certificate from the storage -func (a *AggSenderSQLStorage) DeleteCertificate(ctx context.Context, certificateID common.Hash) error { - tx, err := db.NewTx(ctx, a.db) - if err != nil { - return err - } - defer func() { - if err != nil { - if errRllbck := tx.Rollback(); errRllbck != nil { - a.logger.Errorf(errWhileRollbackFormat, errRllbck) - } - } - }() - - if err = deleteCertificate(tx, certificateID); err != nil { - return err - } - - if err = tx.Commit(); err != nil { - return err - } - a.logger.Debugf("deleted certificate - CertificateID: %s", certificateID) - return nil -} - -// deleteCertificate deletes a certificate from the storage using the provided db -func deleteCertificate(tx db.Querier, certificateID common.Hash) error { - if _, err := tx.Exec(`DELETE FROM certificate_info WHERE certificate_id = $1;`, certificateID.String()); err != nil { - return fmt.Errorf("error deleting certificate info: %w", err) - } - - return nil -} - -// UpdateCertificate updates a certificate -func (a *AggSenderSQLStorage) UpdateCertificate(ctx context.Context, certificate types.CertificateInfo) error { - tx, err := db.NewTx(ctx, a.db) - if err != nil { - return err - } - shouldRollback := true - defer func() { - if shouldRollback { - if errRllbck := tx.Rollback(); errRllbck != nil { - a.logger.Errorf(errWhileRollbackFormat, errRllbck) - } - } - }() - - if _, err = tx.Exec(`UPDATE certificate_info SET status = $1, updated_at = $2 WHERE certificate_id = $3;`, - certificate.Status, certificate.UpdatedAt, certificate.CertificateID.String()); err != nil { - return fmt.Errorf("error updating certificate info: %w", err) - } - if err = tx.Commit(); err != nil { - return err - } - shouldRollback = false - - a.logger.Debugf("updated certificate status - CertificateID: %s", certificate.CertificateID) - - return nil -} - -func getSelectQueryError(height uint64, err error) error { - errToReturn := err - if errors.Is(err, sql.ErrNoRows) { - if height == 0 { - // height 0 is never sent to the aggLayer - // so we don't return an error in this case - errToReturn = nil - } else { - errToReturn = db.ErrNotFound - } - } - - return errToReturn -} diff --git a/aggsender/db/aggsender_db_storage_test.go b/aggsender/db/aggsender_db_storage_test.go deleted file mode 100644 index 912d243c7..000000000 --- a/aggsender/db/aggsender_db_storage_test.go +++ /dev/null @@ -1,427 +0,0 @@ -package db - -import ( - "context" - "encoding/json" - "math/big" - "path" - "testing" - "time" - - "github.com/0xPolygon/cdk/agglayer" - "github.com/0xPolygon/cdk/aggsender/types" - "github.com/0xPolygon/cdk/db" - "github.com/0xPolygon/cdk/log" - "github.com/ethereum/go-ethereum/common" - 
"github.com/stretchr/testify/require" -) - -func Test_Storage(t *testing.T) { - ctx := context.Background() - - path := path.Join(t.TempDir(), "aggsenderTest_Storage.sqlite") - log.Debugf("sqlite path: %s", path) - cfg := AggSenderSQLStorageConfig{ - DBPath: path, - KeepCertificatesHistory: true, - } - - storage, err := NewAggSenderSQLStorage(log.WithFields("aggsender-db"), cfg) - require.NoError(t, err) - - updateTime := uint32(time.Now().UTC().UnixMilli()) - - t.Run("SaveLastSentCertificate", func(t *testing.T) { - certificate := types.CertificateInfo{ - Height: 1, - CertificateID: common.HexToHash("0x1"), - NewLocalExitRoot: common.HexToHash("0x2"), - FromBlock: 1, - ToBlock: 2, - Status: agglayer.Settled, - CreatedAt: updateTime, - UpdatedAt: updateTime, - } - require.NoError(t, storage.SaveLastSentCertificate(ctx, certificate)) - - certificateFromDB, err := storage.GetCertificateByHeight(certificate.Height) - require.NoError(t, err) - - require.Equal(t, certificate, *certificateFromDB) - require.NoError(t, storage.clean()) - }) - - t.Run("DeleteCertificate", func(t *testing.T) { - certificate := types.CertificateInfo{ - Height: 2, - CertificateID: common.HexToHash("0x3"), - NewLocalExitRoot: common.HexToHash("0x4"), - FromBlock: 3, - ToBlock: 4, - Status: agglayer.Settled, - CreatedAt: updateTime, - UpdatedAt: updateTime, - } - require.NoError(t, storage.SaveLastSentCertificate(ctx, certificate)) - - require.NoError(t, storage.DeleteCertificate(ctx, certificate.CertificateID)) - - certificateFromDB, err := storage.GetCertificateByHeight(certificate.Height) - require.ErrorIs(t, err, db.ErrNotFound) - require.Nil(t, certificateFromDB) - require.NoError(t, storage.clean()) - }) - - t.Run("GetLastSentCertificate", func(t *testing.T) { - // try getting a certificate that doesn't exist - certificateFromDB, err := storage.GetLastSentCertificate() - require.NoError(t, err) - require.Nil(t, certificateFromDB) - - // try getting a certificate that exists - certificate := types.CertificateInfo{ - Height: 3, - CertificateID: common.HexToHash("0x5"), - NewLocalExitRoot: common.HexToHash("0x6"), - FromBlock: 5, - ToBlock: 6, - Status: agglayer.Pending, - CreatedAt: updateTime, - UpdatedAt: updateTime, - } - require.NoError(t, storage.SaveLastSentCertificate(ctx, certificate)) - - certificateFromDB, err = storage.GetLastSentCertificate() - require.NoError(t, err) - require.NotNil(t, certificateFromDB) - require.Equal(t, certificate, *certificateFromDB) - require.NoError(t, storage.clean()) - }) - - t.Run("GetCertificateByHeight", func(t *testing.T) { - // try getting height 0 - certificateFromDB, err := storage.GetCertificateByHeight(0) - require.NoError(t, err) - require.Nil(t, certificateFromDB) - - // try getting a certificate that doesn't exist - certificateFromDB, err = storage.GetCertificateByHeight(4) - require.ErrorIs(t, err, db.ErrNotFound) - require.Nil(t, certificateFromDB) - - // try getting a certificate that exists - certificate := types.CertificateInfo{ - Height: 11, - CertificateID: common.HexToHash("0x17"), - NewLocalExitRoot: common.HexToHash("0x18"), - FromBlock: 17, - ToBlock: 18, - Status: agglayer.Pending, - CreatedAt: updateTime, - UpdatedAt: updateTime, - } - require.NoError(t, storage.SaveLastSentCertificate(ctx, certificate)) - - certificateFromDB, err = storage.GetCertificateByHeight(certificate.Height) - require.NoError(t, err) - require.NotNil(t, certificateFromDB) - require.Equal(t, certificate, *certificateFromDB) - require.NoError(t, storage.clean()) - }) - - 
t.Run("GetCertificatesByStatus", func(t *testing.T) { - // Insert some certificates with different statuses - certificates := []*types.CertificateInfo{ - { - Height: 7, - CertificateID: common.HexToHash("0x7"), - NewLocalExitRoot: common.HexToHash("0x8"), - FromBlock: 7, - ToBlock: 8, - Status: agglayer.Settled, - CreatedAt: updateTime, - UpdatedAt: updateTime, - }, - { - Height: 9, - CertificateID: common.HexToHash("0x9"), - NewLocalExitRoot: common.HexToHash("0xA"), - FromBlock: 9, - ToBlock: 10, - Status: agglayer.Pending, - CreatedAt: updateTime, - UpdatedAt: updateTime, - }, - { - Height: 11, - CertificateID: common.HexToHash("0xB"), - NewLocalExitRoot: common.HexToHash("0xC"), - FromBlock: 11, - ToBlock: 12, - Status: agglayer.InError, - CreatedAt: updateTime, - UpdatedAt: updateTime, - }, - } - - for _, cert := range certificates { - require.NoError(t, storage.SaveLastSentCertificate(ctx, *cert)) - } - - // Test fetching certificates with status Settled - statuses := []agglayer.CertificateStatus{agglayer.Settled} - certificatesFromDB, err := storage.GetCertificatesByStatus(statuses) - require.NoError(t, err) - require.Len(t, certificatesFromDB, 1) - require.ElementsMatch(t, []*types.CertificateInfo{certificates[0]}, certificatesFromDB) - - // Test fetching certificates with status Pending - statuses = []agglayer.CertificateStatus{agglayer.Pending} - certificatesFromDB, err = storage.GetCertificatesByStatus(statuses) - require.NoError(t, err) - require.Len(t, certificatesFromDB, 1) - require.ElementsMatch(t, []*types.CertificateInfo{certificates[1]}, certificatesFromDB) - - // Test fetching certificates with status InError - statuses = []agglayer.CertificateStatus{agglayer.InError} - certificatesFromDB, err = storage.GetCertificatesByStatus(statuses) - require.NoError(t, err) - require.Len(t, certificatesFromDB, 1) - require.ElementsMatch(t, []*types.CertificateInfo{certificates[2]}, certificatesFromDB) - - // Test fetching certificates with status InError and Pending - statuses = []agglayer.CertificateStatus{agglayer.InError, agglayer.Pending} - certificatesFromDB, err = storage.GetCertificatesByStatus(statuses) - require.NoError(t, err) - require.Len(t, certificatesFromDB, 2) - require.ElementsMatch(t, []*types.CertificateInfo{certificates[1], certificates[2]}, certificatesFromDB) - - require.NoError(t, storage.clean()) - }) - - t.Run("UpdateCertificateStatus", func(t *testing.T) { - // Insert a certificate - certificate := types.CertificateInfo{ - Height: 13, - RetryCount: 1234, - CertificateID: common.HexToHash("0xD"), - NewLocalExitRoot: common.HexToHash("0xE"), - FromBlock: 13, - ToBlock: 14, - Status: agglayer.Pending, - CreatedAt: updateTime, - UpdatedAt: updateTime, - } - require.NoError(t, storage.SaveLastSentCertificate(ctx, certificate)) - - // Update the status of the certificate - certificate.Status = agglayer.Settled - certificate.UpdatedAt = updateTime + 1 - require.NoError(t, storage.UpdateCertificate(ctx, certificate)) - - // Fetch the certificate and verify the status has been updated - certificateFromDB, err := storage.GetCertificateByHeight(certificate.Height) - require.NoError(t, err) - require.Equal(t, certificate.Status, certificateFromDB.Status, "equal status") - require.Equal(t, certificate.UpdatedAt, certificateFromDB.UpdatedAt, "equal updated at") - - require.NoError(t, storage.clean()) - }) -} - -func Test_SaveLastSentCertificate(t *testing.T) { - ctx := context.Background() - - path := path.Join(t.TempDir(), 
"aggsenderTest_SaveLastSentCertificate.sqlite") - log.Debugf("sqlite path: %s", path) - cfg := AggSenderSQLStorageConfig{ - DBPath: path, - KeepCertificatesHistory: true, - } - - storage, err := NewAggSenderSQLStorage(log.WithFields("aggsender-db"), cfg) - require.NoError(t, err) - - updateTime := uint32(time.Now().UTC().UnixMilli()) - - t.Run("SaveNewCertificate", func(t *testing.T) { - certificate := types.CertificateInfo{ - Height: 1, - CertificateID: common.HexToHash("0x1"), - NewLocalExitRoot: common.HexToHash("0x2"), - FromBlock: 1, - ToBlock: 2, - Status: agglayer.Settled, - CreatedAt: updateTime, - UpdatedAt: updateTime, - } - require.NoError(t, storage.SaveLastSentCertificate(ctx, certificate)) - - certificateFromDB, err := storage.GetCertificateByHeight(certificate.Height) - require.NoError(t, err) - require.Equal(t, certificate, *certificateFromDB) - require.NoError(t, storage.clean()) - }) - - t.Run("UpdateExistingCertificate", func(t *testing.T) { - certificate := types.CertificateInfo{ - Height: 2, - CertificateID: common.HexToHash("0x3"), - NewLocalExitRoot: common.HexToHash("0x4"), - FromBlock: 3, - ToBlock: 4, - Status: agglayer.InError, - CreatedAt: updateTime, - UpdatedAt: updateTime, - } - require.NoError(t, storage.SaveLastSentCertificate(ctx, certificate)) - - // Update the certificate with the same height - updatedCertificate := types.CertificateInfo{ - Height: 2, - CertificateID: common.HexToHash("0x5"), - NewLocalExitRoot: common.HexToHash("0x6"), - FromBlock: 3, - ToBlock: 6, - Status: agglayer.Pending, - } - require.NoError(t, storage.SaveLastSentCertificate(ctx, updatedCertificate)) - - certificateFromDB, err := storage.GetCertificateByHeight(updatedCertificate.Height) - require.NoError(t, err) - require.Equal(t, updatedCertificate, *certificateFromDB) - require.NoError(t, storage.clean()) - }) - - t.Run("SaveCertificateWithRollback", func(t *testing.T) { - // Simulate an error during the transaction to trigger a rollback - certificate := types.CertificateInfo{ - Height: 3, - CertificateID: common.HexToHash("0x7"), - NewLocalExitRoot: common.HexToHash("0x8"), - FromBlock: 7, - ToBlock: 8, - Status: agglayer.Settled, - CreatedAt: updateTime, - UpdatedAt: updateTime, - } - - // Close the database to force an error - require.NoError(t, storage.db.Close()) - - err := storage.SaveLastSentCertificate(ctx, certificate) - require.Error(t, err) - - // Reopen the database and check that the certificate was not saved - storage.db, err = db.NewSQLiteDB(path) - require.NoError(t, err) - - certificateFromDB, err := storage.GetCertificateByHeight(certificate.Height) - require.ErrorIs(t, err, db.ErrNotFound) - require.Nil(t, certificateFromDB) - require.NoError(t, storage.clean()) - }) - - t.Run("SaveCertificate with raw data", func(t *testing.T) { - certfiicate := &agglayer.SignedCertificate{ - Certificate: &agglayer.Certificate{ - NetworkID: 1, - Height: 1, - PrevLocalExitRoot: common.HexToHash("0x1"), - NewLocalExitRoot: common.HexToHash("0x2"), - Metadata: common.HexToHash("0x3"), - BridgeExits: []*agglayer.BridgeExit{ - { - LeafType: agglayer.LeafTypeAsset, - TokenInfo: &agglayer.TokenInfo{ - OriginNetwork: 1, - OriginTokenAddress: common.HexToAddress("0x1"), - }, - DestinationNetwork: 2, - DestinationAddress: common.HexToAddress("0x2"), - Amount: big.NewInt(100), - Metadata: []byte("metadata"), - }, - }, - ImportedBridgeExits: []*agglayer.ImportedBridgeExit{}, - }, - Signature: &agglayer.Signature{ - R: common.HexToHash("0x4"), - S: common.HexToHash("0x5"), - OddParity: 
false, - }, - } - - raw, err := json.Marshal(certfiicate) - require.NoError(t, err) - - certificate := types.CertificateInfo{ - Height: 1, - CertificateID: common.HexToHash("0x9"), - NewLocalExitRoot: common.HexToHash("0x2"), - FromBlock: 1, - ToBlock: 10, - Status: agglayer.Pending, - CreatedAt: updateTime, - UpdatedAt: updateTime, - SignedCertificate: string(raw), - } - require.NoError(t, storage.SaveLastSentCertificate(ctx, certificate)) - - certificateFromDB, err := storage.GetCertificateByHeight(certificate.Height) - require.NoError(t, err) - require.Equal(t, certificate, *certificateFromDB) - require.Equal(t, raw, []byte(certificateFromDB.SignedCertificate)) - - require.NoError(t, storage.clean()) - }) -} - -func (a *AggSenderSQLStorage) clean() error { - if _, err := a.db.Exec(`DELETE FROM certificate_info;`); err != nil { - return err - } - - return nil -} - -func Test_StoragePreviousLER(t *testing.T) { - ctx := context.TODO() - dbPath := path.Join(t.TempDir(), "Test_StoragePreviousLER.sqlite") - cfg := AggSenderSQLStorageConfig{ - DBPath: dbPath, - KeepCertificatesHistory: true, - } - storage, err := NewAggSenderSQLStorage(log.WithFields("aggsender-db"), cfg) - require.NoError(t, err) - require.NotNil(t, storage) - - certNoLER := types.CertificateInfo{ - Height: 0, - CertificateID: common.HexToHash("0x1"), - Status: agglayer.InError, - NewLocalExitRoot: common.HexToHash("0x2"), - } - err = storage.SaveLastSentCertificate(ctx, certNoLER) - require.NoError(t, err) - - readCertNoLER, err := storage.GetCertificateByHeight(0) - require.NoError(t, err) - require.NotNil(t, readCertNoLER) - require.Equal(t, certNoLER, *readCertNoLER) - - certLER := types.CertificateInfo{ - Height: 1, - CertificateID: common.HexToHash("0x2"), - Status: agglayer.InError, - NewLocalExitRoot: common.HexToHash("0x2"), - PreviousLocalExitRoot: &common.Hash{}, - } - err = storage.SaveLastSentCertificate(ctx, certLER) - require.NoError(t, err) - - readCertWithLER, err := storage.GetCertificateByHeight(1) - require.NoError(t, err) - require.NotNil(t, readCertWithLER) - require.Equal(t, certLER, *readCertWithLER) -} diff --git a/aggsender/db/migrations/0001.sql b/aggsender/db/migrations/0001.sql deleted file mode 100644 index d418f1d89..000000000 --- a/aggsender/db/migrations/0001.sql +++ /dev/null @@ -1,35 +0,0 @@ --- +migrate Down -DROP TABLE IF EXISTS certificate_info; -DROP TABLE IF EXISTS certificate_info_history; -DROP TABLE IF EXISTS certificate_info_history; - --- +migrate Up -CREATE TABLE certificate_info ( - height INTEGER NOT NULL, - retry_count INTEGER DEFAULT 0, - certificate_id VARCHAR NOT NULL, - status INTEGER NOT NULL, - previous_local_exit_root VARCHAR, - new_local_exit_root VARCHAR NOT NULL, - from_block INTEGER NOT NULL, - to_block INTEGER NOT NULL, - created_at INTEGER NOT NULL, - updated_at INTEGER NOT NULL, - signed_certificate TEXT, - PRIMARY KEY (height) -); - -CREATE TABLE certificate_info_history ( - height INTEGER NOT NULL , - retry_count INTEGER DEFAULT 0, - certificate_id VARCHAR NOT NULL, - status INTEGER NOT NULL, - previous_local_exit_root VARCHAR, - new_local_exit_root VARCHAR NOT NULL, - from_block INTEGER NOT NULL, - to_block INTEGER NOT NULL, - created_at INTEGER NOT NULL, - updated_at INTEGER NOT NULL, - signed_certificate TEXT, - PRIMARY KEY (height, retry_count) -); diff --git a/aggsender/db/migrations/migrations.go b/aggsender/db/migrations/migrations.go deleted file mode 100644 index 78c58b85e..000000000 --- a/aggsender/db/migrations/migrations.go +++ /dev/null @@ -1,24 
+0,0 @@ -package migrations - -import ( - "database/sql" - _ "embed" - - "github.com/0xPolygon/cdk/db" - "github.com/0xPolygon/cdk/db/types" - "github.com/0xPolygon/cdk/log" -) - -//go:embed 0001.sql -var mig001 string - -func RunMigrations(logger *log.Logger, database *sql.DB) error { - migrations := []types.Migration{ - { - ID: "0001", - SQL: mig001, - }, - } - - return db.RunMigrationsDB(logger, database, migrations) -} diff --git a/aggsender/epoch_notifier_per_block.go b/aggsender/epoch_notifier_per_block.go deleted file mode 100644 index 80494cc0d..000000000 --- a/aggsender/epoch_notifier_per_block.go +++ /dev/null @@ -1,217 +0,0 @@ -package aggsender - -import ( - "context" - "fmt" - - "github.com/0xPolygon/cdk/agglayer" - "github.com/0xPolygon/cdk/aggsender/types" -) - -const ( - maxPercent = 100.0 -) - -type ExtraInfoEventEpoch struct { - PendingBlocks int -} - -func (e *ExtraInfoEventEpoch) String() string { - return fmt.Sprintf("ExtraInfoEventEpoch: pendingBlocks=%d", e.PendingBlocks) -} - -type ConfigEpochNotifierPerBlock struct { - StartingEpochBlock uint64 - NumBlockPerEpoch uint - - // EpochNotificationPercentage - // 0 -> begin new Epoch - // 50 -> middle of epoch - // 100 -> end of epoch (same as 0) - EpochNotificationPercentage uint -} - -func (c *ConfigEpochNotifierPerBlock) String() string { - if c == nil { - return "nil" - } - return fmt.Sprintf("{startEpochBlock=%d, sizeEpoch=%d, threshold=%d%%}", - c.StartingEpochBlock, c.NumBlockPerEpoch, c.EpochNotificationPercentage) -} - -func NewConfigEpochNotifierPerBlock(aggLayer agglayer.AggLayerClientGetEpochConfiguration, - epochNotificationPercentage uint) (*ConfigEpochNotifierPerBlock, error) { - if aggLayer == nil { - return nil, fmt.Errorf("newConfigEpochNotifierPerBlock: aggLayerClient is required") - } - clockConfig, err := aggLayer.GetEpochConfiguration() - if err != nil { - return nil, fmt.Errorf("newConfigEpochNotifierPerBlock: error getting clock configuration from AggLayer: %w", err) - } - return &ConfigEpochNotifierPerBlock{ - StartingEpochBlock: clockConfig.GenesisBlock, - NumBlockPerEpoch: uint(clockConfig.EpochDuration), - EpochNotificationPercentage: epochNotificationPercentage, - }, nil -} - -func (c *ConfigEpochNotifierPerBlock) Validate() error { - if c.NumBlockPerEpoch == 0 { - return fmt.Errorf("numBlockPerEpoch: num block per epoch is required > 0 ") - } - if c.EpochNotificationPercentage >= maxPercent { - return fmt.Errorf("epochNotificationPercentage: must be between 0 and 99") - } - return nil -} - -type EpochNotifierPerBlock struct { - blockNotifier types.BlockNotifier - logger types.Logger - - lastStartingEpochBlock uint64 - - Config ConfigEpochNotifierPerBlock - types.GenericSubscriber[types.EpochEvent] -} - -func NewEpochNotifierPerBlock(blockNotifier types.BlockNotifier, - logger types.Logger, - config ConfigEpochNotifierPerBlock, - subscriber types.GenericSubscriber[types.EpochEvent]) (*EpochNotifierPerBlock, error) { - if subscriber == nil { - subscriber = NewGenericSubscriberImpl[types.EpochEvent]() - } - - err := config.Validate() - if err != nil { - return nil, fmt.Errorf("invalid config: %w", err) - } - return &EpochNotifierPerBlock{ - blockNotifier: blockNotifier, - logger: logger, - lastStartingEpochBlock: config.StartingEpochBlock, - Config: config, - GenericSubscriber: subscriber, - }, nil -} - -func (e *EpochNotifierPerBlock) String() string { - return fmt.Sprintf("EpochNotifierPerBlock: config: %s", e.Config.String()) -} - -// StartAsync starts the notifier in a goroutine -func (e 
*EpochNotifierPerBlock) StartAsync(ctx context.Context) { - eventNewBlockChannel := e.blockNotifier.Subscribe("EpochNotifierPerBlock") - go e.startInternal(ctx, eventNewBlockChannel) -} - -// Start starts the notifier synchronously -func (e *EpochNotifierPerBlock) Start(ctx context.Context) { - eventNewBlockChannel := e.blockNotifier.Subscribe("EpochNotifierPerBlock") - e.startInternal(ctx, eventNewBlockChannel) -} - -func (e *EpochNotifierPerBlock) startInternal(ctx context.Context, eventNewBlockChannel <-chan types.EventNewBlock) { - status := internalStatus{ - lastBlockSeen: e.Config.StartingEpochBlock, - waitingForEpoch: e.epochNumber(e.Config.StartingEpochBlock), - } - for { - select { - case <-ctx.Done(): - return - case newBlock := <-eventNewBlockChannel: - var event *types.EpochEvent - status, event = e.step(status, newBlock) - if event != nil { - e.logger.Debugf("new Epoch Event: %s", event.String()) - e.GenericSubscriber.Publish(*event) - } - } - } -} - -type internalStatus struct { - lastBlockSeen uint64 - waitingForEpoch uint64 -} - -func (e *EpochNotifierPerBlock) step(status internalStatus, - newBlock types.EventNewBlock) (internalStatus, *types.EpochEvent) { - currentBlock := newBlock.BlockNumber - if currentBlock < e.Config.StartingEpochBlock { - // This is a bit strange, the first epoch is in the future - e.logger.Warnf("Block number %d is before the starting first epoch block %d."+ - " Please check your config", currentBlock, e.Config.StartingEpochBlock) - return status, nil - } - // No new block - if currentBlock <= status.lastBlockSeen { - return status, nil - } - status.lastBlockSeen = currentBlock - - needNotify, closingEpoch := e.isNotificationRequired(currentBlock, status.waitingForEpoch) - percentEpoch := e.percentEpoch(currentBlock) - logFunc := e.logger.Debugf - if needNotify { - logFunc = e.logger.Infof - } - logFunc("New block seen [finality:%s]: %d. 
blockRate:%s Epoch:%d Percent:%f%% notify:%v config:%s", - newBlock.BlockFinalityType, newBlock.BlockNumber, newBlock.BlockRate, closingEpoch, - percentEpoch*maxPercent, needNotify, e.Config.String()) - if needNotify { - // Notify the epoch has started - info := e.infoEpoch(currentBlock, closingEpoch) - status.waitingForEpoch = closingEpoch + 1 - return status, &types.EpochEvent{ - Epoch: closingEpoch, - ExtraInfo: info, - } - } - return status, nil -} - -func (e *EpochNotifierPerBlock) infoEpoch(currentBlock, newEpochNotified uint64) *ExtraInfoEventEpoch { - nextBlockStartingEpoch := e.endBlockEpoch(newEpochNotified) - return &ExtraInfoEventEpoch{ - PendingBlocks: int(nextBlockStartingEpoch - currentBlock), - } -} -func (e *EpochNotifierPerBlock) percentEpoch(currentBlock uint64) float64 { - epoch := e.epochNumber(currentBlock) - startingBlock := e.startingBlockEpoch(epoch) - elapsedBlocks := currentBlock - startingBlock - return float64(elapsedBlocks) / float64(e.Config.NumBlockPerEpoch) -} -func (e *EpochNotifierPerBlock) isNotificationRequired(currentBlock, lastEpochNotified uint64) (bool, uint64) { - percentEpoch := e.percentEpoch(currentBlock) - thresholdPercent := float64(e.Config.EpochNotificationPercentage) / maxPercent - maxTresholdPercent := float64(e.Config.NumBlockPerEpoch-1) / float64(e.Config.NumBlockPerEpoch) - if thresholdPercent > maxTresholdPercent { - thresholdPercent = maxTresholdPercent - } - if percentEpoch < thresholdPercent { - return false, e.epochNumber(currentBlock) - } - nextEpoch := e.epochNumber(currentBlock) + 1 - return nextEpoch > lastEpochNotified, e.epochNumber(currentBlock) -} - -func (e *EpochNotifierPerBlock) startingBlockEpoch(epoch uint64) uint64 { - if epoch == 0 { - return e.Config.StartingEpochBlock - 1 - } - return e.Config.StartingEpochBlock + ((epoch - 1) * uint64(e.Config.NumBlockPerEpoch)) -} - -func (e *EpochNotifierPerBlock) endBlockEpoch(epoch uint64) uint64 { - return e.startingBlockEpoch(epoch + 1) -} -func (e *EpochNotifierPerBlock) epochNumber(currentBlock uint64) uint64 { - if currentBlock < e.Config.StartingEpochBlock { - return 0 - } - return 1 + ((currentBlock - e.Config.StartingEpochBlock) / uint64(e.Config.NumBlockPerEpoch)) -} diff --git a/aggsender/epoch_notifier_per_block_test.go b/aggsender/epoch_notifier_per_block_test.go deleted file mode 100644 index ac35350e5..000000000 --- a/aggsender/epoch_notifier_per_block_test.go +++ /dev/null @@ -1,230 +0,0 @@ -package aggsender - -import ( - "context" - "fmt" - "testing" - - "github.com/0xPolygon/cdk/agglayer" - "github.com/0xPolygon/cdk/aggsender/mocks" - "github.com/0xPolygon/cdk/aggsender/types" - "github.com/0xPolygon/cdk/etherman" - "github.com/0xPolygon/cdk/log" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" -) - -func TestConfigEpochNotifierPerBlockString(t *testing.T) { - cfg := ConfigEpochNotifierPerBlock{ - StartingEpochBlock: 123, - NumBlockPerEpoch: 456, - EpochNotificationPercentage: 789, - } - require.Equal(t, "{startEpochBlock=123, sizeEpoch=456, threshold=789%}", cfg.String()) - var cfg2 *ConfigEpochNotifierPerBlock - require.Equal(t, "nil", cfg2.String()) -} - -func TestStartingBlockEpoch(t *testing.T) { - testData := newNotifierPerBlockTestData(t, &ConfigEpochNotifierPerBlock{ - StartingEpochBlock: 9, - NumBlockPerEpoch: 10, - EpochNotificationPercentage: 80, - }) - // EPOCH: ---0 ----+----1 -----+----2 ----+----3 ----+----4 ----+----5 ----+---- - // BLOCK: 9 19 29 39 49 - require.Equal(t, uint64(8), 
testData.sut.startingBlockEpoch(0)) - require.Equal(t, uint64(9), testData.sut.startingBlockEpoch(1)) - require.Equal(t, uint64(19), testData.sut.startingBlockEpoch(2)) -} - -func TestEpochNotifyPercentageEdgeCase0(t *testing.T) { - testData := newNotifierPerBlockTestData(t, nil) - testData.sut.Config.EpochNotificationPercentage = 0 - notify, epoch := testData.sut.isNotificationRequired(9, 0) - require.True(t, notify) - require.Equal(t, uint64(1), epoch) -} - -// if percent is 99 means at end of epoch, so in a config 0, epoch-size=10, -// 99% means last block of epoch -func TestEpochNotifyPercentageEdgeCase99(t *testing.T) { - testData := newNotifierPerBlockTestData(t, nil) - testData.sut.Config.EpochNotificationPercentage = 99 - notify, epoch := testData.sut.isNotificationRequired(9, 0) - require.True(t, notify) - require.Equal(t, uint64(1), epoch) -} - -func TestEpochStep(t *testing.T) { - testData := newNotifierPerBlockTestData(t, &ConfigEpochNotifierPerBlock{ - StartingEpochBlock: 9, - NumBlockPerEpoch: 10, - EpochNotificationPercentage: 50, - }) - // EPOCH: ---0 ----+----1 -----+----2 ----+----3 ----+----4 ----+----5 ----+---- - // BLOCK: 9 19 29 39 49 - // start EPOCH#1 -> 9 - // end EPOCH#1 -> 19 - // start EPOCH#2 -> 19 - - tests := []struct { - name string - initialStatus internalStatus - blockNumber uint64 - expectedEvent bool - expectedEventEpoch uint64 - expectedEventPendingBlocks int - }{ - { - name: "First block of epoch, no notification until close to end", - initialStatus: internalStatus{lastBlockSeen: 8, waitingForEpoch: 0}, - blockNumber: 9, - expectedEvent: false, - expectedEventEpoch: 1, - expectedEventPendingBlocks: 0, - }, - { - name: "epoch#1 close to end, notify it!", - initialStatus: internalStatus{lastBlockSeen: 17, waitingForEpoch: 0}, - blockNumber: 18, - expectedEvent: true, - expectedEventEpoch: 1, // Finishing epoch 0 - expectedEventPendingBlocks: 1, // 19 - 18 - }, - { - name: "epoch#1 close to end, but already notified", - initialStatus: internalStatus{lastBlockSeen: 17, waitingForEpoch: 2}, - blockNumber: 18, - expectedEvent: false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - _, event := testData.sut.step(tt.initialStatus, types.EventNewBlock{BlockNumber: tt.blockNumber, BlockFinalityType: etherman.LatestBlock}) - require.Equal(t, tt.expectedEvent, event != nil) - if event != nil { - require.Equal(t, tt.expectedEventEpoch, event.Epoch, "Epoch") - extraInfo, ok := event.ExtraInfo.(*ExtraInfoEventEpoch) - require.True(t, ok, "ExtraInfo") - require.Equal(t, tt.expectedEventPendingBlocks, extraInfo.PendingBlocks, "PendingBlocks") - } - }) - } -} - -func TestNewConfigEpochNotifierPerBlock(t *testing.T) { - _, err := NewConfigEpochNotifierPerBlock(nil, 1) - require.Error(t, err) - aggLayerMock := agglayer.NewAgglayerClientMock(t) - aggLayerMock.On("GetEpochConfiguration").Return(nil, fmt.Errorf("error")).Once() - _, err = NewConfigEpochNotifierPerBlock(aggLayerMock, 1) - require.Error(t, err) - cfgAggLayer := &agglayer.ClockConfiguration{ - GenesisBlock: 123, - EpochDuration: 456, - } - aggLayerMock.On("GetEpochConfiguration").Return(cfgAggLayer, nil).Once() - cfg, err := NewConfigEpochNotifierPerBlock(aggLayerMock, 1) - require.NoError(t, err) - require.Equal(t, uint64(123), cfg.StartingEpochBlock) - require.Equal(t, uint(456), cfg.NumBlockPerEpoch) -} - -func TestNotifyEpoch(t *testing.T) { - testData := newNotifierPerBlockTestData(t, nil) - ch := testData.sut.Subscribe("test") - chBlocks := make(chan types.EventNewBlock) - 
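// The default test config (see newNotifierPerBlockTestData below) is
// StartingEpochBlock=0, NumBlockPerEpoch=10, EpochNotificationPercentage=50,
// so for the block 109 pushed through the channel:
//
//	epochNumber(109)       = 1 + (109-0)/10 = 11
//	startingBlockEpoch(11) = 0 + (11-1)*10  = 100
//	percentEpoch           = (109-100)/10   = 90% >= 50% threshold
//
// which is why the event read from ch below is expected to carry Epoch == 11.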
testData.blockNotifierMock.EXPECT().Subscribe(mock.Anything).Return(chBlocks) - testData.sut.StartAsync(testData.ctx) - chBlocks <- types.EventNewBlock{BlockNumber: 109, BlockFinalityType: etherman.LatestBlock} - epochEvent := <-ch - require.Equal(t, uint64(11), epochEvent.Epoch) - testData.ctx.Done() -} - -func TestStepSameEpoch(t *testing.T) { - testData := newNotifierPerBlockTestData(t, nil) - status := internalStatus{ - lastBlockSeen: 100, - waitingForEpoch: testData.sut.epochNumber(100), - } - newStatus, _ := testData.sut.step(status, types.EventNewBlock{BlockNumber: 103, BlockFinalityType: etherman.LatestBlock}) - require.Equal(t, uint64(103), newStatus.lastBlockSeen) - require.Equal(t, status.waitingForEpoch, newStatus.waitingForEpoch) -} - -func TestStepNotifyEpoch(t *testing.T) { - testData := newNotifierPerBlockTestData(t, nil) - status := internalStatus{ - lastBlockSeen: 100, - waitingForEpoch: testData.sut.epochNumber(100), - } - status, _ = testData.sut.step(status, types.EventNewBlock{BlockNumber: 109, BlockFinalityType: etherman.LatestBlock}) - require.Equal(t, uint64(109), status.lastBlockSeen) - require.Equal(t, uint64(12), status.waitingForEpoch) -} - -func TestBlockEpochNumber(t *testing.T) { - testData := newNotifierPerBlockTestData(t, &ConfigEpochNotifierPerBlock{ - StartingEpochBlock: 105, - NumBlockPerEpoch: 10, - EpochNotificationPercentage: 1, - }) - require.Equal(t, uint64(0), testData.sut.epochNumber(0)) - require.Equal(t, uint64(0), testData.sut.epochNumber(104)) - require.Equal(t, uint64(1), testData.sut.epochNumber(105)) - require.Equal(t, uint64(1), testData.sut.epochNumber(114)) - require.Equal(t, uint64(2), testData.sut.epochNumber(115)) - require.Equal(t, uint64(2), testData.sut.epochNumber(116)) - require.Equal(t, uint64(2), testData.sut.epochNumber(124)) - require.Equal(t, uint64(3), testData.sut.epochNumber(125)) -} - -func TestBlockBeforeEpoch(t *testing.T) { - testData := newNotifierPerBlockTestData(t, &ConfigEpochNotifierPerBlock{ - StartingEpochBlock: 105, - NumBlockPerEpoch: 10, - EpochNotificationPercentage: 1, - }) - status := internalStatus{ - lastBlockSeen: 104, - waitingForEpoch: testData.sut.epochNumber(104), - } - newStatus, _ := testData.sut.step(status, types.EventNewBlock{BlockNumber: 104, BlockFinalityType: etherman.LatestBlock}) - // We are previous block of first epoch, so we should do nothing - require.Equal(t, status, newStatus) - status = newStatus - // First block of first epoch - newStatus, _ = testData.sut.step(status, types.EventNewBlock{BlockNumber: 105, BlockFinalityType: etherman.LatestBlock}) - require.Equal(t, uint64(105), newStatus.lastBlockSeen) - // Near end first epoch - newStatus, _ = testData.sut.step(status, types.EventNewBlock{BlockNumber: 114, BlockFinalityType: etherman.LatestBlock}) - require.Equal(t, uint64(114), newStatus.lastBlockSeen) -} - -type notifierPerBlockTestData struct { - sut *EpochNotifierPerBlock - blockNotifierMock *mocks.BlockNotifier - ctx context.Context -} - -func newNotifierPerBlockTestData(t *testing.T, config *ConfigEpochNotifierPerBlock) notifierPerBlockTestData { - t.Helper() - if config == nil { - config = &ConfigEpochNotifierPerBlock{ - StartingEpochBlock: 0, - NumBlockPerEpoch: 10, - EpochNotificationPercentage: 50, - } - } - blockNotifierMock := mocks.NewBlockNotifier(t) - logger := log.WithFields("test", "EpochNotifierPerBlock") - sut, err := NewEpochNotifierPerBlock(blockNotifierMock, logger, *config, nil) - require.NoError(t, err) - return notifierPerBlockTestData{ - sut: sut, - 
blockNotifierMock: blockNotifierMock, - ctx: context.TODO(), - } -} diff --git a/aggsender/generic_subscriber_impl.go b/aggsender/generic_subscriber_impl.go deleted file mode 100644 index e4251449d..000000000 --- a/aggsender/generic_subscriber_impl.go +++ /dev/null @@ -1,33 +0,0 @@ -package aggsender - -import "sync" - -type GenericSubscriberImpl[T any] struct { - // map of subscribers with names - subs map[chan T]string - mu sync.RWMutex -} - -func NewGenericSubscriberImpl[T any]() *GenericSubscriberImpl[T] { - return &GenericSubscriberImpl[T]{ - subs: make(map[chan T]string), - } -} - -func (g *GenericSubscriberImpl[T]) Subscribe(subscriberName string) <-chan T { - ch := make(chan T) - g.mu.Lock() - defer g.mu.Unlock() - g.subs[ch] = subscriberName - return ch -} - -func (g *GenericSubscriberImpl[T]) Publish(data T) { - g.mu.RLock() - defer g.mu.RUnlock() - for ch := range g.subs { - go func(ch chan T) { - ch <- data - }(ch) - } -} diff --git a/aggsender/mocks/agg_sender_storage.go b/aggsender/mocks/agg_sender_storage.go deleted file mode 100644 index 9c0d20a64..000000000 --- a/aggsender/mocks/agg_sender_storage.go +++ /dev/null @@ -1,355 +0,0 @@ -// Code generated by mockery. DO NOT EDIT. - -package mocks - -import ( - agglayer "github.com/0xPolygon/cdk/agglayer" - common "github.com/ethereum/go-ethereum/common" - - context "context" - - mock "github.com/stretchr/testify/mock" - - types "github.com/0xPolygon/cdk/aggsender/types" -) - -// AggSenderStorage is an autogenerated mock type for the AggSenderStorage type -type AggSenderStorage struct { - mock.Mock -} - -type AggSenderStorage_Expecter struct { - mock *mock.Mock -} - -func (_m *AggSenderStorage) EXPECT() *AggSenderStorage_Expecter { - return &AggSenderStorage_Expecter{mock: &_m.Mock} -} - -// DeleteCertificate provides a mock function with given fields: ctx, certificateID -func (_m *AggSenderStorage) DeleteCertificate(ctx context.Context, certificateID common.Hash) error { - ret := _m.Called(ctx, certificateID) - - if len(ret) == 0 { - panic("no return value specified for DeleteCertificate") - } - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) error); ok { - r0 = rf(ctx, certificateID) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// AggSenderStorage_DeleteCertificate_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteCertificate' -type AggSenderStorage_DeleteCertificate_Call struct { - *mock.Call -} - -// DeleteCertificate is a helper method to define mock.On call -// - ctx context.Context -// - certificateID common.Hash -func (_e *AggSenderStorage_Expecter) DeleteCertificate(ctx interface{}, certificateID interface{}) *AggSenderStorage_DeleteCertificate_Call { - return &AggSenderStorage_DeleteCertificate_Call{Call: _e.mock.On("DeleteCertificate", ctx, certificateID)} -} - -func (_c *AggSenderStorage_DeleteCertificate_Call) Run(run func(ctx context.Context, certificateID common.Hash)) *AggSenderStorage_DeleteCertificate_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(common.Hash)) - }) - return _c -} - -func (_c *AggSenderStorage_DeleteCertificate_Call) Return(_a0 error) *AggSenderStorage_DeleteCertificate_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *AggSenderStorage_DeleteCertificate_Call) RunAndReturn(run func(context.Context, common.Hash) error) *AggSenderStorage_DeleteCertificate_Call { - _c.Call.Return(run) - return _c -} - -// GetCertificateByHeight provides a mock function with given 
fields: height -func (_m *AggSenderStorage) GetCertificateByHeight(height uint64) (*types.CertificateInfo, error) { - ret := _m.Called(height) - - if len(ret) == 0 { - panic("no return value specified for GetCertificateByHeight") - } - - var r0 *types.CertificateInfo - var r1 error - if rf, ok := ret.Get(0).(func(uint64) (*types.CertificateInfo, error)); ok { - return rf(height) - } - if rf, ok := ret.Get(0).(func(uint64) *types.CertificateInfo); ok { - r0 = rf(height) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.CertificateInfo) - } - } - - if rf, ok := ret.Get(1).(func(uint64) error); ok { - r1 = rf(height) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// AggSenderStorage_GetCertificateByHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetCertificateByHeight' -type AggSenderStorage_GetCertificateByHeight_Call struct { - *mock.Call -} - -// GetCertificateByHeight is a helper method to define mock.On call -// - height uint64 -func (_e *AggSenderStorage_Expecter) GetCertificateByHeight(height interface{}) *AggSenderStorage_GetCertificateByHeight_Call { - return &AggSenderStorage_GetCertificateByHeight_Call{Call: _e.mock.On("GetCertificateByHeight", height)} -} - -func (_c *AggSenderStorage_GetCertificateByHeight_Call) Run(run func(height uint64)) *AggSenderStorage_GetCertificateByHeight_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(uint64)) - }) - return _c -} - -func (_c *AggSenderStorage_GetCertificateByHeight_Call) Return(_a0 *types.CertificateInfo, _a1 error) *AggSenderStorage_GetCertificateByHeight_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *AggSenderStorage_GetCertificateByHeight_Call) RunAndReturn(run func(uint64) (*types.CertificateInfo, error)) *AggSenderStorage_GetCertificateByHeight_Call { - _c.Call.Return(run) - return _c -} - -// GetCertificatesByStatus provides a mock function with given fields: status -func (_m *AggSenderStorage) GetCertificatesByStatus(status []agglayer.CertificateStatus) ([]*types.CertificateInfo, error) { - ret := _m.Called(status) - - if len(ret) == 0 { - panic("no return value specified for GetCertificatesByStatus") - } - - var r0 []*types.CertificateInfo - var r1 error - if rf, ok := ret.Get(0).(func([]agglayer.CertificateStatus) ([]*types.CertificateInfo, error)); ok { - return rf(status) - } - if rf, ok := ret.Get(0).(func([]agglayer.CertificateStatus) []*types.CertificateInfo); ok { - r0 = rf(status) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]*types.CertificateInfo) - } - } - - if rf, ok := ret.Get(1).(func([]agglayer.CertificateStatus) error); ok { - r1 = rf(status) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// AggSenderStorage_GetCertificatesByStatus_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetCertificatesByStatus' -type AggSenderStorage_GetCertificatesByStatus_Call struct { - *mock.Call -} - -// GetCertificatesByStatus is a helper method to define mock.On call -// - status []agglayer.CertificateStatus -func (_e *AggSenderStorage_Expecter) GetCertificatesByStatus(status interface{}) *AggSenderStorage_GetCertificatesByStatus_Call { - return &AggSenderStorage_GetCertificatesByStatus_Call{Call: _e.mock.On("GetCertificatesByStatus", status)} -} - -func (_c *AggSenderStorage_GetCertificatesByStatus_Call) Run(run func(status []agglayer.CertificateStatus)) *AggSenderStorage_GetCertificatesByStatus_Call { - _c.Call.Run(func(args mock.Arguments) { - 
run(args[0].([]agglayer.CertificateStatus)) - }) - return _c -} - -func (_c *AggSenderStorage_GetCertificatesByStatus_Call) Return(_a0 []*types.CertificateInfo, _a1 error) *AggSenderStorage_GetCertificatesByStatus_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *AggSenderStorage_GetCertificatesByStatus_Call) RunAndReturn(run func([]agglayer.CertificateStatus) ([]*types.CertificateInfo, error)) *AggSenderStorage_GetCertificatesByStatus_Call { - _c.Call.Return(run) - return _c -} - -// GetLastSentCertificate provides a mock function with no fields -func (_m *AggSenderStorage) GetLastSentCertificate() (*types.CertificateInfo, error) { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for GetLastSentCertificate") - } - - var r0 *types.CertificateInfo - var r1 error - if rf, ok := ret.Get(0).(func() (*types.CertificateInfo, error)); ok { - return rf() - } - if rf, ok := ret.Get(0).(func() *types.CertificateInfo); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.CertificateInfo) - } - } - - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// AggSenderStorage_GetLastSentCertificate_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLastSentCertificate' -type AggSenderStorage_GetLastSentCertificate_Call struct { - *mock.Call -} - -// GetLastSentCertificate is a helper method to define mock.On call -func (_e *AggSenderStorage_Expecter) GetLastSentCertificate() *AggSenderStorage_GetLastSentCertificate_Call { - return &AggSenderStorage_GetLastSentCertificate_Call{Call: _e.mock.On("GetLastSentCertificate")} -} - -func (_c *AggSenderStorage_GetLastSentCertificate_Call) Run(run func()) *AggSenderStorage_GetLastSentCertificate_Call { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *AggSenderStorage_GetLastSentCertificate_Call) Return(_a0 *types.CertificateInfo, _a1 error) *AggSenderStorage_GetLastSentCertificate_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *AggSenderStorage_GetLastSentCertificate_Call) RunAndReturn(run func() (*types.CertificateInfo, error)) *AggSenderStorage_GetLastSentCertificate_Call { - _c.Call.Return(run) - return _c -} - -// SaveLastSentCertificate provides a mock function with given fields: ctx, certificate -func (_m *AggSenderStorage) SaveLastSentCertificate(ctx context.Context, certificate types.CertificateInfo) error { - ret := _m.Called(ctx, certificate) - - if len(ret) == 0 { - panic("no return value specified for SaveLastSentCertificate") - } - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, types.CertificateInfo) error); ok { - r0 = rf(ctx, certificate) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// AggSenderStorage_SaveLastSentCertificate_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SaveLastSentCertificate' -type AggSenderStorage_SaveLastSentCertificate_Call struct { - *mock.Call -} - -// SaveLastSentCertificate is a helper method to define mock.On call -// - ctx context.Context -// - certificate types.CertificateInfo -func (_e *AggSenderStorage_Expecter) SaveLastSentCertificate(ctx interface{}, certificate interface{}) *AggSenderStorage_SaveLastSentCertificate_Call { - return &AggSenderStorage_SaveLastSentCertificate_Call{Call: _e.mock.On("SaveLastSentCertificate", ctx, certificate)} -} - -func (_c *AggSenderStorage_SaveLastSentCertificate_Call) Run(run func(ctx 
context.Context, certificate types.CertificateInfo)) *AggSenderStorage_SaveLastSentCertificate_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(types.CertificateInfo)) - }) - return _c -} - -func (_c *AggSenderStorage_SaveLastSentCertificate_Call) Return(_a0 error) *AggSenderStorage_SaveLastSentCertificate_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *AggSenderStorage_SaveLastSentCertificate_Call) RunAndReturn(run func(context.Context, types.CertificateInfo) error) *AggSenderStorage_SaveLastSentCertificate_Call { - _c.Call.Return(run) - return _c -} - -// UpdateCertificate provides a mock function with given fields: ctx, certificate -func (_m *AggSenderStorage) UpdateCertificate(ctx context.Context, certificate types.CertificateInfo) error { - ret := _m.Called(ctx, certificate) - - if len(ret) == 0 { - panic("no return value specified for UpdateCertificate") - } - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, types.CertificateInfo) error); ok { - r0 = rf(ctx, certificate) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// AggSenderStorage_UpdateCertificate_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateCertificate' -type AggSenderStorage_UpdateCertificate_Call struct { - *mock.Call -} - -// UpdateCertificate is a helper method to define mock.On call -// - ctx context.Context -// - certificate types.CertificateInfo -func (_e *AggSenderStorage_Expecter) UpdateCertificate(ctx interface{}, certificate interface{}) *AggSenderStorage_UpdateCertificate_Call { - return &AggSenderStorage_UpdateCertificate_Call{Call: _e.mock.On("UpdateCertificate", ctx, certificate)} -} - -func (_c *AggSenderStorage_UpdateCertificate_Call) Run(run func(ctx context.Context, certificate types.CertificateInfo)) *AggSenderStorage_UpdateCertificate_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(types.CertificateInfo)) - }) - return _c -} - -func (_c *AggSenderStorage_UpdateCertificate_Call) Return(_a0 error) *AggSenderStorage_UpdateCertificate_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *AggSenderStorage_UpdateCertificate_Call) RunAndReturn(run func(context.Context, types.CertificateInfo) error) *AggSenderStorage_UpdateCertificate_Call { - _c.Call.Return(run) - return _c -} - -// NewAggSenderStorage creates a new instance of AggSenderStorage. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewAggSenderStorage(t interface { - mock.TestingT - Cleanup(func()) -}) *AggSenderStorage { - mock := &AggSenderStorage{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/aggsender/mocks/aggsender_interface.go b/aggsender/mocks/aggsender_interface.go deleted file mode 100644 index bfd7e8853..000000000 --- a/aggsender/mocks/aggsender_interface.go +++ /dev/null @@ -1,80 +0,0 @@ -// Code generated by mockery. DO NOT EDIT. 
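// Illustrative sketch of how the typed EXPECT() helpers generated above are
// used in a test (the expectation shown is an example, not taken from this
// diff):
//
//	storageMock := mocks.NewAggSenderStorage(t)
//	storageMock.EXPECT().
//		GetCertificatesByStatus([]agglayer.CertificateStatus{agglayer.Pending}).
//		Return([]*types.CertificateInfo{}, nil).
//		Once()
//
// NewAggSenderStorage registers a t.Cleanup hook that asserts all
// expectations when the test finishes, so no explicit AssertExpectations
// call is needed.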
- -package mocks - -import ( - types "github.com/0xPolygon/cdk/aggsender/types" - mock "github.com/stretchr/testify/mock" -) - -// AggsenderInterface is an autogenerated mock type for the aggsenderInterface type -type AggsenderInterface struct { - mock.Mock -} - -type AggsenderInterface_Expecter struct { - mock *mock.Mock -} - -func (_m *AggsenderInterface) EXPECT() *AggsenderInterface_Expecter { - return &AggsenderInterface_Expecter{mock: &_m.Mock} -} - -// Info provides a mock function with given fields: -func (_m *AggsenderInterface) Info() types.AggsenderInfo { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for Info") - } - - var r0 types.AggsenderInfo - if rf, ok := ret.Get(0).(func() types.AggsenderInfo); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(types.AggsenderInfo) - } - - return r0 -} - -// AggsenderInterface_Info_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Info' -type AggsenderInterface_Info_Call struct { - *mock.Call -} - -// Info is a helper method to define mock.On call -func (_e *AggsenderInterface_Expecter) Info() *AggsenderInterface_Info_Call { - return &AggsenderInterface_Info_Call{Call: _e.mock.On("Info")} -} - -func (_c *AggsenderInterface_Info_Call) Run(run func()) *AggsenderInterface_Info_Call { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *AggsenderInterface_Info_Call) Return(_a0 types.AggsenderInfo) *AggsenderInterface_Info_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *AggsenderInterface_Info_Call) RunAndReturn(run func() types.AggsenderInfo) *AggsenderInterface_Info_Call { - _c.Call.Return(run) - return _c -} - -// NewAggsenderInterface creates a new instance of AggsenderInterface. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewAggsenderInterface(t interface { - mock.TestingT - Cleanup(func()) -}) *AggsenderInterface { - mock := &AggsenderInterface{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/aggsender/mocks/aggsender_storer.go b/aggsender/mocks/aggsender_storer.go deleted file mode 100644 index ed17ea18c..000000000 --- a/aggsender/mocks/aggsender_storer.go +++ /dev/null @@ -1,150 +0,0 @@ -// Code generated by mockery. DO NOT EDIT. 
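// AggsenderStorer, mocked below, is the read-only subset of AggSenderStorage
// (just GetCertificateByHeight and GetLastSentCertificate); keeping it as a
// separate, narrower interface lets query-only consumers, presumably the
// optional aggsender RPC behind the EnableRPC flag above, avoid depending on
// the write methods.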
- -package mocks - -import ( - types "github.com/0xPolygon/cdk/aggsender/types" - mock "github.com/stretchr/testify/mock" -) - -// AggsenderStorer is an autogenerated mock type for the aggsenderStorer type -type AggsenderStorer struct { - mock.Mock -} - -type AggsenderStorer_Expecter struct { - mock *mock.Mock -} - -func (_m *AggsenderStorer) EXPECT() *AggsenderStorer_Expecter { - return &AggsenderStorer_Expecter{mock: &_m.Mock} -} - -// GetCertificateByHeight provides a mock function with given fields: height -func (_m *AggsenderStorer) GetCertificateByHeight(height uint64) (*types.CertificateInfo, error) { - ret := _m.Called(height) - - if len(ret) == 0 { - panic("no return value specified for GetCertificateByHeight") - } - - var r0 *types.CertificateInfo - var r1 error - if rf, ok := ret.Get(0).(func(uint64) (*types.CertificateInfo, error)); ok { - return rf(height) - } - if rf, ok := ret.Get(0).(func(uint64) *types.CertificateInfo); ok { - r0 = rf(height) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.CertificateInfo) - } - } - - if rf, ok := ret.Get(1).(func(uint64) error); ok { - r1 = rf(height) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// AggsenderStorer_GetCertificateByHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetCertificateByHeight' -type AggsenderStorer_GetCertificateByHeight_Call struct { - *mock.Call -} - -// GetCertificateByHeight is a helper method to define mock.On call -// - height uint64 -func (_e *AggsenderStorer_Expecter) GetCertificateByHeight(height interface{}) *AggsenderStorer_GetCertificateByHeight_Call { - return &AggsenderStorer_GetCertificateByHeight_Call{Call: _e.mock.On("GetCertificateByHeight", height)} -} - -func (_c *AggsenderStorer_GetCertificateByHeight_Call) Run(run func(height uint64)) *AggsenderStorer_GetCertificateByHeight_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(uint64)) - }) - return _c -} - -func (_c *AggsenderStorer_GetCertificateByHeight_Call) Return(_a0 *types.CertificateInfo, _a1 error) *AggsenderStorer_GetCertificateByHeight_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *AggsenderStorer_GetCertificateByHeight_Call) RunAndReturn(run func(uint64) (*types.CertificateInfo, error)) *AggsenderStorer_GetCertificateByHeight_Call { - _c.Call.Return(run) - return _c -} - -// GetLastSentCertificate provides a mock function with given fields: -func (_m *AggsenderStorer) GetLastSentCertificate() (*types.CertificateInfo, error) { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for GetLastSentCertificate") - } - - var r0 *types.CertificateInfo - var r1 error - if rf, ok := ret.Get(0).(func() (*types.CertificateInfo, error)); ok { - return rf() - } - if rf, ok := ret.Get(0).(func() *types.CertificateInfo); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.CertificateInfo) - } - } - - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// AggsenderStorer_GetLastSentCertificate_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLastSentCertificate' -type AggsenderStorer_GetLastSentCertificate_Call struct { - *mock.Call -} - -// GetLastSentCertificate is a helper method to define mock.On call -func (_e *AggsenderStorer_Expecter) GetLastSentCertificate() *AggsenderStorer_GetLastSentCertificate_Call { - return &AggsenderStorer_GetLastSentCertificate_Call{Call: 
_e.mock.On("GetLastSentCertificate")} -} - -func (_c *AggsenderStorer_GetLastSentCertificate_Call) Run(run func()) *AggsenderStorer_GetLastSentCertificate_Call { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *AggsenderStorer_GetLastSentCertificate_Call) Return(_a0 *types.CertificateInfo, _a1 error) *AggsenderStorer_GetLastSentCertificate_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *AggsenderStorer_GetLastSentCertificate_Call) RunAndReturn(run func() (*types.CertificateInfo, error)) *AggsenderStorer_GetLastSentCertificate_Call { - _c.Call.Return(run) - return _c -} - -// NewAggsenderStorer creates a new instance of AggsenderStorer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewAggsenderStorer(t interface { - mock.TestingT - Cleanup(func()) -}) *AggsenderStorer { - mock := &AggsenderStorer{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/aggsender/mocks/block_notifier.go b/aggsender/mocks/block_notifier.go deleted file mode 100644 index 24d751b36..000000000 --- a/aggsender/mocks/block_notifier.go +++ /dev/null @@ -1,128 +0,0 @@ -// Code generated by mockery. DO NOT EDIT. - -package mocks - -import ( - types "github.com/0xPolygon/cdk/aggsender/types" - mock "github.com/stretchr/testify/mock" -) - -// BlockNotifier is an autogenerated mock type for the BlockNotifier type -type BlockNotifier struct { - mock.Mock -} - -type BlockNotifier_Expecter struct { - mock *mock.Mock -} - -func (_m *BlockNotifier) EXPECT() *BlockNotifier_Expecter { - return &BlockNotifier_Expecter{mock: &_m.Mock} -} - -// String provides a mock function with no fields -func (_m *BlockNotifier) String() string { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for String") - } - - var r0 string - if rf, ok := ret.Get(0).(func() string); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(string) - } - - return r0 -} - -// BlockNotifier_String_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'String' -type BlockNotifier_String_Call struct { - *mock.Call -} - -// String is a helper method to define mock.On call -func (_e *BlockNotifier_Expecter) String() *BlockNotifier_String_Call { - return &BlockNotifier_String_Call{Call: _e.mock.On("String")} -} - -func (_c *BlockNotifier_String_Call) Run(run func()) *BlockNotifier_String_Call { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *BlockNotifier_String_Call) Return(_a0 string) *BlockNotifier_String_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *BlockNotifier_String_Call) RunAndReturn(run func() string) *BlockNotifier_String_Call { - _c.Call.Return(run) - return _c -} - -// Subscribe provides a mock function with given fields: id -func (_m *BlockNotifier) Subscribe(id string) <-chan types.EventNewBlock { - ret := _m.Called(id) - - if len(ret) == 0 { - panic("no return value specified for Subscribe") - } - - var r0 <-chan types.EventNewBlock - if rf, ok := ret.Get(0).(func(string) <-chan types.EventNewBlock); ok { - r0 = rf(id) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(<-chan types.EventNewBlock) - } - } - - return r0 -} - -// BlockNotifier_Subscribe_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Subscribe' -type BlockNotifier_Subscribe_Call struct { - *mock.Call -} - -// Subscribe 
is a helper method to define mock.On call -// - id string -func (_e *BlockNotifier_Expecter) Subscribe(id interface{}) *BlockNotifier_Subscribe_Call { - return &BlockNotifier_Subscribe_Call{Call: _e.mock.On("Subscribe", id)} -} - -func (_c *BlockNotifier_Subscribe_Call) Run(run func(id string)) *BlockNotifier_Subscribe_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(string)) - }) - return _c -} - -func (_c *BlockNotifier_Subscribe_Call) Return(_a0 <-chan types.EventNewBlock) *BlockNotifier_Subscribe_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *BlockNotifier_Subscribe_Call) RunAndReturn(run func(string) <-chan types.EventNewBlock) *BlockNotifier_Subscribe_Call { - _c.Call.Return(run) - return _c -} - -// NewBlockNotifier creates a new instance of BlockNotifier. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewBlockNotifier(t interface { - mock.TestingT - Cleanup(func()) -}) *BlockNotifier { - mock := &BlockNotifier{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/aggsender/mocks/epoch_notifier.go b/aggsender/mocks/epoch_notifier.go deleted file mode 100644 index 0da06d93d..000000000 --- a/aggsender/mocks/epoch_notifier.go +++ /dev/null @@ -1,163 +0,0 @@ -// Code generated by mockery. DO NOT EDIT. - -package mocks - -import ( - context "context" - - types "github.com/0xPolygon/cdk/aggsender/types" - mock "github.com/stretchr/testify/mock" -) - -// EpochNotifier is an autogenerated mock type for the EpochNotifier type -type EpochNotifier struct { - mock.Mock -} - -type EpochNotifier_Expecter struct { - mock *mock.Mock -} - -func (_m *EpochNotifier) EXPECT() *EpochNotifier_Expecter { - return &EpochNotifier_Expecter{mock: &_m.Mock} -} - -// Start provides a mock function with given fields: ctx -func (_m *EpochNotifier) Start(ctx context.Context) { - _m.Called(ctx) -} - -// EpochNotifier_Start_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Start' -type EpochNotifier_Start_Call struct { - *mock.Call -} - -// Start is a helper method to define mock.On call -// - ctx context.Context -func (_e *EpochNotifier_Expecter) Start(ctx interface{}) *EpochNotifier_Start_Call { - return &EpochNotifier_Start_Call{Call: _e.mock.On("Start", ctx)} -} - -func (_c *EpochNotifier_Start_Call) Run(run func(ctx context.Context)) *EpochNotifier_Start_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context)) - }) - return _c -} - -func (_c *EpochNotifier_Start_Call) Return() *EpochNotifier_Start_Call { - _c.Call.Return() - return _c -} - -func (_c *EpochNotifier_Start_Call) RunAndReturn(run func(context.Context)) *EpochNotifier_Start_Call { - _c.Run(run) - return _c -} - -// String provides a mock function with no fields -func (_m *EpochNotifier) String() string { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for String") - } - - var r0 string - if rf, ok := ret.Get(0).(func() string); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(string) - } - - return r0 -} - -// EpochNotifier_String_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'String' -type EpochNotifier_String_Call struct { - *mock.Call -} - -// String is a helper method to define mock.On call -func (_e *EpochNotifier_Expecter) String() *EpochNotifier_String_Call { - return &EpochNotifier_String_Call{Call: 
_e.mock.On("String")} -} - -func (_c *EpochNotifier_String_Call) Run(run func()) *EpochNotifier_String_Call { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *EpochNotifier_String_Call) Return(_a0 string) *EpochNotifier_String_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *EpochNotifier_String_Call) RunAndReturn(run func() string) *EpochNotifier_String_Call { - _c.Call.Return(run) - return _c -} - -// Subscribe provides a mock function with given fields: id -func (_m *EpochNotifier) Subscribe(id string) <-chan types.EpochEvent { - ret := _m.Called(id) - - if len(ret) == 0 { - panic("no return value specified for Subscribe") - } - - var r0 <-chan types.EpochEvent - if rf, ok := ret.Get(0).(func(string) <-chan types.EpochEvent); ok { - r0 = rf(id) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(<-chan types.EpochEvent) - } - } - - return r0 -} - -// EpochNotifier_Subscribe_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Subscribe' -type EpochNotifier_Subscribe_Call struct { - *mock.Call -} - -// Subscribe is a helper method to define mock.On call -// - id string -func (_e *EpochNotifier_Expecter) Subscribe(id interface{}) *EpochNotifier_Subscribe_Call { - return &EpochNotifier_Subscribe_Call{Call: _e.mock.On("Subscribe", id)} -} - -func (_c *EpochNotifier_Subscribe_Call) Run(run func(id string)) *EpochNotifier_Subscribe_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(string)) - }) - return _c -} - -func (_c *EpochNotifier_Subscribe_Call) Return(_a0 <-chan types.EpochEvent) *EpochNotifier_Subscribe_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *EpochNotifier_Subscribe_Call) RunAndReturn(run func(string) <-chan types.EpochEvent) *EpochNotifier_Subscribe_Call { - _c.Call.Return(run) - return _c -} - -// NewEpochNotifier creates a new instance of EpochNotifier. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewEpochNotifier(t interface { - mock.TestingT - Cleanup(func()) -}) *EpochNotifier { - mock := &EpochNotifier{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/aggsender/mocks/eth_client.go b/aggsender/mocks/eth_client.go deleted file mode 100644 index 6a68de414..000000000 --- a/aggsender/mocks/eth_client.go +++ /dev/null @@ -1,154 +0,0 @@ -// Code generated by mockery. DO NOT EDIT. 
- -package mocks - -import ( - context "context" - big "math/big" - - coretypes "github.com/ethereum/go-ethereum/core/types" - - mock "github.com/stretchr/testify/mock" -) - -// EthClient is an autogenerated mock type for the EthClient type -type EthClient struct { - mock.Mock -} - -type EthClient_Expecter struct { - mock *mock.Mock -} - -func (_m *EthClient) EXPECT() *EthClient_Expecter { - return &EthClient_Expecter{mock: &_m.Mock} -} - -// BlockNumber provides a mock function with given fields: ctx -func (_m *EthClient) BlockNumber(ctx context.Context) (uint64, error) { - ret := _m.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for BlockNumber") - } - - var r0 uint64 - var r1 error - if rf, ok := ret.Get(0).(func(context.Context) (uint64, error)); ok { - return rf(ctx) - } - if rf, ok := ret.Get(0).(func(context.Context) uint64); ok { - r0 = rf(ctx) - } else { - r0 = ret.Get(0).(uint64) - } - - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(ctx) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// EthClient_BlockNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BlockNumber' -type EthClient_BlockNumber_Call struct { - *mock.Call -} - -// BlockNumber is a helper method to define mock.On call -// - ctx context.Context -func (_e *EthClient_Expecter) BlockNumber(ctx interface{}) *EthClient_BlockNumber_Call { - return &EthClient_BlockNumber_Call{Call: _e.mock.On("BlockNumber", ctx)} -} - -func (_c *EthClient_BlockNumber_Call) Run(run func(ctx context.Context)) *EthClient_BlockNumber_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context)) - }) - return _c -} - -func (_c *EthClient_BlockNumber_Call) Return(_a0 uint64, _a1 error) *EthClient_BlockNumber_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *EthClient_BlockNumber_Call) RunAndReturn(run func(context.Context) (uint64, error)) *EthClient_BlockNumber_Call { - _c.Call.Return(run) - return _c -} - -// HeaderByNumber provides a mock function with given fields: ctx, number -func (_m *EthClient) HeaderByNumber(ctx context.Context, number *big.Int) (*coretypes.Header, error) { - ret := _m.Called(ctx, number) - - if len(ret) == 0 { - panic("no return value specified for HeaderByNumber") - } - - var r0 *coretypes.Header - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *big.Int) (*coretypes.Header, error)); ok { - return rf(ctx, number) - } - if rf, ok := ret.Get(0).(func(context.Context, *big.Int) *coretypes.Header); ok { - r0 = rf(ctx, number) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.Header) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *big.Int) error); ok { - r1 = rf(ctx, number) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// EthClient_HeaderByNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'HeaderByNumber' -type EthClient_HeaderByNumber_Call struct { - *mock.Call -} - -// HeaderByNumber is a helper method to define mock.On call -// - ctx context.Context -// - number *big.Int -func (_e *EthClient_Expecter) HeaderByNumber(ctx interface{}, number interface{}) *EthClient_HeaderByNumber_Call { - return &EthClient_HeaderByNumber_Call{Call: _e.mock.On("HeaderByNumber", ctx, number)} -} - -func (_c *EthClient_HeaderByNumber_Call) Run(run func(ctx context.Context, number *big.Int)) *EthClient_HeaderByNumber_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), 
args[1].(*big.Int)) - }) - return _c -} - -func (_c *EthClient_HeaderByNumber_Call) Return(_a0 *coretypes.Header, _a1 error) *EthClient_HeaderByNumber_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *EthClient_HeaderByNumber_Call) RunAndReturn(run func(context.Context, *big.Int) (*coretypes.Header, error)) *EthClient_HeaderByNumber_Call { - _c.Call.Return(run) - return _c -} - -// NewEthClient creates a new instance of EthClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewEthClient(t interface { - mock.TestingT - Cleanup(func()) -}) *EthClient { - mock := &EthClient{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/aggsender/mocks/generic_subscriber.go b/aggsender/mocks/generic_subscriber.go deleted file mode 100644 index 59a276428..000000000 --- a/aggsender/mocks/generic_subscriber.go +++ /dev/null @@ -1,113 +0,0 @@ -// Code generated by mockery. DO NOT EDIT. - -package mocks - -import mock "github.com/stretchr/testify/mock" - -// GenericSubscriber is an autogenerated mock type for the GenericSubscriber type -type GenericSubscriber[T interface{}] struct { - mock.Mock -} - -type GenericSubscriber_Expecter[T interface{}] struct { - mock *mock.Mock -} - -func (_m *GenericSubscriber[T]) EXPECT() *GenericSubscriber_Expecter[T] { - return &GenericSubscriber_Expecter[T]{mock: &_m.Mock} -} - -// Publish provides a mock function with given fields: data -func (_m *GenericSubscriber[T]) Publish(data T) { - _m.Called(data) -} - -// GenericSubscriber_Publish_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Publish' -type GenericSubscriber_Publish_Call[T interface{}] struct { - *mock.Call -} - -// Publish is a helper method to define mock.On call -// - data T -func (_e *GenericSubscriber_Expecter[T]) Publish(data interface{}) *GenericSubscriber_Publish_Call[T] { - return &GenericSubscriber_Publish_Call[T]{Call: _e.mock.On("Publish", data)} -} - -func (_c *GenericSubscriber_Publish_Call[T]) Run(run func(data T)) *GenericSubscriber_Publish_Call[T] { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(T)) - }) - return _c -} - -func (_c *GenericSubscriber_Publish_Call[T]) Return() *GenericSubscriber_Publish_Call[T] { - _c.Call.Return() - return _c -} - -func (_c *GenericSubscriber_Publish_Call[T]) RunAndReturn(run func(T)) *GenericSubscriber_Publish_Call[T] { - _c.Run(run) - return _c -} - -// Subscribe provides a mock function with given fields: subscriberName -func (_m *GenericSubscriber[T]) Subscribe(subscriberName string) <-chan T { - ret := _m.Called(subscriberName) - - if len(ret) == 0 { - panic("no return value specified for Subscribe") - } - - var r0 <-chan T - if rf, ok := ret.Get(0).(func(string) <-chan T); ok { - r0 = rf(subscriberName) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(<-chan T) - } - } - - return r0 -} - -// GenericSubscriber_Subscribe_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Subscribe' -type GenericSubscriber_Subscribe_Call[T interface{}] struct { - *mock.Call -} - -// Subscribe is a helper method to define mock.On call -// - subscriberName string -func (_e *GenericSubscriber_Expecter[T]) Subscribe(subscriberName interface{}) *GenericSubscriber_Subscribe_Call[T] { - return &GenericSubscriber_Subscribe_Call[T]{Call: _e.mock.On("Subscribe", subscriberName)} -} - -func (_c 
*GenericSubscriber_Subscribe_Call[T]) Run(run func(subscriberName string)) *GenericSubscriber_Subscribe_Call[T] { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(string)) - }) - return _c -} - -func (_c *GenericSubscriber_Subscribe_Call[T]) Return(_a0 <-chan T) *GenericSubscriber_Subscribe_Call[T] { - _c.Call.Return(_a0) - return _c -} - -func (_c *GenericSubscriber_Subscribe_Call[T]) RunAndReturn(run func(string) <-chan T) *GenericSubscriber_Subscribe_Call[T] { - _c.Call.Return(run) - return _c -} - -// NewGenericSubscriber creates a new instance of GenericSubscriber. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewGenericSubscriber[T interface{}](t interface { - mock.TestingT - Cleanup(func()) -}) *GenericSubscriber[T] { - mock := &GenericSubscriber[T]{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/aggsender/mocks/l1_info_tree_syncer.go b/aggsender/mocks/l1_info_tree_syncer.go deleted file mode 100644 index 70ac97de9..000000000 --- a/aggsender/mocks/l1_info_tree_syncer.go +++ /dev/null @@ -1,217 +0,0 @@ -// Code generated by mockery. DO NOT EDIT. - -package mocks - -import ( - context "context" - - common "github.com/ethereum/go-ethereum/common" - - l1infotreesync "github.com/0xPolygon/cdk/l1infotreesync" - - mock "github.com/stretchr/testify/mock" - - treetypes "github.com/0xPolygon/cdk/tree/types" -) - -// L1InfoTreeSyncer is an autogenerated mock type for the L1InfoTreeSyncer type -type L1InfoTreeSyncer struct { - mock.Mock -} - -type L1InfoTreeSyncer_Expecter struct { - mock *mock.Mock -} - -func (_m *L1InfoTreeSyncer) EXPECT() *L1InfoTreeSyncer_Expecter { - return &L1InfoTreeSyncer_Expecter{mock: &_m.Mock} -} - -// GetInfoByGlobalExitRoot provides a mock function with given fields: globalExitRoot -func (_m *L1InfoTreeSyncer) GetInfoByGlobalExitRoot(globalExitRoot common.Hash) (*l1infotreesync.L1InfoTreeLeaf, error) { - ret := _m.Called(globalExitRoot) - - if len(ret) == 0 { - panic("no return value specified for GetInfoByGlobalExitRoot") - } - - var r0 *l1infotreesync.L1InfoTreeLeaf - var r1 error - if rf, ok := ret.Get(0).(func(common.Hash) (*l1infotreesync.L1InfoTreeLeaf, error)); ok { - return rf(globalExitRoot) - } - if rf, ok := ret.Get(0).(func(common.Hash) *l1infotreesync.L1InfoTreeLeaf); ok { - r0 = rf(globalExitRoot) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*l1infotreesync.L1InfoTreeLeaf) - } - } - - if rf, ok := ret.Get(1).(func(common.Hash) error); ok { - r1 = rf(globalExitRoot) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// L1InfoTreeSyncer_GetInfoByGlobalExitRoot_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetInfoByGlobalExitRoot' -type L1InfoTreeSyncer_GetInfoByGlobalExitRoot_Call struct { - *mock.Call -} - -// GetInfoByGlobalExitRoot is a helper method to define mock.On call -// - globalExitRoot common.Hash -func (_e *L1InfoTreeSyncer_Expecter) GetInfoByGlobalExitRoot(globalExitRoot interface{}) *L1InfoTreeSyncer_GetInfoByGlobalExitRoot_Call { - return &L1InfoTreeSyncer_GetInfoByGlobalExitRoot_Call{Call: _e.mock.On("GetInfoByGlobalExitRoot", globalExitRoot)} -} - -func (_c *L1InfoTreeSyncer_GetInfoByGlobalExitRoot_Call) Run(run func(globalExitRoot common.Hash)) *L1InfoTreeSyncer_GetInfoByGlobalExitRoot_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(common.Hash)) - }) - return _c -} - 
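All of these generated mocks share mockery's expecter pattern: EXPECT() returns a typed builder, and Run/Return/RunAndReturn delegate to the embedded testify *mock.Call. As a rough, hypothetical sketch of how such a mock was consumed in tests before this removal (the constructor and expecter calls mirror the generated code; the hash value is made up):

package mocks_test

import (
	"testing"

	"github.com/0xPolygon/cdk/aggsender/mocks"
	"github.com/ethereum/go-ethereum/common"
	"github.com/stretchr/testify/require"
)

func TestL1InfoTreeSyncerExpecterSketch(t *testing.T) {
	// NewL1InfoTreeSyncer registers a cleanup hook asserting all expectations were met.
	syncer := mocks.NewL1InfoTreeSyncer(t)

	ger := common.HexToHash("0x01") // hypothetical global exit root
	syncer.EXPECT().
		GetInfoByGlobalExitRoot(ger).
		Return(nil, nil).
		Once()

	leaf, err := syncer.GetInfoByGlobalExitRoot(ger)
	require.NoError(t, err)
	require.Nil(t, leaf)
}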
-func (_c *L1InfoTreeSyncer_GetInfoByGlobalExitRoot_Call) Return(_a0 *l1infotreesync.L1InfoTreeLeaf, _a1 error) *L1InfoTreeSyncer_GetInfoByGlobalExitRoot_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *L1InfoTreeSyncer_GetInfoByGlobalExitRoot_Call) RunAndReturn(run func(common.Hash) (*l1infotreesync.L1InfoTreeLeaf, error)) *L1InfoTreeSyncer_GetInfoByGlobalExitRoot_Call { - _c.Call.Return(run) - return _c -} - -// GetL1InfoTreeMerkleProofFromIndexToRoot provides a mock function with given fields: ctx, index, root -func (_m *L1InfoTreeSyncer) GetL1InfoTreeMerkleProofFromIndexToRoot(ctx context.Context, index uint32, root common.Hash) (treetypes.Proof, error) { - ret := _m.Called(ctx, index, root) - - if len(ret) == 0 { - panic("no return value specified for GetL1InfoTreeMerkleProofFromIndexToRoot") - } - - var r0 treetypes.Proof - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, uint32, common.Hash) (treetypes.Proof, error)); ok { - return rf(ctx, index, root) - } - if rf, ok := ret.Get(0).(func(context.Context, uint32, common.Hash) treetypes.Proof); ok { - r0 = rf(ctx, index, root) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(treetypes.Proof) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, uint32, common.Hash) error); ok { - r1 = rf(ctx, index, root) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// L1InfoTreeSyncer_GetL1InfoTreeMerkleProofFromIndexToRoot_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetL1InfoTreeMerkleProofFromIndexToRoot' -type L1InfoTreeSyncer_GetL1InfoTreeMerkleProofFromIndexToRoot_Call struct { - *mock.Call -} - -// GetL1InfoTreeMerkleProofFromIndexToRoot is a helper method to define mock.On call -// - ctx context.Context -// - index uint32 -// - root common.Hash -func (_e *L1InfoTreeSyncer_Expecter) GetL1InfoTreeMerkleProofFromIndexToRoot(ctx interface{}, index interface{}, root interface{}) *L1InfoTreeSyncer_GetL1InfoTreeMerkleProofFromIndexToRoot_Call { - return &L1InfoTreeSyncer_GetL1InfoTreeMerkleProofFromIndexToRoot_Call{Call: _e.mock.On("GetL1InfoTreeMerkleProofFromIndexToRoot", ctx, index, root)} -} - -func (_c *L1InfoTreeSyncer_GetL1InfoTreeMerkleProofFromIndexToRoot_Call) Run(run func(ctx context.Context, index uint32, root common.Hash)) *L1InfoTreeSyncer_GetL1InfoTreeMerkleProofFromIndexToRoot_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(uint32), args[2].(common.Hash)) - }) - return _c -} - -func (_c *L1InfoTreeSyncer_GetL1InfoTreeMerkleProofFromIndexToRoot_Call) Return(_a0 treetypes.Proof, _a1 error) *L1InfoTreeSyncer_GetL1InfoTreeMerkleProofFromIndexToRoot_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *L1InfoTreeSyncer_GetL1InfoTreeMerkleProofFromIndexToRoot_Call) RunAndReturn(run func(context.Context, uint32, common.Hash) (treetypes.Proof, error)) *L1InfoTreeSyncer_GetL1InfoTreeMerkleProofFromIndexToRoot_Call { - _c.Call.Return(run) - return _c -} - -// GetL1InfoTreeRootByIndex provides a mock function with given fields: ctx, index -func (_m *L1InfoTreeSyncer) GetL1InfoTreeRootByIndex(ctx context.Context, index uint32) (treetypes.Root, error) { - ret := _m.Called(ctx, index) - - if len(ret) == 0 { - panic("no return value specified for GetL1InfoTreeRootByIndex") - } - - var r0 treetypes.Root - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, uint32) (treetypes.Root, error)); ok { - return rf(ctx, index) - } - if rf, ok := ret.Get(0).(func(context.Context, uint32) 
treetypes.Root); ok { - r0 = rf(ctx, index) - } else { - r0 = ret.Get(0).(treetypes.Root) - } - - if rf, ok := ret.Get(1).(func(context.Context, uint32) error); ok { - r1 = rf(ctx, index) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// L1InfoTreeSyncer_GetL1InfoTreeRootByIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetL1InfoTreeRootByIndex' -type L1InfoTreeSyncer_GetL1InfoTreeRootByIndex_Call struct { - *mock.Call -} - -// GetL1InfoTreeRootByIndex is a helper method to define mock.On call -// - ctx context.Context -// - index uint32 -func (_e *L1InfoTreeSyncer_Expecter) GetL1InfoTreeRootByIndex(ctx interface{}, index interface{}) *L1InfoTreeSyncer_GetL1InfoTreeRootByIndex_Call { - return &L1InfoTreeSyncer_GetL1InfoTreeRootByIndex_Call{Call: _e.mock.On("GetL1InfoTreeRootByIndex", ctx, index)} -} - -func (_c *L1InfoTreeSyncer_GetL1InfoTreeRootByIndex_Call) Run(run func(ctx context.Context, index uint32)) *L1InfoTreeSyncer_GetL1InfoTreeRootByIndex_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(uint32)) - }) - return _c -} - -func (_c *L1InfoTreeSyncer_GetL1InfoTreeRootByIndex_Call) Return(_a0 treetypes.Root, _a1 error) *L1InfoTreeSyncer_GetL1InfoTreeRootByIndex_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *L1InfoTreeSyncer_GetL1InfoTreeRootByIndex_Call) RunAndReturn(run func(context.Context, uint32) (treetypes.Root, error)) *L1InfoTreeSyncer_GetL1InfoTreeRootByIndex_Call { - _c.Call.Return(run) - return _c -} - -// NewL1InfoTreeSyncer creates a new instance of L1InfoTreeSyncer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewL1InfoTreeSyncer(t interface { - mock.TestingT - Cleanup(func()) -}) *L1InfoTreeSyncer { - mock := &L1InfoTreeSyncer{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/aggsender/mocks/l2_bridge_syncer.go b/aggsender/mocks/l2_bridge_syncer.go deleted file mode 100644 index b8eeb0848..000000000 --- a/aggsender/mocks/l2_bridge_syncer.go +++ /dev/null @@ -1,423 +0,0 @@ -// Code generated by mockery. DO NOT EDIT. 
- -package mocks - -import ( - bridgesync "github.com/0xPolygon/cdk/bridgesync" - common "github.com/ethereum/go-ethereum/common" - - context "context" - - etherman "github.com/0xPolygon/cdk/etherman" - - mock "github.com/stretchr/testify/mock" - - treetypes "github.com/0xPolygon/cdk/tree/types" -) - -// L2BridgeSyncer is an autogenerated mock type for the L2BridgeSyncer type -type L2BridgeSyncer struct { - mock.Mock -} - -type L2BridgeSyncer_Expecter struct { - mock *mock.Mock -} - -func (_m *L2BridgeSyncer) EXPECT() *L2BridgeSyncer_Expecter { - return &L2BridgeSyncer_Expecter{mock: &_m.Mock} -} - -// BlockFinality provides a mock function with no fields -func (_m *L2BridgeSyncer) BlockFinality() etherman.BlockNumberFinality { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for BlockFinality") - } - - var r0 etherman.BlockNumberFinality - if rf, ok := ret.Get(0).(func() etherman.BlockNumberFinality); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(etherman.BlockNumberFinality) - } - - return r0 -} - -// L2BridgeSyncer_BlockFinality_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BlockFinality' -type L2BridgeSyncer_BlockFinality_Call struct { - *mock.Call -} - -// BlockFinality is a helper method to define mock.On call -func (_e *L2BridgeSyncer_Expecter) BlockFinality() *L2BridgeSyncer_BlockFinality_Call { - return &L2BridgeSyncer_BlockFinality_Call{Call: _e.mock.On("BlockFinality")} -} - -func (_c *L2BridgeSyncer_BlockFinality_Call) Run(run func()) *L2BridgeSyncer_BlockFinality_Call { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *L2BridgeSyncer_BlockFinality_Call) Return(_a0 etherman.BlockNumberFinality) *L2BridgeSyncer_BlockFinality_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *L2BridgeSyncer_BlockFinality_Call) RunAndReturn(run func() etherman.BlockNumberFinality) *L2BridgeSyncer_BlockFinality_Call { - _c.Call.Return(run) - return _c -} - -// GetBlockByLER provides a mock function with given fields: ctx, ler -func (_m *L2BridgeSyncer) GetBlockByLER(ctx context.Context, ler common.Hash) (uint64, error) { - ret := _m.Called(ctx, ler) - - if len(ret) == 0 { - panic("no return value specified for GetBlockByLER") - } - - var r0 uint64 - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (uint64, error)); ok { - return rf(ctx, ler) - } - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) uint64); ok { - r0 = rf(ctx, ler) - } else { - r0 = ret.Get(0).(uint64) - } - - if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { - r1 = rf(ctx, ler) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// L2BridgeSyncer_GetBlockByLER_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetBlockByLER' -type L2BridgeSyncer_GetBlockByLER_Call struct { - *mock.Call -} - -// GetBlockByLER is a helper method to define mock.On call -// - ctx context.Context -// - ler common.Hash -func (_e *L2BridgeSyncer_Expecter) GetBlockByLER(ctx interface{}, ler interface{}) *L2BridgeSyncer_GetBlockByLER_Call { - return &L2BridgeSyncer_GetBlockByLER_Call{Call: _e.mock.On("GetBlockByLER", ctx, ler)} -} - -func (_c *L2BridgeSyncer_GetBlockByLER_Call) Run(run func(ctx context.Context, ler common.Hash)) *L2BridgeSyncer_GetBlockByLER_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(common.Hash)) - }) - return _c -} - -func (_c *L2BridgeSyncer_GetBlockByLER_Call) Return(_a0 
uint64, _a1 error) *L2BridgeSyncer_GetBlockByLER_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *L2BridgeSyncer_GetBlockByLER_Call) RunAndReturn(run func(context.Context, common.Hash) (uint64, error)) *L2BridgeSyncer_GetBlockByLER_Call { - _c.Call.Return(run) - return _c -} - -// GetBridgesPublished provides a mock function with given fields: ctx, fromBlock, toBlock -func (_m *L2BridgeSyncer) GetBridgesPublished(ctx context.Context, fromBlock uint64, toBlock uint64) ([]bridgesync.Bridge, error) { - ret := _m.Called(ctx, fromBlock, toBlock) - - if len(ret) == 0 { - panic("no return value specified for GetBridgesPublished") - } - - var r0 []bridgesync.Bridge - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64) ([]bridgesync.Bridge, error)); ok { - return rf(ctx, fromBlock, toBlock) - } - if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64) []bridgesync.Bridge); ok { - r0 = rf(ctx, fromBlock, toBlock) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]bridgesync.Bridge) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, uint64, uint64) error); ok { - r1 = rf(ctx, fromBlock, toBlock) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// L2BridgeSyncer_GetBridgesPublished_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetBridgesPublished' -type L2BridgeSyncer_GetBridgesPublished_Call struct { - *mock.Call -} - -// GetBridgesPublished is a helper method to define mock.On call -// - ctx context.Context -// - fromBlock uint64 -// - toBlock uint64 -func (_e *L2BridgeSyncer_Expecter) GetBridgesPublished(ctx interface{}, fromBlock interface{}, toBlock interface{}) *L2BridgeSyncer_GetBridgesPublished_Call { - return &L2BridgeSyncer_GetBridgesPublished_Call{Call: _e.mock.On("GetBridgesPublished", ctx, fromBlock, toBlock)} -} - -func (_c *L2BridgeSyncer_GetBridgesPublished_Call) Run(run func(ctx context.Context, fromBlock uint64, toBlock uint64)) *L2BridgeSyncer_GetBridgesPublished_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(uint64), args[2].(uint64)) - }) - return _c -} - -func (_c *L2BridgeSyncer_GetBridgesPublished_Call) Return(_a0 []bridgesync.Bridge, _a1 error) *L2BridgeSyncer_GetBridgesPublished_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *L2BridgeSyncer_GetBridgesPublished_Call) RunAndReturn(run func(context.Context, uint64, uint64) ([]bridgesync.Bridge, error)) *L2BridgeSyncer_GetBridgesPublished_Call { - _c.Call.Return(run) - return _c -} - -// GetClaims provides a mock function with given fields: ctx, fromBlock, toBlock -func (_m *L2BridgeSyncer) GetClaims(ctx context.Context, fromBlock uint64, toBlock uint64) ([]bridgesync.Claim, error) { - ret := _m.Called(ctx, fromBlock, toBlock) - - if len(ret) == 0 { - panic("no return value specified for GetClaims") - } - - var r0 []bridgesync.Claim - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64) ([]bridgesync.Claim, error)); ok { - return rf(ctx, fromBlock, toBlock) - } - if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64) []bridgesync.Claim); ok { - r0 = rf(ctx, fromBlock, toBlock) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]bridgesync.Claim) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, uint64, uint64) error); ok { - r1 = rf(ctx, fromBlock, toBlock) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// L2BridgeSyncer_GetClaims_Call is a *mock.Call that shadows Run/Return methods with type explicit 
version for method 'GetClaims' -type L2BridgeSyncer_GetClaims_Call struct { - *mock.Call -} - -// GetClaims is a helper method to define mock.On call -// - ctx context.Context -// - fromBlock uint64 -// - toBlock uint64 -func (_e *L2BridgeSyncer_Expecter) GetClaims(ctx interface{}, fromBlock interface{}, toBlock interface{}) *L2BridgeSyncer_GetClaims_Call { - return &L2BridgeSyncer_GetClaims_Call{Call: _e.mock.On("GetClaims", ctx, fromBlock, toBlock)} -} - -func (_c *L2BridgeSyncer_GetClaims_Call) Run(run func(ctx context.Context, fromBlock uint64, toBlock uint64)) *L2BridgeSyncer_GetClaims_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(uint64), args[2].(uint64)) - }) - return _c -} - -func (_c *L2BridgeSyncer_GetClaims_Call) Return(_a0 []bridgesync.Claim, _a1 error) *L2BridgeSyncer_GetClaims_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *L2BridgeSyncer_GetClaims_Call) RunAndReturn(run func(context.Context, uint64, uint64) ([]bridgesync.Claim, error)) *L2BridgeSyncer_GetClaims_Call { - _c.Call.Return(run) - return _c -} - -// GetExitRootByIndex provides a mock function with given fields: ctx, index -func (_m *L2BridgeSyncer) GetExitRootByIndex(ctx context.Context, index uint32) (treetypes.Root, error) { - ret := _m.Called(ctx, index) - - if len(ret) == 0 { - panic("no return value specified for GetExitRootByIndex") - } - - var r0 treetypes.Root - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, uint32) (treetypes.Root, error)); ok { - return rf(ctx, index) - } - if rf, ok := ret.Get(0).(func(context.Context, uint32) treetypes.Root); ok { - r0 = rf(ctx, index) - } else { - r0 = ret.Get(0).(treetypes.Root) - } - - if rf, ok := ret.Get(1).(func(context.Context, uint32) error); ok { - r1 = rf(ctx, index) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// L2BridgeSyncer_GetExitRootByIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetExitRootByIndex' -type L2BridgeSyncer_GetExitRootByIndex_Call struct { - *mock.Call -} - -// GetExitRootByIndex is a helper method to define mock.On call -// - ctx context.Context -// - index uint32 -func (_e *L2BridgeSyncer_Expecter) GetExitRootByIndex(ctx interface{}, index interface{}) *L2BridgeSyncer_GetExitRootByIndex_Call { - return &L2BridgeSyncer_GetExitRootByIndex_Call{Call: _e.mock.On("GetExitRootByIndex", ctx, index)} -} - -func (_c *L2BridgeSyncer_GetExitRootByIndex_Call) Run(run func(ctx context.Context, index uint32)) *L2BridgeSyncer_GetExitRootByIndex_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(uint32)) - }) - return _c -} - -func (_c *L2BridgeSyncer_GetExitRootByIndex_Call) Return(_a0 treetypes.Root, _a1 error) *L2BridgeSyncer_GetExitRootByIndex_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *L2BridgeSyncer_GetExitRootByIndex_Call) RunAndReturn(run func(context.Context, uint32) (treetypes.Root, error)) *L2BridgeSyncer_GetExitRootByIndex_Call { - _c.Call.Return(run) - return _c -} - -// GetLastProcessedBlock provides a mock function with given fields: ctx -func (_m *L2BridgeSyncer) GetLastProcessedBlock(ctx context.Context) (uint64, error) { - ret := _m.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for GetLastProcessedBlock") - } - - var r0 uint64 - var r1 error - if rf, ok := ret.Get(0).(func(context.Context) (uint64, error)); ok { - return rf(ctx) - } - if rf, ok := ret.Get(0).(func(context.Context) uint64); ok { - r0 = rf(ctx) - } 
else { - r0 = ret.Get(0).(uint64) - } - - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(ctx) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// L2BridgeSyncer_GetLastProcessedBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLastProcessedBlock' -type L2BridgeSyncer_GetLastProcessedBlock_Call struct { - *mock.Call -} - -// GetLastProcessedBlock is a helper method to define mock.On call -// - ctx context.Context -func (_e *L2BridgeSyncer_Expecter) GetLastProcessedBlock(ctx interface{}) *L2BridgeSyncer_GetLastProcessedBlock_Call { - return &L2BridgeSyncer_GetLastProcessedBlock_Call{Call: _e.mock.On("GetLastProcessedBlock", ctx)} -} - -func (_c *L2BridgeSyncer_GetLastProcessedBlock_Call) Run(run func(ctx context.Context)) *L2BridgeSyncer_GetLastProcessedBlock_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context)) - }) - return _c -} - -func (_c *L2BridgeSyncer_GetLastProcessedBlock_Call) Return(_a0 uint64, _a1 error) *L2BridgeSyncer_GetLastProcessedBlock_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *L2BridgeSyncer_GetLastProcessedBlock_Call) RunAndReturn(run func(context.Context) (uint64, error)) *L2BridgeSyncer_GetLastProcessedBlock_Call { - _c.Call.Return(run) - return _c -} - -// OriginNetwork provides a mock function with no fields -func (_m *L2BridgeSyncer) OriginNetwork() uint32 { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for OriginNetwork") - } - - var r0 uint32 - if rf, ok := ret.Get(0).(func() uint32); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(uint32) - } - - return r0 -} - -// L2BridgeSyncer_OriginNetwork_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'OriginNetwork' -type L2BridgeSyncer_OriginNetwork_Call struct { - *mock.Call -} - -// OriginNetwork is a helper method to define mock.On call -func (_e *L2BridgeSyncer_Expecter) OriginNetwork() *L2BridgeSyncer_OriginNetwork_Call { - return &L2BridgeSyncer_OriginNetwork_Call{Call: _e.mock.On("OriginNetwork")} -} - -func (_c *L2BridgeSyncer_OriginNetwork_Call) Run(run func()) *L2BridgeSyncer_OriginNetwork_Call { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *L2BridgeSyncer_OriginNetwork_Call) Return(_a0 uint32) *L2BridgeSyncer_OriginNetwork_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *L2BridgeSyncer_OriginNetwork_Call) RunAndReturn(run func() uint32) *L2BridgeSyncer_OriginNetwork_Call { - _c.Call.Return(run) - return _c -} - -// NewL2BridgeSyncer creates a new instance of L2BridgeSyncer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewL2BridgeSyncer(t interface { - mock.TestingT - Cleanup(func()) -}) *L2BridgeSyncer { - mock := &L2BridgeSyncer{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/aggsender/mocks/logger.go b/aggsender/mocks/logger.go deleted file mode 100644 index b2a845ca9..000000000 --- a/aggsender/mocks/logger.go +++ /dev/null @@ -1,420 +0,0 @@ -// Code generated by mockery. DO NOT EDIT. 
- -package mocks - -import mock "github.com/stretchr/testify/mock" - -// Logger is an autogenerated mock type for the Logger type -type Logger struct { - mock.Mock -} - -type Logger_Expecter struct { - mock *mock.Mock -} - -func (_m *Logger) EXPECT() *Logger_Expecter { - return &Logger_Expecter{mock: &_m.Mock} -} - -// Debug provides a mock function with given fields: args -func (_m *Logger) Debug(args ...interface{}) { - var _ca []interface{} - _ca = append(_ca, args...) - _m.Called(_ca...) -} - -// Logger_Debug_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Debug' -type Logger_Debug_Call struct { - *mock.Call -} - -// Debug is a helper method to define mock.On call -// - args ...interface{} -func (_e *Logger_Expecter) Debug(args ...interface{}) *Logger_Debug_Call { - return &Logger_Debug_Call{Call: _e.mock.On("Debug", - append([]interface{}{}, args...)...)} -} - -func (_c *Logger_Debug_Call) Run(run func(args ...interface{})) *Logger_Debug_Call { - _c.Call.Run(func(args mock.Arguments) { - variadicArgs := make([]interface{}, len(args)-0) - for i, a := range args[0:] { - if a != nil { - variadicArgs[i] = a.(interface{}) - } - } - run(variadicArgs...) - }) - return _c -} - -func (_c *Logger_Debug_Call) Return() *Logger_Debug_Call { - _c.Call.Return() - return _c -} - -func (_c *Logger_Debug_Call) RunAndReturn(run func(...interface{})) *Logger_Debug_Call { - _c.Run(run) - return _c -} - -// Debugf provides a mock function with given fields: format, args -func (_m *Logger) Debugf(format string, args ...interface{}) { - var _ca []interface{} - _ca = append(_ca, format) - _ca = append(_ca, args...) - _m.Called(_ca...) -} - -// Logger_Debugf_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Debugf' -type Logger_Debugf_Call struct { - *mock.Call -} - -// Debugf is a helper method to define mock.On call -// - format string -// - args ...interface{} -func (_e *Logger_Expecter) Debugf(format interface{}, args ...interface{}) *Logger_Debugf_Call { - return &Logger_Debugf_Call{Call: _e.mock.On("Debugf", - append([]interface{}{format}, args...)...)} -} - -func (_c *Logger_Debugf_Call) Run(run func(format string, args ...interface{})) *Logger_Debugf_Call { - _c.Call.Run(func(args mock.Arguments) { - variadicArgs := make([]interface{}, len(args)-1) - for i, a := range args[1:] { - if a != nil { - variadicArgs[i] = a.(interface{}) - } - } - run(args[0].(string), variadicArgs...) - }) - return _c -} - -func (_c *Logger_Debugf_Call) Return() *Logger_Debugf_Call { - _c.Call.Return() - return _c -} - -func (_c *Logger_Debugf_Call) RunAndReturn(run func(string, ...interface{})) *Logger_Debugf_Call { - _c.Run(run) - return _c -} - -// Error provides a mock function with given fields: args -func (_m *Logger) Error(args ...interface{}) { - var _ca []interface{} - _ca = append(_ca, args...) - _m.Called(_ca...) 
-} - -// Logger_Error_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Error' -type Logger_Error_Call struct { - *mock.Call -} - -// Error is a helper method to define mock.On call -// - args ...interface{} -func (_e *Logger_Expecter) Error(args ...interface{}) *Logger_Error_Call { - return &Logger_Error_Call{Call: _e.mock.On("Error", - append([]interface{}{}, args...)...)} -} - -func (_c *Logger_Error_Call) Run(run func(args ...interface{})) *Logger_Error_Call { - _c.Call.Run(func(args mock.Arguments) { - variadicArgs := make([]interface{}, len(args)-0) - for i, a := range args[0:] { - if a != nil { - variadicArgs[i] = a.(interface{}) - } - } - run(variadicArgs...) - }) - return _c -} - -func (_c *Logger_Error_Call) Return() *Logger_Error_Call { - _c.Call.Return() - return _c -} - -func (_c *Logger_Error_Call) RunAndReturn(run func(...interface{})) *Logger_Error_Call { - _c.Run(run) - return _c -} - -// Errorf provides a mock function with given fields: format, args -func (_m *Logger) Errorf(format string, args ...interface{}) { - var _ca []interface{} - _ca = append(_ca, format) - _ca = append(_ca, args...) - _m.Called(_ca...) -} - -// Logger_Errorf_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Errorf' -type Logger_Errorf_Call struct { - *mock.Call -} - -// Errorf is a helper method to define mock.On call -// - format string -// - args ...interface{} -func (_e *Logger_Expecter) Errorf(format interface{}, args ...interface{}) *Logger_Errorf_Call { - return &Logger_Errorf_Call{Call: _e.mock.On("Errorf", - append([]interface{}{format}, args...)...)} -} - -func (_c *Logger_Errorf_Call) Run(run func(format string, args ...interface{})) *Logger_Errorf_Call { - _c.Call.Run(func(args mock.Arguments) { - variadicArgs := make([]interface{}, len(args)-1) - for i, a := range args[1:] { - if a != nil { - variadicArgs[i] = a.(interface{}) - } - } - run(args[0].(string), variadicArgs...) - }) - return _c -} - -func (_c *Logger_Errorf_Call) Return() *Logger_Errorf_Call { - _c.Call.Return() - return _c -} - -func (_c *Logger_Errorf_Call) RunAndReturn(run func(string, ...interface{})) *Logger_Errorf_Call { - _c.Run(run) - return _c -} - -// Fatalf provides a mock function with given fields: format, args -func (_m *Logger) Fatalf(format string, args ...interface{}) { - var _ca []interface{} - _ca = append(_ca, format) - _ca = append(_ca, args...) - _m.Called(_ca...) -} - -// Logger_Fatalf_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Fatalf' -type Logger_Fatalf_Call struct { - *mock.Call -} - -// Fatalf is a helper method to define mock.On call -// - format string -// - args ...interface{} -func (_e *Logger_Expecter) Fatalf(format interface{}, args ...interface{}) *Logger_Fatalf_Call { - return &Logger_Fatalf_Call{Call: _e.mock.On("Fatalf", - append([]interface{}{format}, args...)...)} -} - -func (_c *Logger_Fatalf_Call) Run(run func(format string, args ...interface{})) *Logger_Fatalf_Call { - _c.Call.Run(func(args mock.Arguments) { - variadicArgs := make([]interface{}, len(args)-1) - for i, a := range args[1:] { - if a != nil { - variadicArgs[i] = a.(interface{}) - } - } - run(args[0].(string), variadicArgs...) 
- }) - return _c -} - -func (_c *Logger_Fatalf_Call) Return() *Logger_Fatalf_Call { - _c.Call.Return() - return _c -} - -func (_c *Logger_Fatalf_Call) RunAndReturn(run func(string, ...interface{})) *Logger_Fatalf_Call { - _c.Run(run) - return _c -} - -// Info provides a mock function with given fields: args -func (_m *Logger) Info(args ...interface{}) { - var _ca []interface{} - _ca = append(_ca, args...) - _m.Called(_ca...) -} - -// Logger_Info_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Info' -type Logger_Info_Call struct { - *mock.Call -} - -// Info is a helper method to define mock.On call -// - args ...interface{} -func (_e *Logger_Expecter) Info(args ...interface{}) *Logger_Info_Call { - return &Logger_Info_Call{Call: _e.mock.On("Info", - append([]interface{}{}, args...)...)} -} - -func (_c *Logger_Info_Call) Run(run func(args ...interface{})) *Logger_Info_Call { - _c.Call.Run(func(args mock.Arguments) { - variadicArgs := make([]interface{}, len(args)-0) - for i, a := range args[0:] { - if a != nil { - variadicArgs[i] = a.(interface{}) - } - } - run(variadicArgs...) - }) - return _c -} - -func (_c *Logger_Info_Call) Return() *Logger_Info_Call { - _c.Call.Return() - return _c -} - -func (_c *Logger_Info_Call) RunAndReturn(run func(...interface{})) *Logger_Info_Call { - _c.Run(run) - return _c -} - -// Infof provides a mock function with given fields: format, args -func (_m *Logger) Infof(format string, args ...interface{}) { - var _ca []interface{} - _ca = append(_ca, format) - _ca = append(_ca, args...) - _m.Called(_ca...) -} - -// Logger_Infof_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Infof' -type Logger_Infof_Call struct { - *mock.Call -} - -// Infof is a helper method to define mock.On call -// - format string -// - args ...interface{} -func (_e *Logger_Expecter) Infof(format interface{}, args ...interface{}) *Logger_Infof_Call { - return &Logger_Infof_Call{Call: _e.mock.On("Infof", - append([]interface{}{format}, args...)...)} -} - -func (_c *Logger_Infof_Call) Run(run func(format string, args ...interface{})) *Logger_Infof_Call { - _c.Call.Run(func(args mock.Arguments) { - variadicArgs := make([]interface{}, len(args)-1) - for i, a := range args[1:] { - if a != nil { - variadicArgs[i] = a.(interface{}) - } - } - run(args[0].(string), variadicArgs...) - }) - return _c -} - -func (_c *Logger_Infof_Call) Return() *Logger_Infof_Call { - _c.Call.Return() - return _c -} - -func (_c *Logger_Infof_Call) RunAndReturn(run func(string, ...interface{})) *Logger_Infof_Call { - _c.Run(run) - return _c -} - -// Warn provides a mock function with given fields: args -func (_m *Logger) Warn(args ...interface{}) { - var _ca []interface{} - _ca = append(_ca, args...) - _m.Called(_ca...) -} - -// Logger_Warn_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Warn' -type Logger_Warn_Call struct { - *mock.Call -} - -// Warn is a helper method to define mock.On call -// - args ...interface{} -func (_e *Logger_Expecter) Warn(args ...interface{}) *Logger_Warn_Call { - return &Logger_Warn_Call{Call: _e.mock.On("Warn", - append([]interface{}{}, args...)...)} -} - -func (_c *Logger_Warn_Call) Run(run func(args ...interface{})) *Logger_Warn_Call { - _c.Call.Run(func(args mock.Arguments) { - variadicArgs := make([]interface{}, len(args)-0) - for i, a := range args[0:] { - if a != nil { - variadicArgs[i] = a.(interface{}) - } - } - run(variadicArgs...) 
- }) - return _c -} - -func (_c *Logger_Warn_Call) Return() *Logger_Warn_Call { - _c.Call.Return() - return _c -} - -func (_c *Logger_Warn_Call) RunAndReturn(run func(...interface{})) *Logger_Warn_Call { - _c.Run(run) - return _c -} - -// Warnf provides a mock function with given fields: format, args -func (_m *Logger) Warnf(format string, args ...interface{}) { - var _ca []interface{} - _ca = append(_ca, format) - _ca = append(_ca, args...) - _m.Called(_ca...) -} - -// Logger_Warnf_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Warnf' -type Logger_Warnf_Call struct { - *mock.Call -} - -// Warnf is a helper method to define mock.On call -// - format string -// - args ...interface{} -func (_e *Logger_Expecter) Warnf(format interface{}, args ...interface{}) *Logger_Warnf_Call { - return &Logger_Warnf_Call{Call: _e.mock.On("Warnf", - append([]interface{}{format}, args...)...)} -} - -func (_c *Logger_Warnf_Call) Run(run func(format string, args ...interface{})) *Logger_Warnf_Call { - _c.Call.Run(func(args mock.Arguments) { - variadicArgs := make([]interface{}, len(args)-1) - for i, a := range args[1:] { - if a != nil { - variadicArgs[i] = a.(interface{}) - } - } - run(args[0].(string), variadicArgs...) - }) - return _c -} - -func (_c *Logger_Warnf_Call) Return() *Logger_Warnf_Call { - _c.Call.Return() - return _c -} - -func (_c *Logger_Warnf_Call) RunAndReturn(run func(string, ...interface{})) *Logger_Warnf_Call { - _c.Run(run) - return _c -} - -// NewLogger creates a new instance of Logger. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewLogger(t interface { - mock.TestingT - Cleanup(func()) -}) *Logger { - mock := &Logger{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/aggsender/rpc/aggsender_rpc.go b/aggsender/rpc/aggsender_rpc.go deleted file mode 100644 index 6c0b69869..000000000 --- a/aggsender/rpc/aggsender_rpc.go +++ /dev/null @@ -1,79 +0,0 @@ -package aggsenderrpc - -import ( - "fmt" - - "github.com/0xPolygon/cdk-rpc/rpc" - "github.com/0xPolygon/cdk/aggsender/types" - "github.com/0xPolygon/cdk/log" -) - -const ( - base10 = 10 -) - -type aggsenderStorer interface { - GetCertificateByHeight(height uint64) (*types.CertificateInfo, error) - GetLastSentCertificate() (*types.CertificateInfo, error) -} - -type aggsenderInterface interface { - Info() types.AggsenderInfo -} - -// AggsenderRPC is the RPC interface for the aggsender -type AggsenderRPC struct { - logger *log.Logger - storage aggsenderStorer - aggsender aggsenderInterface -} - -func NewAggsenderRPC( - logger *log.Logger, - storage aggsenderStorer, - aggsender aggsenderInterface, -) *AggsenderRPC { - return &AggsenderRPC{ - logger: logger, - storage: storage, - aggsender: aggsender, - } -} - -// Status returns the status of the aggsender -// curl -X POST http://localhost:5576/ -H "Con -application/json" \ -// -d '{"method":"aggsender_status", "params":[], "id":1}' -func (b *AggsenderRPC) Status() (interface{}, rpc.Error) { - info := b.aggsender.Info() - return info, nil -} - -// GetCertificateHeaderPerHeight returns the certificate header for the given height -// if param is `nil` it returns the last sent certificate -// latest: -// -// curl -X POST http://localhost:5576/ -H "Con -application/json" \ -// -d '{"method":"aggsender_getCertificateHeaderPerHeight", "params":[], "id":1}' -// -// specific height: 
-// -// curl -X POST http://localhost:5576/ -H "Con -application/json" \ -// -d '{"method":"aggsender_getCertificateHeaderPerHeight", "params":[$height], "id":1}' -func (b *AggsenderRPC) GetCertificateHeaderPerHeight(height *uint64) (interface{}, rpc.Error) { - var ( - certInfo *types.CertificateInfo - err error - ) - if height == nil { - certInfo, err = b.storage.GetLastSentCertificate() - } else { - certInfo, err = b.storage.GetCertificateByHeight(*height) - } - if err != nil { - return nil, rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("error getting certificate by height: %v", err)) - } - if certInfo == nil { - return nil, rpc.NewRPCError(rpc.NotFoundErrorCode, "certificate not found") - } - return certInfo, nil -} diff --git a/aggsender/rpc/aggsender_rpc_test.go b/aggsender/rpc/aggsender_rpc_test.go deleted file mode 100644 index 38e73ed07..000000000 --- a/aggsender/rpc/aggsender_rpc_test.go +++ /dev/null @@ -1,93 +0,0 @@ -package aggsenderrpc - -import ( - "fmt" - "testing" - - "github.com/0xPolygon/cdk/aggsender/mocks" - "github.com/0xPolygon/cdk/aggsender/types" - "github.com/stretchr/testify/require" -) - -func TestAggsenderRPCStatus(t *testing.T) { - testData := newAggsenderData(t) - testData.mockAggsender.EXPECT().Info().Return(types.AggsenderInfo{}) - res, err := testData.sut.Status() - require.NoError(t, err) - require.NotNil(t, res) -} - -func TestAggsenderRPCGetCertificateHeaderPerHeight(t *testing.T) { - testData := newAggsenderData(t) - height := uint64(1) - cases := []struct { - name string - height *uint64 - certResult *types.CertificateInfo - certError error - expectedError string - expectedNil bool - }{ - { - name: "latest, no error", - certResult: &types.CertificateInfo{}, - certError: nil, - }, - { - name: "latest,no error, no cert", - certResult: nil, - certError: nil, - expectedError: "not found", - expectedNil: true, - }, - { - name: "latest,error", - certResult: &types.CertificateInfo{}, - certError: fmt.Errorf("my_error"), - expectedError: "my_error", - expectedNil: true, - }, - { - name: "hight, no error", - height: &height, - certResult: &types.CertificateInfo{}, - certError: nil, - }, - } - - for _, tt := range cases { - t.Run(tt.name, func(t *testing.T) { - if tt.height == nil { - testData.mockStore.EXPECT().GetLastSentCertificate().Return(tt.certResult, tt.certError).Once() - } else { - testData.mockStore.EXPECT().GetCertificateByHeight(*tt.height).Return(tt.certResult, tt.certError).Once() - } - res, err := testData.sut.GetCertificateHeaderPerHeight(tt.height) - if tt.expectedError != "" { - require.Error(t, err) - require.Contains(t, err.Error(), tt.expectedError) - } else { - require.NoError(t, err) - } - if tt.expectedNil { - require.Nil(t, res) - } else { - require.NotNil(t, res) - } - }) - } -} - -type aggsenderRPCTestData struct { - sut *AggsenderRPC - mockStore *mocks.AggsenderStorer - mockAggsender *mocks.AggsenderInterface -} - -func newAggsenderData(t *testing.T) *aggsenderRPCTestData { - t.Helper() - mockStore := mocks.NewAggsenderStorer(t) - mockAggsender := mocks.NewAggsenderInterface(t) - sut := NewAggsenderRPC(nil, mockStore, mockAggsender) - return &aggsenderRPCTestData{sut, mockStore, mockAggsender} -} diff --git a/aggsender/rpcclient/client.go b/aggsender/rpcclient/client.go deleted file mode 100644 index 7d1312fb1..000000000 --- a/aggsender/rpcclient/client.go +++ /dev/null @@ -1,58 +0,0 @@ -package rpcclient - -import ( - "encoding/json" - "fmt" - - "github.com/0xPolygon/cdk-rpc/rpc" - "github.com/0xPolygon/cdk/aggsender/types" -) - 
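For context on the RPC surface being removed: the curl snippets in the comments above call the same aggsender_status and aggsender_getCertificateHeaderPerHeight methods that the rpcclient package, whose deletion starts here, wraps. A minimal, hypothetical usage sketch; the URL and port are taken from those curl examples and are an assumption about the local setup:

package main

import (
	"fmt"
	"log"

	"github.com/0xPolygon/cdk/aggsender/rpcclient"
)

func main() {
	// NewClient only stores the URL; every method issues a JSON-RPC call.
	client := rpcclient.NewClient("http://localhost:5576")

	info, err := client.GetStatus() // aggsender_status
	if err != nil {
		log.Fatalf("aggsender_status failed: %v", err)
	}
	fmt.Printf("aggsender status: %s\n", info.AggsenderStatus.Status)

	// A nil height asks for the header of the last sent certificate.
	cert, err := client.GetCertificateHeaderPerHeight(nil)
	if err != nil {
		log.Fatalf("aggsender_getCertificateHeaderPerHeight failed: %v", err)
	}
	fmt.Printf("last certificate: %s\n", cert.ID())
}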
-var jSONRPCCall = rpc.JSONRPCCall - -// Client wraps all the available endpoints of the data abailability committee node server -type Client struct { - url string -} - -func NewClient(url string) *Client { - return &Client{ - url: url, - } -} - -func (c *Client) GetStatus() (*types.AggsenderInfo, error) { - response, err := jSONRPCCall(c.url, "aggsender_status") - if err != nil { - return nil, err - } - - // Check if the response is an error - if response.Error != nil { - return nil, fmt.Errorf("error in the response calling aggsender_status: %v", response.Error) - } - result := types.AggsenderInfo{} - err = json.Unmarshal(response.Result, &result) - if err != nil { - return nil, err - } - return &result, nil -} - -func (c *Client) GetCertificateHeaderPerHeight(height *uint64) (*types.CertificateInfo, error) { - response, err := jSONRPCCall(c.url, "aggsender_getCertificateHeaderPerHeight", height) - if err != nil { - return nil, err - } - - // Check if the response is an error - if response.Error != nil { - return nil, fmt.Errorf("error in the response calling aggsender_getCertificateHeaderPerHeight: %v", response.Error) - } - cert := types.CertificateInfo{} - err = json.Unmarshal(response.Result, &cert) - if err != nil { - return nil, err - } - return &cert, nil -} diff --git a/aggsender/rpcclient/client_test.go b/aggsender/rpcclient/client_test.go deleted file mode 100644 index f831713e5..000000000 --- a/aggsender/rpcclient/client_test.go +++ /dev/null @@ -1,45 +0,0 @@ -package rpcclient - -import ( - "encoding/json" - "testing" - - "github.com/0xPolygon/cdk-rpc/rpc" - "github.com/0xPolygon/cdk/aggsender/types" - "github.com/stretchr/testify/require" -) - -func TestGetCertificateHeaderPerHeight(t *testing.T) { - sut := NewClient("url") - height := uint64(1) - responseCert := types.CertificateInfo{} - responseCertJSON, err := json.Marshal(responseCert) - require.NoError(t, err) - response := rpc.Response{ - Result: responseCertJSON, - } - jSONRPCCall = func(_, _ string, _ ...interface{}) (rpc.Response, error) { - return response, nil - } - cert, err := sut.GetCertificateHeaderPerHeight(&height) - require.NoError(t, err) - require.NotNil(t, cert) - require.Equal(t, responseCert, *cert) -} - -func TestGetStatus(t *testing.T) { - sut := NewClient("url") - responseData := types.AggsenderInfo{} - responseDataJSON, err := json.Marshal(responseData) - require.NoError(t, err) - response := rpc.Response{ - Result: responseDataJSON, - } - jSONRPCCall = func(_, _ string, _ ...interface{}) (rpc.Response, error) { - return response, nil - } - result, err := sut.GetStatus() - require.NoError(t, err) - require.NotNil(t, result) - require.Equal(t, responseData, *result) -} diff --git a/aggsender/types/block_notifier.go b/aggsender/types/block_notifier.go deleted file mode 100644 index 5dde27028..000000000 --- a/aggsender/types/block_notifier.go +++ /dev/null @@ -1,20 +0,0 @@ -package types - -import ( - "time" - - "github.com/0xPolygon/cdk/etherman" -) - -type EventNewBlock struct { - BlockNumber uint64 - BlockFinalityType etherman.BlockNumberFinality - BlockRate time.Duration -} - -// BlockNotifier is the interface that wraps the basic methods to notify a new block. -type BlockNotifier interface { - // NotifyEpochStarted notifies the epoch has started. 
- Subscribe(id string) <-chan EventNewBlock - String() string -} diff --git a/aggsender/types/certificate_build_params.go b/aggsender/types/certificate_build_params.go deleted file mode 100644 index 1ffd7563e..000000000 --- a/aggsender/types/certificate_build_params.go +++ /dev/null @@ -1,112 +0,0 @@ -package types - -import ( - "fmt" - - "github.com/0xPolygon/cdk/bridgesync" -) - -const ( - EstimatedSizeBridgeExit = 230 - EstimatedSizeClaim = 8000 - byteArrayJSONSizeFactor = 1.5 -) - -// CertificateBuildParams is a struct that holds the parameters to build a certificate -type CertificateBuildParams struct { - FromBlock uint64 - ToBlock uint64 - Bridges []bridgesync.Bridge - Claims []bridgesync.Claim - CreatedAt uint32 -} - -func (c *CertificateBuildParams) String() string { - return fmt.Sprintf("FromBlock: %d, ToBlock: %d, numBridges: %d, numClaims: %d, createdAt: %d", - c.FromBlock, c.ToBlock, c.NumberOfBridges(), c.NumberOfClaims(), c.CreatedAt) -} - -// Range create a new CertificateBuildParams with the given range -func (c *CertificateBuildParams) Range(fromBlock, toBlock uint64) (*CertificateBuildParams, error) { - if c.FromBlock == fromBlock && c.ToBlock == toBlock { - return c, nil - } - if c.FromBlock > fromBlock || c.ToBlock < toBlock { - return nil, fmt.Errorf("invalid range") - } - newCert := &CertificateBuildParams{ - FromBlock: fromBlock, - ToBlock: toBlock, - Bridges: make([]bridgesync.Bridge, 0), - Claims: make([]bridgesync.Claim, 0), - } - - for _, bridge := range c.Bridges { - if bridge.BlockNum >= fromBlock && bridge.BlockNum <= toBlock { - newCert.Bridges = append(newCert.Bridges, bridge) - } - } - - for _, claim := range c.Claims { - if claim.BlockNum >= fromBlock && claim.BlockNum <= toBlock { - newCert.Claims = append(newCert.Claims, claim) - } - } - return newCert, nil -} - -// NumberOfBridges returns the number of bridges in the certificate -func (c *CertificateBuildParams) NumberOfBridges() int { - if c == nil { - return 0 - } - return len(c.Bridges) -} - -// NumberOfClaims returns the number of claims in the certificate -func (c *CertificateBuildParams) NumberOfClaims() int { - if c == nil { - return 0 - } - return len(c.Claims) -} - -// NumberOfBlocks returns the number of blocks in the certificate -func (c *CertificateBuildParams) NumberOfBlocks() int { - if c == nil { - return 0 - } - return int(c.ToBlock - c.FromBlock + 1) -} - -// EstimatedSize returns the estimated size of the certificate -func (c *CertificateBuildParams) EstimatedSize() uint { - if c == nil { - return 0 - } - sizeBridges := int(0) - for _, bridge := range c.Bridges { - sizeBridges += EstimatedSizeBridgeExit - sizeBridges += int(byteArrayJSONSizeFactor * float32(len(bridge.Metadata))) - } - - sizeClaims := int(0) - for _, claim := range c.Claims { - sizeClaims += EstimatedSizeClaim - sizeClaims += int(byteArrayJSONSizeFactor * float32(len(claim.Metadata))) - } - return uint(sizeBridges + sizeClaims) -} - -// IsEmpty returns true if the certificate is empty -func (c *CertificateBuildParams) IsEmpty() bool { - return c.NumberOfBridges() == 0 && c.NumberOfClaims() == 0 -} - -// MaxDepoitCount returns the maximum deposit count in the certificate -func (c *CertificateBuildParams) MaxDepositCount() uint32 { - if c == nil || c.NumberOfBridges() == 0 { - return 0 - } - return c.Bridges[len(c.Bridges)-1].DepositCount -} diff --git a/aggsender/types/epoch_notifier.go b/aggsender/types/epoch_notifier.go deleted file mode 100644 index 426ad3622..000000000 --- a/aggsender/types/epoch_notifier.go +++ 
/dev/null @@ -1,25 +0,0 @@ -package types - -import ( - "context" - "fmt" -) - -// EpochEvent is the event that notifies that the epoch is near its end -type EpochEvent struct { - Epoch uint64 - // ExtraInfo is detailed information about the epoch; its content depends on the implementation - ExtraInfo fmt.Stringer -} - -func (e EpochEvent) String() string { - return fmt.Sprintf("EpochEvent: epoch=%d extra=%s", e.Epoch, e.ExtraInfo) -} - -type EpochNotifier interface { - // Subscribe notifies when the epoch is close to its end. - Subscribe(id string) <-chan EpochEvent - // Start starts the notifier synchronously - Start(ctx context.Context) - String() string -} diff --git a/aggsender/types/generic_subscriber.go b/aggsender/types/generic_subscriber.go deleted file mode 100644 index 67038c5ce..000000000 --- a/aggsender/types/generic_subscriber.go +++ /dev/null @@ -1,6 +0,0 @@ -package types - -type GenericSubscriber[T any] interface { - Subscribe(subscriberName string) <-chan T - Publish(data T) -} diff --git a/aggsender/types/status.go b/aggsender/types/status.go deleted file mode 100644 index be28fe112..000000000 --- a/aggsender/types/status.go +++ /dev/null @@ -1,42 +0,0 @@ -package types - -import ( - "time" - - zkevm "github.com/0xPolygon/cdk" -) - -type AggsenderStatusType string - -const ( - StatusNone AggsenderStatusType = "none" - StatusCheckingInitialStage AggsenderStatusType = "checking_initial_stage" - StatusCertificateStage AggsenderStatusType = "certificate_stage" -) - -type AggsenderStatus struct { - Running bool `json:"running"` - StartTime time.Time `json:"start_time"` - Status AggsenderStatusType `json:"status"` - LastError string `json:"last_error"` -} - -type AggsenderInfo struct { - AggsenderStatus AggsenderStatus `json:"aggsender_status"` - Version zkevm.FullVersion - EpochNotifierDescription string `json:"epoch_notifier_description"` - NetworkID uint32 `json:"network_id"` -} - -func (a *AggsenderStatus) Start(startTime time.Time) { - a.Running = true - a.StartTime = startTime -} - -func (a *AggsenderStatus) SetLastError(err error) { - if err == nil { - a.LastError = "" - } else { - a.LastError = err.Error() - } -} diff --git a/aggsender/types/status_test.go b/aggsender/types/status_test.go deleted file mode 100644 index d48ca0043..000000000 --- a/aggsender/types/status_test.go +++ /dev/null @@ -1,16 +0,0 @@ -package types - -import ( - "errors" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestAggsenderStatusSetLastError(t *testing.T) { - sut := AggsenderStatus{} - sut.SetLastError(nil) - require.Equal(t, "", sut.LastError) - sut.SetLastError(errors.New("error")) - require.Equal(t, "error", sut.LastError) -} diff --git a/aggsender/types/types.go b/aggsender/types/types.go deleted file mode 100644 index 6d9f75349..000000000 --- a/aggsender/types/types.go +++ /dev/null @@ -1,195 +0,0 @@ -package types - -import ( - "context" - "encoding/binary" - "fmt" - "math/big" - "time" - - "github.com/0xPolygon/cdk/agglayer" - "github.com/0xPolygon/cdk/bridgesync" - "github.com/0xPolygon/cdk/etherman" - "github.com/0xPolygon/cdk/l1infotreesync" - treeTypes "github.com/0xPolygon/cdk/tree/types" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" -) - -// L1InfoTreeSyncer is an interface defining functions that an L1InfoTreeSyncer should implement -type L1InfoTreeSyncer interface { - GetInfoByGlobalExitRoot(globalExitRoot common.Hash) (*l1infotreesync.L1InfoTreeLeaf, error) - GetL1InfoTreeMerkleProofFromIndexToRoot( - ctx context.Context, index
uint32, root common.Hash, - ) (treeTypes.Proof, error) - GetL1InfoTreeRootByIndex(ctx context.Context, index uint32) (treeTypes.Root, error) -} - -// L2BridgeSyncer is an interface defining functions that an L2BridgeSyncer should implement -type L2BridgeSyncer interface { - GetBlockByLER(ctx context.Context, ler common.Hash) (uint64, error) - GetExitRootByIndex(ctx context.Context, index uint32) (treeTypes.Root, error) - GetBridgesPublished(ctx context.Context, fromBlock, toBlock uint64) ([]bridgesync.Bridge, error) - GetClaims(ctx context.Context, fromBlock, toBlock uint64) ([]bridgesync.Claim, error) - OriginNetwork() uint32 - BlockFinality() etherman.BlockNumberFinality - GetLastProcessedBlock(ctx context.Context) (uint64, error) -} - -// EthClient is an interface defining functions that an EthClient should implement -type EthClient interface { - BlockNumber(ctx context.Context) (uint64, error) - HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) -} - -// Logger is an interface that defines the methods to log messages -type Logger interface { - Fatalf(format string, args ...interface{}) - Info(args ...interface{}) - Infof(format string, args ...interface{}) - Error(args ...interface{}) - Errorf(format string, args ...interface{}) - Warn(args ...interface{}) - Warnf(format string, args ...interface{}) - Debug(args ...interface{}) - Debugf(format string, args ...interface{}) -} - -type CertificateInfo struct { - Height uint64 `meddler:"height"` - RetryCount int `meddler:"retry_count"` - CertificateID common.Hash `meddler:"certificate_id,hash"` - // PreviousLocalExitRoot: if nil, it has not been reported yet - PreviousLocalExitRoot *common.Hash `meddler:"previous_local_exit_root,hash"` - NewLocalExitRoot common.Hash `meddler:"new_local_exit_root,hash"` - FromBlock uint64 `meddler:"from_block"` - ToBlock uint64 `meddler:"to_block"` - Status agglayer.CertificateStatus `meddler:"status"` - CreatedAt uint32 `meddler:"created_at"` - UpdatedAt uint32 `meddler:"updated_at"` - SignedCertificate string `meddler:"signed_certificate"` -} - -func (c *CertificateInfo) String() string { - if c == nil { - //nolint:all - return "nil" - } - previousLocalExitRoot := "nil" - if c.PreviousLocalExitRoot != nil { - previousLocalExitRoot = c.PreviousLocalExitRoot.String() - } - return fmt.Sprintf("aggsender.CertificateInfo: "+ - "Height: %d "+ - "RetryCount: %d "+ - "CertificateID: %s "+ - "PreviousLocalExitRoot: %s "+ - "NewLocalExitRoot: %s "+ - "Status: %s "+ - "FromBlock: %d "+ - "ToBlock: %d "+ - "CreatedAt: %s "+ - "UpdatedAt: %s", - c.Height, - c.RetryCount, - c.CertificateID.String(), - previousLocalExitRoot, - c.NewLocalExitRoot.String(), - c.Status.String(), - c.FromBlock, - c.ToBlock, - time.Unix(int64(c.CreatedAt), 0), - time.Unix(int64(c.UpdatedAt), 0), - ) -} - -// ID returns a string with the unique identifier of the certificate (height+certificateID) -func (c *CertificateInfo) ID() string { - if c == nil { - return "nil" - } - return fmt.Sprintf("%d/%s (retry %d)", c.Height, c.CertificateID.String(), c.RetryCount) -} - -// IsClosed returns true if the certificate is closed (settled or inError) -func (c *CertificateInfo) IsClosed() bool { - if c == nil { - return false - } - return c.Status.IsClosed() -} - -// ElapsedTimeSinceCreation returns the time elapsed since the certificate was created -func (c *CertificateInfo) ElapsedTimeSinceCreation() time.Duration { - if c == nil { - return 0 - } - return time.Now().UTC().Sub(time.Unix(int64(c.CreatedAt), 0)) -} - -type
CertificateMetadata struct { - // ToBlock contains the pre-v1 value stored in the certificate metadata field; - // it is not stored in the hash post v1 - ToBlock uint64 - - // FromBlock is the block number from which the certificate contains data - FromBlock uint64 - - // Offset is the number of blocks from the FromBlock that the certificate contains - Offset uint32 - - // CreatedAt is the timestamp when the certificate was created - CreatedAt uint32 - - // Version is the version of the metadata - Version uint8 -} - -// NewCertificateMetadata returns a new CertificateMetadata built from the given fields -func NewCertificateMetadata(fromBlock uint64, offset uint32, createdAt uint32) *CertificateMetadata { - return &CertificateMetadata{ - FromBlock: fromBlock, - Offset: offset, - CreatedAt: createdAt, - Version: 1, - } -} - -// NewCertificateMetadataFromHash returns a new CertificateMetadata from the given hash -func NewCertificateMetadataFromHash(hash common.Hash) *CertificateMetadata {- b := hash.Bytes() - - if b[0] < 1 { - return &CertificateMetadata{ - ToBlock: hash.Big().Uint64(), - } - } - - return &CertificateMetadata{ - Version: b[0], - FromBlock: binary.BigEndian.Uint64(b[1:9]), - Offset: binary.BigEndian.Uint32(b[9:13]), - CreatedAt: binary.BigEndian.Uint32(b[13:17]), - } -} - -// ToHash returns the hash of the metadata -func (c *CertificateMetadata) ToHash() common.Hash { - b := make([]byte, common.HashLength) // 32-byte hash - - // Encode version - b[0] = c.Version - - // Encode fromBlock - binary.BigEndian.PutUint64(b[1:9], c.FromBlock) - - // Encode offset - binary.BigEndian.PutUint32(b[9:13], c.Offset) - - // Encode createdAt - binary.BigEndian.PutUint32(b[13:17], c.CreatedAt) - - // Remaining bytes stay as zero padding - - return common.BytesToHash(b) -} diff --git a/aggsender/types/types_test.go b/aggsender/types/types_test.go deleted file mode 100644 index 985127f9b..000000000 --- a/aggsender/types/types_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package types - -import ( - "math/big" - "testing" - - "github.com/ethereum/go-ethereum/common" - "github.com/stretchr/testify/require" -) - -func TestMetadataConversions_toBlock_Only(t *testing.T) { - toBlock := uint64(123567890) - hash := common.BigToHash(new(big.Int).SetUint64(toBlock)) - meta := NewCertificateMetadataFromHash(hash) - require.Equal(t, toBlock, meta.ToBlock) -} - -func TestMetadataConversions(t *testing.T) { - fromBlock := uint64(123567890) - offset := uint32(1000) - createdAt := uint32(0) - meta := NewCertificateMetadata(fromBlock, offset, createdAt) - c := meta.ToHash() - extractBlock := NewCertificateMetadataFromHash(c) - require.Equal(t, fromBlock, extractBlock.FromBlock) - require.Equal(t, offset, extractBlock.Offset) - require.Equal(t, createdAt, extractBlock.CreatedAt) -} diff --git a/bridgesync/processor_test.go b/bridgesync/processor_test.go index b2bcd1116..594d54bb6 100644 --- a/bridgesync/processor_test.go +++ b/bridgesync/processor_test.go @@ -78,7 +78,7 @@ func TestBigIntString(t *testing.T) { } func TestProceessor(t *testing.T) { - path := path.Join(t.TempDir(), "aggsenderTestProceessor.sqlite") + path := path.Join(t.TempDir(), "bridgeSyncTestProcessor.sqlite") logger := log.WithFields("bridge-syncer", "foo") p, err := newProcessor(path, logger) require.NoError(t, err) @@ -729,7 +729,7 @@ func TestDecodeGlobalIndex(t *testing.T) { } func TestInsertAndGetClaim(t *testing.T) { - path := path.Join(t.TempDir(), "aggsenderTestInsertAndGetClaim.sqlite") + path := path.Join(t.TempDir(),
"bridgeSyncerTestInsertAndGetClaim.sqlite") log.Debugf("sqlite path: %s", path) err := migrationsBridge.RunMigrations(path) require.NoError(t, err) @@ -850,7 +850,7 @@ func TestGetBridgesPublished(t *testing.T) { } func TestProcessBlockInvalidIndex(t *testing.T) { - path := path.Join(t.TempDir(), "aggsenderTestProceessor.sqlite") + path := path.Join(t.TempDir(), "testProcessorBlockInvalidIndex.sqlite") logger := log.WithFields("bridge-syncer", "foo") p, err := newProcessor(path, logger) require.NoError(t, err) diff --git a/cmd/main.go b/cmd/main.go index 5cd86b261..39a9df30b 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -41,7 +41,7 @@ var ( Aliases: []string{"co"}, Usage: "List of components to run", Required: false, - Value: cli.NewStringSlice(common.SEQUENCE_SENDER, common.AGGREGATOR, common.BRIDGE, common.AGGSENDER), + Value: cli.NewStringSlice(common.SEQUENCE_SENDER, common.AGGREGATOR, common.BRIDGE), } saveConfigFlag = cli.StringFlag{ Name: config.FlagSaveConfigPath, diff --git a/cmd/run.go b/cmd/run.go index e088c740a..afbcae3c1 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -12,10 +12,8 @@ import ( zkevm "github.com/0xPolygon/cdk" dataCommitteeClient "github.com/0xPolygon/cdk-data-availability/client" jRPC "github.com/0xPolygon/cdk-rpc/rpc" - "github.com/0xPolygon/cdk/agglayer" "github.com/0xPolygon/cdk/aggregator" "github.com/0xPolygon/cdk/aggregator/db" - "github.com/0xPolygon/cdk/aggsender" "github.com/0xPolygon/cdk/bridgesync" "github.com/0xPolygon/cdk/claimsponsor" cdkcommon "github.com/0xPolygon/cdk/common" @@ -72,7 +70,7 @@ func start(cliCtx *cli.Context) error { } }() - rollupID := getRollUpIDIfNeeded(components, cfg.NetworkConfig.L1Config, l1Client) + rollupID := getRollupID(cfg.NetworkConfig.L1Config, l1Client) l1InfoTreeSync := runL1InfoTreeSyncerIfNeeded(cliCtx.Context, components, *cfg, l1Client, reorgDetectorL1) claimSponsor := runClaimSponsorIfNeeded(cliCtx.Context, components, l2Client, cfg.ClaimSponsor) l1BridgeSync := runBridgeSyncL1IfNeeded(cliCtx.Context, components, cfg.BridgeL1Sync, reorgDetectorL1, @@ -112,21 +110,6 @@ func start(cliCtx *cli.Context) error { l2BridgeSync, ) rpcServices = append(rpcServices, rpcBridge...) - - case cdkcommon.AGGSENDER: - aggsender, err := createAggSender( - cliCtx.Context, - cfg.AggSender, - l1Client, - l1InfoTreeSync, - l2BridgeSync, - ) - if err != nil { - log.Fatal(err) - } - rpcServices = append(rpcServices, aggsender.GetRPCServices()...) 
- - go aggsender.Start(cliCtx.Context) } } if len(rpcServices) > 0 { @@ -142,40 +125,6 @@ func start(cliCtx *cli.Context) error { return nil } -func createAggSender( - ctx context.Context, - cfg aggsender.Config, - l1EthClient *ethclient.Client, - l1InfoTreeSync *l1infotreesync.L1InfoTreeSync, - l2Syncer *bridgesync.BridgeSync) (*aggsender.AggSender, error) { - logger := log.WithFields("module", cdkcommon.AGGSENDER) - agglayerClient := agglayer.NewAggLayerClient(cfg.AggLayerURL) - blockNotifier, err := aggsender.NewBlockNotifierPolling(l1EthClient, aggsender.ConfigBlockNotifierPolling{ - BlockFinalityType: etherman.BlockNumberFinality(cfg.BlockFinality), - CheckNewBlockInterval: aggsender.AutomaticBlockInterval, - }, logger, nil) - if err != nil { - return nil, err - } - - notifierCfg, err := aggsender.NewConfigEpochNotifierPerBlock(agglayerClient, cfg.EpochNotificationPercentage) - if err != nil { - return nil, fmt.Errorf("cant generate config for Epoch Notifier because: %w", err) - } - epochNotifier, err := aggsender.NewEpochNotifierPerBlock( - blockNotifier, - logger, - *notifierCfg, nil) - if err != nil { - return nil, err - } - log.Infof("Starting blockNotifier: %s", blockNotifier.String()) - go blockNotifier.Start(ctx) - log.Infof("Starting epochNotifier: %s", epochNotifier.String()) - go epochNotifier.Start(ctx) - return aggsender.New(ctx, logger, cfg, agglayerClient, l1InfoTreeSync, l2Syncer, epochNotifier) -} - func createAggregator(ctx context.Context, c config.Config, runMigrations bool) *aggregator.Aggregator { logger := log.WithFields("module", cdkcommon.AGGREGATOR) // Migrations @@ -452,7 +401,7 @@ func runL1InfoTreeSyncerIfNeeded( reorgDetector *reorgdetector.ReorgDetector, ) *l1infotreesync.L1InfoTreeSync { if !isNeeded([]string{cdkcommon.BRIDGE, - cdkcommon.SEQUENCE_SENDER, cdkcommon.AGGSENDER, cdkcommon.L1INFOTREESYNC}, components) { + cdkcommon.SEQUENCE_SENDER, cdkcommon.L1INFOTREESYNC}, components) { return nil } l1InfoTreeSync, err := l1infotreesync.New( @@ -481,8 +430,7 @@ func runL1InfoTreeSyncerIfNeeded( func runL1ClientIfNeeded(components []string, urlRPCL1 string) *ethclient.Client { if !isNeeded([]string{ cdkcommon.SEQUENCE_SENDER, cdkcommon.AGGREGATOR, - cdkcommon.BRIDGE, cdkcommon.AGGSENDER, - cdkcommon.L1INFOTREESYNC, + cdkcommon.BRIDGE, cdkcommon.L1INFOTREESYNC, }, components) { return nil } @@ -495,13 +443,8 @@ func runL1ClientIfNeeded(components []string, urlRPCL1 string) *ethclient.Client return l1CLient } -func getRollUpIDIfNeeded(components []string, networkConfig ethermanconfig.L1Config, +func getRollupID(networkConfig ethermanconfig.L1Config, l1Client *ethclient.Client) uint32 { - if !isNeeded([]string{ - cdkcommon.AGGSENDER, - }, components) { - return 0 - } rollupID, err := etherman.GetRollupID(networkConfig, networkConfig.ZkEVMAddr, l1Client) if err != nil { log.Fatal(err) @@ -510,7 +453,7 @@ func getRollUpIDIfNeeded(components []string, networkConfig ethermanconfig.L1Con } func runL2ClientIfNeeded(components []string, urlRPCL2 string) *ethclient.Client { - if !isNeeded([]string{cdkcommon.BRIDGE, cdkcommon.AGGSENDER}, components) { + if !isNeeded([]string{cdkcommon.BRIDGE}, components) { return nil } @@ -531,8 +474,7 @@ func runReorgDetectorL1IfNeeded( ) (*reorgdetector.ReorgDetector, chan error) { if !isNeeded([]string{ cdkcommon.SEQUENCE_SENDER, cdkcommon.AGGREGATOR, - cdkcommon.BRIDGE, cdkcommon.AGGSENDER, - cdkcommon.L1INFOTREESYNC}, + cdkcommon.BRIDGE, cdkcommon.L1INFOTREESYNC}, components) { return nil, nil } @@ -555,7 +497,7 @@ func 
runReorgDetectorL2IfNeeded( l2Client *ethclient.Client, cfg *reorgdetector.Config, ) (*reorgdetector.ReorgDetector, chan error) { - if !isNeeded([]string{cdkcommon.BRIDGE, cdkcommon.AGGSENDER}, components) { + if !isNeeded([]string{cdkcommon.BRIDGE}, components) { return nil, nil } rd := newReorgDetector(cfg, l2Client) @@ -686,7 +628,7 @@ func runBridgeSyncL2IfNeeded( l2Client *ethclient.Client, rollupID uint32, ) *bridgesync.BridgeSync { - if !isNeeded([]string{cdkcommon.BRIDGE, cdkcommon.AGGSENDER}, components) { + if !isNeeded([]string{cdkcommon.BRIDGE}, components) { return nil } @@ -748,8 +690,8 @@ func createRPC(cfg jRPC.Config, services []jRPC.Service) *jRPC.Server { } func getL2RPCUrl(c *config.Config) string { - if c.AggSender.URLRPCL2 != "" { - return c.AggSender.URLRPCL2 + if c.SequenceSender.RPCURL != "" { + return c.SequenceSender.RPCURL } return "" diff --git a/common/components.go b/common/components.go index f1398ae9a..45c8ae327 100644 --- a/common/components.go +++ b/common/components.go @@ -11,8 +11,6 @@ const ( CLAIM_SPONSOR = "claim-sponsor" //nolint:stylecheck // PROVER name to identify the prover component PROVER = "prover" - // AGGSENDER name to identify the aggsender component - AGGSENDER = "aggsender" // L1INFOTREESYNC name to identify the l1infotreesync component L1INFOTREESYNC = "l1infotreesync" ) diff --git a/config/config.go b/config/config.go index 7665dc41d..9554559d5 100644 --- a/config/config.go +++ b/config/config.go @@ -9,7 +9,6 @@ import ( jRPC "github.com/0xPolygon/cdk-rpc/rpc" "github.com/0xPolygon/cdk/aggregator" - "github.com/0xPolygon/cdk/aggsender" "github.com/0xPolygon/cdk/bridgesync" "github.com/0xPolygon/cdk/claimsponsor" "github.com/0xPolygon/cdk/common" @@ -167,9 +166,6 @@ type Config struct { // LastGERSync is the config for the synchronizer in charge of syncing the last GER injected on L2. // Needed for the bridge service (RPC) LastGERSync lastgersync.Config - - // AggSender is the configuration of the agg sender service - AggSender aggsender.Config } // Load loads the configuration diff --git a/config/default.go b/config/default.go index 0c78a2ce9..8d2b5047b 100644 --- a/config/default.go +++ b/config/default.go @@ -293,21 +293,4 @@ ZkEVMAddr = "{{L1Config.polygonZkEVMAddress}}" RollupManagerAddr = "{{L1Config.polygonRollupManagerAddress}}" GlobalExitRootManagerAddr = "{{L1Config.polygonZkEVMGlobalExitRootAddress}}" - -[AggSender] -StoragePath = "{{PathRWData}}/aggsender.sqlite" -AggLayerURL = "{{AggLayerURL}}" -AggsenderPrivateKey = {Path = "{{SequencerPrivateKeyPath}}", Password = "{{SequencerPrivateKeyPassword}}"} -URLRPCL2="{{L2URL}}" -BlockFinality = "LatestBlock" -EpochNotificationPercentage = 50 -SaveCertificatesToFilesPath = "" -MaxRetriesStoreCertificate = 3 -DelayBeetweenRetries = "60s" -KeepCertificatesHistory = true -# MaxSize of the certificate to 8Mb -MaxCertSize = 8388608 -BridgeMetadataAsHash = true -DryRun = false -EnableRPC = true ` diff --git a/scripts/local_config b/scripts/local_config index 90b5ae116..f052d927f 100755 --- a/scripts/local_config +++ b/scripts/local_config @@ -202,10 +202,7 @@ function export_values_of_cdk_node_config(){ if [ $? -ne 0 ]; then export_key_from_toml_file_or_fatal zkevm_rollup_fork_id $_CDK_CONFIG_FILE "." ForkId fi - export_key_from_toml_file zkevm_l2_agglayer_keystore_password $_CDK_CONFIG_FILE AggSender.SequencerPrivateKey Password - if [ $? -ne 0 ]; then - export_key_from_toml_file_or_fatal zkevm_l2_agglayer_keystore_password $_CDK_CONFIG_FILE "." 
SequencerPrivateKeyPassword - fi + export_key_from_toml_file_or_fatal zkevm_l2_agglayer_keystore_password $_CDK_CONFIG_FILE "." SequencerPrivateKeyPassword export_key_from_toml_file zkevm_bridge_address $_CDK_CONFIG_FILE BridgeL1Sync BridgeAddr if [ $? -ne 0 ]; then export_key_from_toml_file_or_fatal zkevm_bridge_address $_CDK_CONFIG_FILE "." polygonBridgeAddr @@ -436,9 +433,6 @@ cat << EOF "-components", "sequence-sender,aggregator", ] }, - - To run AggSender change components to: - "-components", "aggsender", EOF echo " -----------------------------------------------------------" diff --git a/test/Makefile b/test/Makefile index 4cbedecb7..2b621797f 100644 --- a/test/Makefile +++ b/test/Makefile @@ -3,7 +3,7 @@ generate-mocks: generate-mocks-bridgesync generate-mocks-reorgdetector \ generate-mocks-sequencesender generate-mocks-da \ generate-mocks-l1infotreesync generate-mocks-helpers \ generate-mocks-sync generate-mocks-aggregator \ - generate-mocks-aggsender generate-mocks-agglayer + generate-mocks-agglayer .PHONY: generate-mocks-bridgesync generate-mocks-bridgesync: ## Generates mocks for bridgesync, using mockery tool @@ -44,7 +44,6 @@ generate-mocks-sync: ## Generates mocks for sync, using mockery tool export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=evmDownloaderFull --dir=../sync --output=../sync --outpkg=sync --inpackage --structname=EVMDownloaderMock --filename=mock_downloader_test.go ${COMMON_MOCKERY_PARAMS} export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=processorInterface --dir=../sync --output=../sync --outpkg=sync --inpackage --structname=ProcessorMock --filename=mock_processor_test.go ${COMMON_MOCKERY_PARAMS} export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=ReorgDetector --dir=../sync --output=../sync --outpkg=sync --inpackage --structname=ReorgDetectorMock --filename=mock_reorgdetector_test.go ${COMMON_MOCKERY_PARAMS} - .PHONY: generate-mocks-aggregator generate-mocks-aggregator: ## Generates mocks for aggregator, using mockery tool @@ -57,11 +56,6 @@ generate-mocks-aggregator: ## Generates mocks for aggregator, using mockery tool export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=RPCInterface --dir=../aggregator --output=../aggregator/mocks --outpkg=mocks --structname=RPCInterfaceMock --filename=mock_rpc.go ${COMMON_MOCKERY_PARAMS} export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=AggregatorService_ChannelServer --dir=../aggregator/prover --output=../aggregator/prover/mocks --outpkg=mocks --structname=ChannelMock --filename=mock_channel.go ${COMMON_MOCKERY_PARAMS} - -.PHONY: generate-mocks-aggsender -generate-mocks-aggsender: ## Generates mocks for aggsender, using mockery tool - export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --all --case snake --dir ../aggsender --output ../aggsender/mocks --outpkg mocks ${COMMON_MOCKERY_PARAMS} - .PHONY: generate-mocks-agglayer generate-mocks-agglayer: ## Generates mocks for agglayer, using mockery tool export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=AgglayerClientInterface --dir=../agglayer --output=../agglayer --outpkg=agglayer --inpackage --structname=AgglayerClientMock --filename=mock_agglayer_client.go ${COMMON_MOCKERY_PARAMS} @@ -86,16 +80,6 @@ test-e2e-fork12-rollup: stop ./run-e2e.sh fork12 rollup bats bats/fep/ -.PHONY: test-e2e-fork12-pessimistic -test-e2e-fork12-pessimistic: stop - ./run-e2e.sh fork12 pessimistic - bats bats/pp/bridge-e2e.bats 
bats/pp/e2e-pp.bats - -.PHONY: test-e2e-fork12-multi-pessimistic -test-e2e-fork12-multi-pessimistic: stop - ./run-e2e-multi_pp.sh - bats bats/pp-multi - .PHONY: stop stop: kurtosis clean --all diff --git a/test/bats/helpers/aggsender.bash b/test/bats/helpers/aggsender.bash deleted file mode 100644 index 6a7399a94..000000000 --- a/test/bats/helpers/aggsender.bash +++ /dev/null @@ -1,10 +0,0 @@ -#!/usr/bin/env bash - -function wait_to_settled_certificate_containing_global_index(){ - local _l2_pp1_cdk_node_url=$1 - local _global_index=$2 - local _check_frequency=${3:-30} - local _timeout=${4:-300} - echo "... waiting for certificate with global index $_global_index" >&3 - run_with_timeout "settle cert for $_global_index" $_check_frequency $_timeout $aggsender_find_imported_bridge $_l2_pp1_cdk_node_url $_global_index -} \ No newline at end of file diff --git a/test/bats/helpers/common-multi_cdk-setup.bash b/test/bats/helpers/common-multi_cdk-setup.bash deleted file mode 100644 index 2758c9f75..000000000 --- a/test/bats/helpers/common-multi_cdk-setup.bash +++ /dev/null @@ -1,81 +0,0 @@ -#!/usr/bin/env bash - -_common_multi_setup() { - load '../helpers/common-setup' - _common_setup - # generated with cast wallet new - readonly target_address=0xbecE3a31343c6019CDE0D5a4dF2AF8Df17ebcB0f - readonly target_private_key=0x51caa196504216b1730280feb63ddd8c5ae194d13e57e58d559f1f1dc3eda7c9 - - kurtosis service exec $enclave contracts-001 "cat /opt/zkevm/combined-001.json" | tail -n +2 | jq '.' > combined-001.json - kurtosis service exec $enclave contracts-002 "cat /opt/zkevm/combined-002.json" | tail -n +2 | jq '.' > combined-002.json - kurtosis service exec $enclave contracts-002 "cat /opt/zkevm-contracts/deployment/v2/create_rollup_parameters.json" | tail -n +2 | jq -r '.gasTokenAddress' > gas-token-address.json - - readonly private_key="0x12d7de8621a77640c9241b2595ba78ce443d05e94090365ab3bb5e19df82c625" - readonly eth_address=$(cast wallet address --private-key $private_key) - readonly l1_rpc_url=http://$(kurtosis port print $enclave el-1-geth-lighthouse rpc) - readonly l2_pp1_url=$(kurtosis port print $enclave cdk-erigon-rpc-001 rpc) - readonly l2_pp2_url=$(kurtosis port print $enclave cdk-erigon-rpc-002 rpc) - readonly bridge_address=$(cat combined-001.json | jq -r .polygonZkEVMBridgeAddress) - readonly pol_address=$(cat combined-001.json | jq -r .polTokenAddress) - readonly gas_token_address=$(&3 - echo "=== POL address=$pol_address ===" >&3 - echo "=== Gas token address=$gas_token_address ===" >&3 - echo "=== L1 network id=$l1_rpc_network_id ===" >&3 - echo "=== L2 PP1 network id=$l2_pp1b_network_id ===" >&3 - echo "=== L2 PP2 network id=$l2_pp2b_network_id ===" >&3 - echo "=== L1 RPC URL=$l1_rpc_url ===" >&3 - echo "=== L2 PP1 URL=$l2_pp1_url ===" >&3 - echo "=== L2 PP2 URL=$l2_pp2_url ===" >&3 - echo "=== L2 PP1B URL=$l2_pp1b_url ===" >&3 - echo "=== L2 PP2B URL=$l2_pp2b_url ===" >&3 - -} - -add_cdk_network2_to_agglayer(){ - echo "=== Checking if network 2 is in agglayer ===" >&3 - local _prev=$(kurtosis service exec $enclave agglayer "grep \"2 = \" /etc/zkevm/agglayer-config.toml || true" | tail -n +2) - if [ ! 
-z "$_prev" ]; then - echo "Network 2 already added to agglayer" >&3 - return - fi - echo "=== Adding network 2 to agglayer === ($_prev)" >&3 - kurtosis service exec $enclave agglayer "sed -i 's/\[proof\-signers\]/2 = \"http:\/\/cdk-erigon-rpc-002:8123\"\n\[proof-signers\]/i' /etc/zkevm/agglayer-config.toml" - kurtosis service stop $enclave agglayer - kurtosis service start $enclave agglayer -} - -fund_claim_tx_manager(){ - echo "=== Funding bridge auto-claim ===" >&3 - cast send --legacy --value 100ether --rpc-url $l2_pp1_url --private-key $private_key 0x5f5dB0D4D58310F53713eF4Df80ba6717868A9f8 - cast send --legacy --value 100ether --rpc-url $l2_pp2_url --private-key $private_key 0x93F63c24735f45Cd0266E87353071B64dd86bc05 -} - - -mint_pol_token(){ - echo "=== Minting POL ===" >&3 - cast send \ - --rpc-url $l1_rpc_url \ - --private-key $private_key \ - $pol_address \ - "$mint_fn_sig" \ - $eth_address 10000000000000000000000 - # Allow bridge to spend it - cast send \ - --rpc-url $l1_rpc_url \ - --private-key $private_key \ - $pol_address \ - "$approve_fn_sig" \ - $bridge_address 10000000000000000000000 -} diff --git a/test/bats/pp-multi/bridge-l2_to_l2-e2e.bats b/test/bats/pp-multi/bridge-l2_to_l2-e2e.bats deleted file mode 100644 index a9a904b6f..000000000 --- a/test/bats/pp-multi/bridge-l2_to_l2-e2e.bats +++ /dev/null @@ -1,82 +0,0 @@ -# based on: https://github.com/0xPolygon/kurtosis-cdk/blob/jhilliard/multi-pp-testing/multi-pp-test.sh.md - -setup() { - load '../helpers/common-multi_cdk-setup' - _common_multi_setup - load '../helpers/common' - load '../helpers/lxly-bridge' - load '../helpers/aggsender' - - if [ ! -f $aggsender_find_imported_bridge ]; then - echo "missing required tool: $aggsender_find_imported_bridge" >&3 - return 1 - fi - - add_cdk_network2_to_agglayer - fund_claim_tx_manager - mint_pol_token - - ether_value=${ETHER_VALUE:-"0.0200000054"} - amount=$(cast to-wei $ether_value ether) - native_token_addr="0x0000000000000000000000000000000000000000" - readonly sender_private_key=${SENDER_PRIVATE_KEY:-"12d7de8621a77640c9241b2595ba78ce443d05e94090365ab3bb5e19df82c625"} - readonly sender_addr="$(cast wallet address --private-key $sender_private_key)" - # Params for lxly-bridge functions - is_forced=${IS_FORCED:-"true"} - bridge_addr=$bridge_address - meta_bytes=${META_BYTES:-"0x1234"} - destination_addr=$target_address - timeout="600" - claim_frequency="30" - - gas_price=$(cast gas-price --rpc-url "$l2_rpc_url") -} - -@test "Test L2 to L2 bridge" { - echo "=== Running LxLy bridge eth L1 to L2(PP1) amount:$amount" >&3 - destination_net=$l2_pp1b_network_id - bridge_asset "$native_token_addr" "$l1_rpc_url" - bridge_tx_hash_pp1=$bridge_tx_hash - - echo "=== Running LxLy bridge eth L1 to L2(PP2) amount:$amount" >&3 - destination_net=$l2_pp2b_network_id - bridge_asset "$native_token_addr" "$l1_rpc_url" - bridge_tx_hash_pp2=$bridge_tx_hash - - echo "=== Running LxLy claim L1 to L2(PP1) for $bridge_tx_hash_pp1" >&3 - run claim_tx_hash "$timeout" "$bridge_tx_hash_pp1" "$destination_addr" "$l2_pp1_url" "$l2_pp1b_url" - assert_success - - echo "=== Running LxLy claim L1 to L2(PP2) for $bridge_tx_hash_pp2" >&3 - run claim_tx_hash "$timeout" "$bridge_tx_hash_pp2" "$destination_addr" "$l2_pp2_url" "$l2_pp2b_url" - assert_success - - - # reduce eth amount - amount=1234567 - echo "=== Running LxLy bridge L2(PP2) to L2(PP1) amount:$amount" >&3 - destination_net=$l2_pp1b_network_id - meta_bytes="0xbeef" - bridge_asset "$native_token_addr" "$l2_pp2_url" - - echo "=== Running LxLy claim L2(PP2) to 
L2(PP1) for: $bridge_tx_hash" >&3 - claim_tx_hash "$timeout" "$bridge_tx_hash" "$destination_addr" "$l2_pp1_url" "$l2_pp2b_url" - echo "... deposit [$global_index]" - global_index_pp2_to_pp1="$global_index" - - # Now we need to do a bridge on L2(PP1) to trigger a certificate: - ether_value=${ETHER_VALUE:-"0.0100000054"} - amount=$(cast to-wei $ether_value ether) - echo "=== Running LxLy bridge eth L2(PP1) to L1 (trigger a certificate on PP1) amount:$amount" >&3 - destination_net=$l1_rpc_network_id - meta_bytes="0xabcd" - bridge_asset "$native_token_addr" "$l2_pp1_url" - - echo "=== Running LxLy claim L2(PP1) to L1 for $bridge_tx_hash" >&3 - run claim_tx_hash "$timeout" "$bridge_tx_hash" "$destination_addr" "$l1_rpc_url" "$l2_pp1b_url" - assert_success - - echo "=== Waiting to settled certificate with imported bridge for global_index: $global_index_pp2_to_pp1" - wait_to_settled_certificate_containing_global_index $l2_pp1_cdk_node_url $global_index_pp2_to_pp1 - -} diff --git a/test/bats/pp/bridge-e2e-msg.bats b/test/bats/pp/bridge-e2e-msg.bats deleted file mode 100644 index b55259156..000000000 --- a/test/bats/pp/bridge-e2e-msg.bats +++ /dev/null @@ -1,68 +0,0 @@ -setup() { - load '../../helpers/common-setup' - _common_setup - load '../../helpers/common' - load '../../helpers/lxly-bridge-test' - - if [ -z "$BRIDGE_ADDRESS" ]; then - local combined_json_file="/opt/zkevm/combined.json" - echo "BRIDGE_ADDRESS env variable is not provided, resolving the bridge address from the Kurtosis CDK '$combined_json_file'" >&3 - - # Fetching the combined JSON output and filtering to get polygonZkEVMBridgeAddress - combined_json_output=$($contracts_service_wrapper "cat $combined_json_file" | tail -n +2) - bridge_default_address=$(echo "$combined_json_output" | jq -r .polygonZkEVMBridgeAddress) - BRIDGE_ADDRESS=$bridge_default_address - fi - echo "Bridge address=$BRIDGE_ADDRESS" >&3 - - readonly sender_private_key=${SENDER_PRIVATE_KEY:-"12d7de8621a77640c9241b2595ba78ce443d05e94090365ab3bb5e19df82c625"} - readonly sender_addr="$(cast wallet address --private-key $sender_private_key)" - destination_net=${DESTINATION_NET:-"1"} - destination_addr=${DESTINATION_ADDRESS:-"0x0bb7AA0b4FdC2D2862c088424260e99ed6299148"} - ether_value=${ETHER_VALUE:-"0.0200000054"} - amount=$(cast to-wei $ether_value ether) - readonly native_token_addr=${NATIVE_TOKEN_ADDRESS:-"0x0000000000000000000000000000000000000000"} - if [[ -n "$GAS_TOKEN_ADDR" ]]; then - echo "Using provided GAS_TOKEN_ADDR: $GAS_TOKEN_ADDR" >&3 - gas_token_addr="$GAS_TOKEN_ADDR" - else - echo "GAS_TOKEN_ADDR not provided, retrieving from rollup parameters file." 
>&3 - readonly rollup_params_file=/opt/zkevm/create_rollup_parameters.json - run bash -c "$contracts_service_wrapper 'cat $rollup_params_file' | tail -n +2 | jq -r '.gasTokenAddress'" - assert_success - assert_output --regexp "0x[a-fA-F0-9]{40}" - gas_token_addr=$output - fi - readonly is_forced=${IS_FORCED:-"true"} - readonly bridge_addr=$BRIDGE_ADDRESS - readonly meta_bytes=${META_BYTES:-"0x1234"} - - readonly l1_rpc_url=${L1_ETH_RPC_URL:-"$(kurtosis port print $enclave el-1-geth-lighthouse rpc)"} - readonly bridge_api_url=${BRIDGE_API_URL:-"$(kurtosis port print $enclave zkevm-bridge-service-001 rpc)"} - - readonly dry_run=${DRY_RUN:-"false"} - readonly l1_rpc_network_id=$(cast call --rpc-url $l1_rpc_url $bridge_addr 'networkID() (uint32)') - readonly l2_rpc_network_id=$(cast call --rpc-url $l2_rpc_url $bridge_addr 'networkID() (uint32)') - gas_price=$(cast gas-price --rpc-url "$l2_rpc_url") - readonly weth_token_addr=$(cast call --rpc-url $l2_rpc_url $bridge_addr 'WETHToken()' | cast parse-bytes32-address) -} - - -@test "transfer message" { - echo "====== bridgeMessage L1 -> L2" >&3 - destination_addr=$sender_addr - destination_net=$l2_rpc_network_id - run bridge_message "$native_token_addr" "$l1_rpc_url" - assert_success - - echo "====== Claim in L2" >&3 - timeout="120" - claim_frequency="10" - run wait_for_claim "$timeout" "$claim_frequency" "$l2_rpc_url" "bridgeMessage" - assert_success - - echo "====== bridgeMessage L2->L1" >&3 - destination_net=0 - run bridge_message "$destination_addr" "$l2_rpc_url" - assert_success -} \ No newline at end of file diff --git a/test/bats/pp/bridge-e2e.bats b/test/bats/pp/bridge-e2e.bats deleted file mode 100644 index 1f358315b..000000000 --- a/test/bats/pp/bridge-e2e.bats +++ /dev/null @@ -1,73 +0,0 @@ -setup() { - load '../helpers/common-setup' - load '../helpers/common' - load '../helpers/lxly-bridge' - - _common_setup - - if [ -z "$BRIDGE_ADDRESS" ]; then - local combined_json_file="/opt/zkevm/combined.json" - echo "BRIDGE_ADDRESS env variable is not provided, resolving the bridge address from the Kurtosis CDK '$combined_json_file'" >&3 - - # Fetching the combined JSON output and filtering to get polygonZkEVMBridgeAddress - combined_json_output=$($contracts_service_wrapper "cat $combined_json_file" | tail -n +2) - bridge_default_address=$(echo "$combined_json_output" | jq -r .polygonZkEVMBridgeAddress) - BRIDGE_ADDRESS=$bridge_default_address - fi - echo "Bridge address=$BRIDGE_ADDRESS" >&3 - - readonly sender_private_key=${SENDER_PRIVATE_KEY:-"12d7de8621a77640c9241b2595ba78ce443d05e94090365ab3bb5e19df82c625"} - readonly sender_addr="$(cast wallet address --private-key $sender_private_key)" - destination_net=${DESTINATION_NET:-"1"} - destination_addr=${DESTINATION_ADDRESS:-"0x0bb7AA0b4FdC2D2862c088424260e99ed6299148"} - ether_value=${ETHER_VALUE:-"0.0200000054"} - amount=$(cast to-wei $ether_value ether) - readonly native_token_addr=${NATIVE_TOKEN_ADDRESS:-"0x0000000000000000000000000000000000000000"} - if [[ -n "$GAS_TOKEN_ADDR" ]]; then - echo "Using provided GAS_TOKEN_ADDR: $GAS_TOKEN_ADDR" >&3 - gas_token_addr="$GAS_TOKEN_ADDR" - else - echo "GAS_TOKEN_ADDR not provided, retrieving from rollup parameters file." 
>&3 - readonly rollup_params_file=/opt/zkevm/create_rollup_parameters.json - run bash -c "$contracts_service_wrapper 'cat $rollup_params_file' | tail -n +2 | jq -r '.gasTokenAddress'" - assert_success - assert_output --regexp "0x[a-fA-F0-9]{40}" - gas_token_addr=$output - fi - readonly is_forced=${IS_FORCED:-"true"} - readonly bridge_addr=$BRIDGE_ADDRESS - readonly meta_bytes=${META_BYTES:-"0x1234"} - - readonly l1_rpc_url=${L1_ETH_RPC_URL:-"$(kurtosis port print $enclave el-1-geth-lighthouse rpc)"} - readonly bridge_api_url=${BRIDGE_API_URL:-"$(kurtosis port print $enclave zkevm-bridge-service-001 rpc)"} - - readonly dry_run=${DRY_RUN:-"false"} - readonly l1_rpc_network_id=$(cast call --rpc-url $l1_rpc_url $bridge_addr 'networkID() (uint32)') - readonly l2_rpc_network_id=$(cast call --rpc-url $l2_rpc_url $bridge_addr 'networkID() (uint32)') - gas_price=$(cast gas-price --rpc-url "$l2_rpc_url") - readonly weth_token_addr=$(cast call --rpc-url $l2_rpc_url $bridge_addr 'WETHToken()' | cast parse-bytes32-address) -} - -@test "Native gas token deposit to WETH" { - destination_addr=$sender_addr - local initial_receiver_balance=$(cast call --rpc-url "$l2_rpc_url" "$weth_token_addr" "$balance_of_fn_sig" "$destination_addr" | awk '{print $1}') - echo "Initial receiver balance of native token on L2 $initial_receiver_balance" >&3 - - echo "=== Running LxLy deposit on L1 to network: $l2_rpc_network_id native_token: $native_token_addr" >&3 - - destination_net=$l2_rpc_network_id - run bridge_asset "$native_token_addr" "$l1_rpc_url" - assert_success - - echo "=== Running LxLy claim on L2" >&3 - timeout="120" - claim_frequency="10" - run wait_for_claim "$timeout" "$claim_frequency" "$l2_rpc_url" - assert_success - - echo "=== bridgeAsset L2 WETH: $weth_token_addr to L1 ETH" >&3 - destination_addr=$sender_addr - destination_net=0 - run bridge_asset "$weth_token_addr" "$l2_rpc_url" - assert_success -} diff --git a/test/bats/pp/e2e-pp.bats b/test/bats/pp/e2e-pp.bats deleted file mode 100644 index 4ef831e7b..000000000 --- a/test/bats/pp/e2e-pp.bats +++ /dev/null @@ -1,26 +0,0 @@ -setup() { - load '../helpers/common-setup' - - _common_setup - - if [ -z "$BRIDGE_ADDRESS" ]; then - local combined_json_file="/opt/zkevm/combined.json" - echo "BRIDGE_ADDRESS env variable is not provided, resolving the bridge address from the Kurtosis CDK '$combined_json_file'" >&3 - - # Fetching the combined JSON output and filtering to get polygonZkEVMBridgeAddress - combined_json_output=$($contracts_service_wrapper "cat $combined_json_file" | tail -n +2) - bridge_default_address=$(echo "$combined_json_output" | jq -r .polygonZkEVMBridgeAddress) - BRIDGE_ADDRESS=$bridge_default_address - fi - echo "Bridge address=$BRIDGE_ADDRESS" >&3 -} - -@test "Verify certificate settlement" { - echo "Waiting 10 minutes to get some settle certificate...." 
>&3 - - readonly bridge_addr=$BRIDGE_ADDRESS - readonly l2_rpc_network_id=$(cast call --rpc-url $l2_rpc_url $bridge_addr 'networkID() (uint32)') - - run $PROJECT_ROOT/../scripts/agglayer_certificates_monitor.sh 1 600 $l2_rpc_network_id - assert_success -} diff --git a/test/combinations/fork12-pessimistic-multi-attach-second-cdk.yml b/test/combinations/fork12-pessimistic-multi-attach-second-cdk.yml deleted file mode 100644 index 47aa6d789..000000000 --- a/test/combinations/fork12-pessimistic-multi-attach-second-cdk.yml +++ /dev/null @@ -1,40 +0,0 @@ -deployment_stages: - deploy_l1: false - deploy_agglayer: false - -args: - deployment_suffix: "-002" - zkevm_rollup_chain_id: 20202 - zkevm_rollup_id: 2 - - # The following accounts have been generated using the following command: - # polycli wallet inspect --mnemonic 'bless share truly shadow primary sun relief border van gallery stairs edit reflect gentle athlete main device smile response rescue mirror floor say people' --addresses 9 | tee keys.txt | jq -r '.Addresses[] | [.ETHAddress, .HexPrivateKey] | @tsv' | awk 'BEGIN{split("sequencer,aggregator,claimtxmanager,timelock,admin,loadtest,agglayer,dac,proofsigner",roles,",")} {print "zkevm_l2_" roles[NR] "_address: \"" $1 "\""; print "zkevm_l2_" roles[NR] "_private_key: \"0x" $2 "\"\n"}' - # Note that admin and agglayer accounts have been removed since we're using the default accounts. - zkevm_l2_sequencer_address: "0xA670342930242407b9984e467353044f8472055e" - zkevm_l2_sequencer_private_key: "0x902ed4ce26b536617a4f26da5e0cd0ef61b514a076b4bd766d6ab8b97efbb8c1" - zkevm_l2_aggregator_address: "0xfC419a9d9Fe0DfA4Cf9971AcD1Fbcd356DD768FD" - zkevm_l2_aggregator_private_key: "0xa70db9fb4b84a6ba18c03cd2266116dd110538d6c4c88e67ca35a29b910da25d" - zkevm_l2_claimtxmanager_address: "0x93F63c24735f45Cd0266E87353071B64dd86bc05" - zkevm_l2_claimtxmanager_private_key: "0x38718f22097afba13be48d818964326c9c5c48133f51e3c3bfd6faf05f813b34" - zkevm_l2_timelock_address: "0xDB22C6f61A82d6AA6d3607289fC93774AC09413a" - zkevm_l2_timelock_private_key: "0xae4a69010583a09709baa563fa66f9e6f2dacf9e9c84b89932406b9a0521b561" - zkevm_l2_loadtest_address: "0xD5278fC3Dc72A226d5C04c3d2C85fd397A46fA08" - zkevm_l2_loadtest_private_key: "0xef4db4f97684b8307adc332ed6c1bc82d66d160f08e7427d082d66a23889625e" - zkevm_l2_dac_address: "0xDa07AAD7226B136bc24157Dc4Ff5A813490E20D0" - zkevm_l2_dac_private_key: "0x992c9ab11d5eab6b6c2634b8bb0b85f3d8d1acf25024dc99c359cb2afd9b40a7" - zkevm_l2_proofsigner_address: "0xf1a661D7b601Ec46a040f57193cC99aB8c4132FA" - zkevm_l2_proofsigner_private_key: "0xc7fe3a006d75ba9326d9792523385abb49057c66aee0b8b4248821a89713f975" - - - cdk_node_image: cdk:latest - cdk_erigon_node_image: hermeznetwork/cdk-erigon:v2.60.0 - zkevm_contracts_image: leovct/zkevm-contracts:v9.0.0-rc.3-pp-fork.12-patch.1 - additional_services: [] - consensus_contract_type: pessimistic - sequencer_type: erigon - erigon_strict_mode: false - gas_token_enabled: false - zkevm_use_real_verifier: true - enable_normalcy: true - verifier_program_vkey: 0x00766aa16a6efe4ac05c0fe21d4b50f9631dbd1a2663a982da861427085ea2ea - diff --git a/test/combinations/fork12-pessimistic-multi.yml b/test/combinations/fork12-pessimistic-multi.yml deleted file mode 100644 index e2ca1e9d7..000000000 --- a/test/combinations/fork12-pessimistic-multi.yml +++ /dev/null @@ -1,15 +0,0 @@ -args: - cdk_node_image: cdk:latest - agglayer_image: ghcr.io/agglayer/agglayer:0.2.0-rc.20 - cdk_erigon_node_image: hermeznetwork/cdk-erigon:v2.60.0 - zkevm_contracts_image: 
leovct/zkevm-contracts:v9.0.0-rc.3-pp-fork.12-patch.1 - additional_services: [] - consensus_contract_type: pessimistic - sequencer_type: erigon - erigon_strict_mode: false - gas_token_enabled: false - zkevm_use_real_verifier: true - enable_normalcy: true - verifier_program_vkey: 0x00766aa16a6efe4ac05c0fe21d4b50f9631dbd1a2663a982da861427085ea2ea - agglayer_prover_sp1_key: {{.agglayer_prover_sp1_key}} - diff --git a/test/combinations/fork12-pessimistic.yml b/test/combinations/fork12-pessimistic.yml deleted file mode 100644 index c7714e45b..000000000 --- a/test/combinations/fork12-pessimistic.yml +++ /dev/null @@ -1,15 +0,0 @@ -args: - agglayer_image: ghcr.io/agglayer/agglayer:0.2.0-rc.20 - cdk_erigon_node_image: hermeznetwork/cdk-erigon:v2.60.0-beta8 - cdk_node_image: cdk - zkevm_bridge_proxy_image: haproxy:3.0-bookworm - zkevm_bridge_service_image: hermeznetwork/zkevm-bridge-service:v0.6.0-RC1 - zkevm_bridge_ui_image: leovct/zkevm-bridge-ui:multi-network - zkevm_contracts_image: leovct/zkevm-contracts:v9.0.0-rc.3-pp-fork.12-patch.1 - additional_services: [] - consensus_contract_type: pessimistic - sequencer_type: erigon - erigon_strict_mode: false - gas_token_enabled: true - agglayer_prover_sp1_key: {{.agglayer_prover_sp1_key}} - enable_normalcy: true diff --git a/test/config/kurtosis-cdk-node-config.toml.template b/test/config/kurtosis-cdk-node-config.toml.template index 45b938a2e..6729f7b71 100644 --- a/test/config/kurtosis-cdk-node-config.toml.template +++ b/test/config/kurtosis-cdk-node-config.toml.template @@ -53,8 +53,3 @@ Outputs = ["stderr"] VerifyProofInterval = "10s" GasOffset = 150000 SettlementBackend = "agglayer" - -[AggSender] -SaveCertificatesToFilesPath = "{{.zkevm_path_rw_data}}/" - - diff --git a/test/run-e2e-multi_pp.sh b/test/run-e2e-multi_pp.sh deleted file mode 100755 index f63013568..000000000 --- a/test/run-e2e-multi_pp.sh +++ /dev/null @@ -1,70 +0,0 @@ -#!/bin/bash -source $(dirname $0)/scripts/env.sh - -function log_error() { - echo -e "\033[0;31mError: $*" "\033[0m" -} - -function log_fatal() { - log_error $* - exit 1 -} - -function ok_or_fatal(){ - if [ $? -ne 0 ]; then - log_fatal $* - fi -} - -function build_docker_if_required(){ - docker images -q cdk:latest > /dev/null - if [ $? -ne 0 ] ; then - echo "Building cdk:latest" - pushd $BASE_FOLDER/.. - make build-docker - ok_or_fatal "Failed to build docker image" - popd - else - echo "docker cdk:latest already exists" - fi -} - -function resolve_template(){ - local _TEMPLATE_FILE="$1" - local _RESULT_VARNAME="$2" - local _TEMP_FILE=$(mktemp --suffix ".yml") - echo "rendering $_TEMPLATE_FILE to temp file $_TEMP_FILE" - go run ../scripts/run_template.go $_TEMPLATE_FILE > $_TEMP_FILE - ok_or_fatal "Failed to render template $_TEMPLATE_FILE" - grep "" "$_TEMP_FILE" - if [ $? -eq 0 ]; then - log_fatal "Failed to render template $_TEMPLATE_FILE. missing values" - fi - eval $_RESULT_VARNAME="$_TEMP_FILE" -} - -############################################################################### -# MAIN -############################################################################### -BASE_FOLDER=$(dirname $0) -PP1_ORIGIN_CONFIG_FILE=combinations/fork12-pessimistic-multi.yml -PP2_ORIGIN_CONFIG_FILE=combinations/fork12-pessimistic-multi-attach-second-cdk.yml -KURTOSIS_ENCLAVE=cdk - -[ -z $KURTOSIS_FOLDER ] && echo "KURTOSIS_FOLDER is not set" && exit 1 -[ ! -d $KURTOSIS_FOLDER ] && echo "KURTOSIS_FOLDER is not a directory ($KURTOSIS_FOLDER)" && exit 1 - - -[ ! 
-f $PP1_ORIGIN_CONFIG_FILE ] && echo "File $PP1_ORIGIN_CONFIG_FILE does not exist" && exit 1 -[ ! -f $PP2_ORIGIN_CONFIG_FILE ] && echo "File $PP2_ORIGIN_CONFIG_FILE does not exist" && exit 1 - -build_docker_if_required -resolve_template $PP1_ORIGIN_CONFIG_FILE PP1_RENDERED_CONFIG_FILE -resolve_template $PP2_ORIGIN_CONFIG_FILE PP2_RENDERED_CONFIG_FILE - -kurtosis clean --all -kurtosis run --enclave $KURTOSIS_ENCLAVE --args-file "$PP1_RENDERED_CONFIG_FILE" --image-download always $KURTOSIS_FOLDER -ok_or_fatal "Failed to run kurtosis pp1" - -kurtosis run --enclave $KURTOSIS_ENCLAVE --args-file "$PP2_RENDERED_CONFIG_FILE" --image-download always $KURTOSIS_FOLDER -ok_or_fatal "Failed to run kurtosis attached second cdk" diff --git a/test/scripts/agglayer_certificates_monitor.sh b/test/scripts/agglayer_certificates_monitor.sh deleted file mode 100755 index c530548f8..000000000 --- a/test/scripts/agglayer_certificates_monitor.sh +++ /dev/null @@ -1,89 +0,0 @@ -#!/usr/bin/env bash -# This script monitors the agglayer certificates progress of pessimistic proof. - -function parse_params(){ - # Check if the required arguments are provided. - if [ "$#" -lt 3 ]; then - echo "Usage: $0 " - exit 1 - fi - - # The number of batches to be verified. - settle_certificates_target="$1" - - # The script timeout (in seconds). - timeout="$2" - - # The network id of the L2 network. - l2_rpc_network_id="$3" -} - -function check_timeout(){ - local _end_time=$1 - current_time=$(date +%s) - if ((current_time > _end_time)); then - echo "[$(date '+%Y-%m-%d %H:%M:%S')] ❌ Exiting... Timeout reached not found the expected numbers of settled certs!" - exit 1 - fi -} - -function check_num_certificates(){ - readonly agglayer_rpc_url="$(kurtosis port print cdk agglayer agglayer)" - - cast_output=$(cast rpc --rpc-url "$agglayer_rpc_url" "interop_getLatestKnownCertificateHeader" "$l2_rpc_network_id" 2>&1) - - if [ $? -ne 0 ]; then - echo "[$(date '+%Y-%m-%d %H:%M:%S')] Error executing command cast rpc: $cast_output" - return - fi - - height=$(extract_certificate_height "$cast_output") - [[ -z "$height" ]] && { - echo "Error: Failed to extract certificate height: $height." >&3 - return - } - - status=$(extract_certificate_status "$cast_output") - [[ -z "$status" ]] && { - echo "Error: Failed to extract certificate status." >&3 - return - } - - echo "[$(date '+%Y-%m-%d %H:%M:%S')] Last known agglayer certificate height: $height, status: $status" >&3 - - if (( height > settle_certificates_target - 1 )); then - echo "[$(date '+%Y-%m-%d %H:%M:%S')] ✅ Success! The number of settled certificates has reached the target." >&3 - exit 0 - fi - - if (( height == settle_certificates_target - 1 )); then - if [ "$status" == "Settled" ]; then - echo "[$(date '+%Y-%m-%d %H:%M:%S')] ✅ Success! The number of settled certificates has reached the target." >&3 - exit 0 - fi - - echo "[$(date '+%Y-%m-%d %H:%M:%S')] ⚠️ Warning! The number of settled certificates is one less than the target." >&3 - fi -} - -function extract_certificate_height() { - local cast_output="$1" - echo "$cast_output" | jq -r '.height' -} - -function extract_certificate_status() { - local cast_output="$1" - echo "$cast_output" | jq -r '.status' -} - -# MAIN - -parse_params $* -start_time=$(date +%s) -end_time=$((start_time + timeout)) -echo "[$(date '+%Y-%m-%d %H:%M:%S')] Start monitoring agglayer certificates progress..." 
-while true; do - check_num_certificates - check_timeout $end_time - sleep 10 -done diff --git a/tools/aggsender_find_imported_bridge/aggsender_find_imported_bridge.go b/tools/aggsender_find_imported_bridge/aggsender_find_imported_bridge.go deleted file mode 100644 index 28115129d..000000000 --- a/tools/aggsender_find_imported_bridge/aggsender_find_imported_bridge.go +++ /dev/null @@ -1,121 +0,0 @@ -package main - -import ( - "encoding/json" - "fmt" - "math/big" - "os" - - "github.com/0xPolygon/cdk/agglayer" - "github.com/0xPolygon/cdk/aggsender/rpcclient" - "github.com/0xPolygon/cdk/aggsender/types" - "github.com/0xPolygon/cdk/bridgesync" - "github.com/0xPolygon/cdk/log" -) - -const ( - errLevelUnexpected = 1 - errLevelWrongParams = 2 - errLevelComms = 3 - errLevelNotFound = 4 - errLevelFoundButNotSettled = 5 - - base10 = 10 - minimumNumArgs = 3 -) - -func unmarshalGlobalIndex(globalIndex string) (*agglayer.GlobalIndex, error) { - var globalIndexParsed agglayer.GlobalIndex - // First try if it's already decomposed - err := json.Unmarshal([]byte(globalIndex), &globalIndexParsed) - if err != nil { - bigInt := new(big.Int) - _, ok := bigInt.SetString(globalIndex, base10) - if !ok { - return nil, fmt.Errorf("invalid global index: %v", globalIndex) - } - mainnetFlag, rollupIndex, leafIndex, err := bridgesync.DecodeGlobalIndex(bigInt) - if err != nil { - return nil, fmt.Errorf("invalid global index, fail to decode: %v", globalIndex) - } - globalIndexParsed.MainnetFlag = mainnetFlag - globalIndexParsed.RollupIndex = rollupIndex - globalIndexParsed.LeafIndex = leafIndex - } - return &globalIndexParsed, nil -} - -// This function finds out the certificate for a deposit -// It uses the aggsender RPC -func certContainsGlobalIndex(cert *types.CertificateInfo, globalIndex *agglayer.GlobalIndex) (bool, error) { - if cert == nil { - return false, nil - } - var certSigned agglayer.SignedCertificate - err := json.Unmarshal([]byte(cert.SignedCertificate), &certSigned) - if err != nil { - log.Debugf("cert: %v", cert.SignedCertificate) - return false, fmt.Errorf("error Unmarshal cert.
Err: %w", err) - } - for _, importedBridge := range certSigned.ImportedBridgeExits { - if *importedBridge.GlobalIndex == *globalIndex { - return true, nil - } - } - return false, nil -} - -func main() { - if len(os.Args) != minimumNumArgs { - fmt.Printf("Wrong number of arguments\n") - fmt.Printf(" Usage: %v \n", os.Args[0]) - os.Exit(errLevelWrongParams) - } - aggsenderRPC := os.Args[1] - globalIndex := os.Args[2] - decodedGlobalIndex, err := unmarshalGlobalIndex(globalIndex) - if err != nil { - log.Errorf("Error unmarshalGlobalIndex: %v", err) - os.Exit(errLevelWrongParams) - } - log.Debugf("decodedGlobalIndex: %v", decodedGlobalIndex) - aggsenderClient := rpcclient.NewClient(aggsenderRPC) - // Get latest certificate - cert, err := aggsenderClient.GetCertificateHeaderPerHeight(nil) - if err != nil { - log.Errorf("Error: %v", err) - os.Exit(errLevelComms) - } - - currentHeight := cert.Height - for cert != nil { - found, err := certContainsGlobalIndex(cert, decodedGlobalIndex) - if err != nil { - log.Errorf("Error: %v", err) - os.Exit(errLevelUnexpected) - } - if found { - log.Infof("Found certificate for global index: %v", globalIndex) - if cert.Status.IsSettled() { - log.Infof("Certificate is settled: %s status:%s", cert.ID(), cert.Status.String()) - os.Exit(0) - } - log.Errorf("Certificate is not settled") - os.Exit(errLevelFoundButNotSettled) - } else { - log.Debugf("Certificate not found for global index: %v", globalIndex) - } - // We have checked the oldest cert - if currentHeight == 0 { - log.Errorf("Checked all certs and it's not found") - os.Exit(errLevelNotFound) - } - log.Infof("Checking previous certificate, height: %v", currentHeight) - cert, err = aggsenderClient.GetCertificateHeaderPerHeight(&currentHeight) - if err != nil { - log.Errorf("Error: %v", err) - os.Exit(errLevelComms) - } - currentHeight-- - } -}
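For reference, the 32-byte certificate metadata packing removed above (version byte, big-endian from-block, offset, and created-at, followed by zero padding) can be reproduced with the standard library alone. The following is a minimal, standalone sketch of that layout; the names certMetadata, toHash, and fromHash are hypothetical stand-ins rather than the removed package's API, and the layout is inferred from the deleted ToHash/NewCertificateMetadataFromHash code.

package main

import (
	"encoding/binary"
	"fmt"
)

// certMetadata is a hypothetical stand-in for the removed CertificateMetadata type.
type certMetadata struct {
	Version   uint8
	FromBlock uint64
	Offset    uint32
	CreatedAt uint32
}

// toHash packs the fields into a 32-byte value: byte 0 = version,
// bytes 1..9 = from-block (big endian), bytes 9..13 = offset,
// bytes 13..17 = created-at; the remaining bytes are zero padding.
func (m certMetadata) toHash() [32]byte {
	var b [32]byte
	b[0] = m.Version
	binary.BigEndian.PutUint64(b[1:9], m.FromBlock)
	binary.BigEndian.PutUint32(b[9:13], m.Offset)
	binary.BigEndian.PutUint32(b[13:17], m.CreatedAt)
	return b
}

// fromHash reverses toHash for version >= 1 payloads.
func fromHash(b [32]byte) certMetadata {
	return certMetadata{
		Version:   b[0],
		FromBlock: binary.BigEndian.Uint64(b[1:9]),
		Offset:    binary.BigEndian.Uint32(b[9:13]),
		CreatedAt: binary.BigEndian.Uint32(b[13:17]),
	}
}

func main() {
	in := certMetadata{Version: 1, FromBlock: 123567890, Offset: 1000, CreatedAt: 1700000000}
	out := fromHash(in.toHash())
	fmt.Println("round-trip ok:", in == out) // prints: round-trip ok: true
}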