From 731b9dc386a34ddb79e3b2df3ef3fc48421b12e6 Mon Sep 17 00:00:00 2001 From: Arnau Date: Wed, 7 Aug 2024 19:22:31 +0200 Subject: [PATCH] implementation done, missing finish e2e test --- bridgesync/bridgesync.go | 4 + claimsponsor/claimsponsor.go | 327 +++++++++++++++++++++++++++++++ claimsponsor/e2e_test.go | 79 ++++++++ claimsponsor/evmclaimsponsor.go | 168 ++++++++++++++++ l1infotreesync/l1infotreesync.go | 11 +- l1infotreesync/processor.go | 8 +- test/helpers/aggoracle_e2e.go | 207 ++++++++++++++++--- tree/appendonlytree.go | 14 +- tree/tree.go | 28 +-- tree/updatabletree.go | 6 +- 10 files changed, 789 insertions(+), 63 deletions(-) create mode 100644 claimsponsor/claimsponsor.go create mode 100644 claimsponsor/e2e_test.go create mode 100644 claimsponsor/evmclaimsponsor.go diff --git a/bridgesync/bridgesync.go b/bridgesync/bridgesync.go index 3223a0953..fdbac6285 100644 --- a/bridgesync/bridgesync.go +++ b/bridgesync/bridgesync.go @@ -152,3 +152,7 @@ func (s *BridgeSync) GetBridgeIndexByRoot(ctx context.Context, root common.Hash) func (s *BridgeSync) GetClaimsAndBridges(ctx context.Context, fromBlock, toBlock uint64) ([]Event, error) { return s.processor.GetClaimsAndBridges(ctx, fromBlock, toBlock) } + +func (s *BridgeSync) GetProof(ctx context.Context, depositCount uint32, localExitRoot common.Hash) ([32]common.Hash, error) { + return s.processor.exitTree.GetProof(ctx, depositCount, localExitRoot) +} diff --git a/claimsponsor/claimsponsor.go b/claimsponsor/claimsponsor.go new file mode 100644 index 000000000..204e868b1 --- /dev/null +++ b/claimsponsor/claimsponsor.go @@ -0,0 +1,327 @@ +package claimsponsor + +import ( + "context" + "encoding/json" + "errors" + "math" + "math/big" + "time" + + dbCommon "github.com/0xPolygon/cdk/common" + "github.com/0xPolygon/cdk/log" + "github.com/0xPolygon/cdk/sync" + "github.com/ethereum/go-ethereum/common" + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/kv/iter" + "github.com/ledgerwatch/erigon-lib/kv/mdbx" +) + +type ClaimStatus string + +const ( + PendingClaimStatus = "pending" + WIPStatus = "work in progress" + SuccessClaimStatus = "success" + FailedClaimStatus = "failed" + + claimTable = "claimsponsor-tx" + queueTable = "claimsponsor-queue" +) + +var ( + ErrInvalidClaim = errors.New("invalid claim") + ErrNotFound = errors.New("not found") +) + +// Claim representation of a claim event +type Claim struct { + LeafType uint8 + ProofLocalExitRoot [32]common.Hash + ProofRollupExitRoot [32]common.Hash + GlobalIndex *big.Int + MainnetExitRoot common.Hash + RollupExitRoot common.Hash + OriginNetwork uint32 + OriginTokenAddress common.Address + DestinationNetwork uint32 + DestinationAddress common.Address + Amount *big.Int + Metadata []byte + + Status ClaimStatus + TxID string +} + +func (c *Claim) Key() []byte { + return c.GlobalIndex.Bytes() +} + +type ClaimSender interface { + checkClaim(ctx context.Context, claim *Claim) error + sendClaim(ctx context.Context, claim *Claim) (string, error) + claimStatus(id string) (ClaimStatus, error) +} + +type ClaimSponsor struct { + db kv.RwDB + sender ClaimSender + rh *sync.RetryHandler + waitTxToBeMinedPeriod time.Duration +} + +func newClaimSponsor(dbPath string) (*ClaimSponsor, error) { + tableCfgFunc := func(defaultBuckets kv.TableCfg) kv.TableCfg { + cfg := kv.TableCfg{ + claimTable: {}, + queueTable: {}, + } + return cfg + } + db, err := mdbx.NewMDBX(nil). + Path(dbPath). + WithTableCfg(tableCfgFunc). 
+ Open() + if err != nil { + return nil, err + } + return &ClaimSponsor{ + db: db, + }, nil +} + +func (c *ClaimSponsor) Start(ctx context.Context) { + var ( + attempts int + err error + ) + for { + if err != nil { + attempts++ + c.rh.Handle("claimsponsor start", attempts) + } + tx, err2 := c.db.BeginRw(ctx) + if err2 != nil { + err = err2 + log.Errorf("error calling BeginRw: %v", err) + continue + } + queueIndex, globalIndex, err2 := getFirstQueueIndex(tx) + if err2 != nil { + err = err2 + tx.Rollback() + log.Errorf("error calling getFirstQueueIndex: %v", err) + continue + } + claim, err2 := getClaim(tx, globalIndex) + if err2 != nil { + err = err2 + tx.Rollback() + log.Errorf("error calling getClaim with globalIndex %s: %v", globalIndex.String(), err) + continue + } + if claim.TxID == "" { + txID, err2 := c.sender.sendClaim(ctx, claim) + if err2 != nil { + err = err2 + tx.Rollback() + log.Errorf("error calling sendClaim with globalIndex %s: %v", globalIndex.String(), err) + continue + } + claim.TxID = txID + claim.Status = WIPStatus + err2 = putClaim(tx, claim) + if err2 != nil { + err = err2 + tx.Rollback() + log.Errorf("error calling putClaim with globalIndex %s: %v", globalIndex.String(), err) + continue + } + } + err2 = tx.Commit() + if err2 != nil { + err = err2 + log.Errorf("error calling tx.Commit after putting claim: %v", err) + continue + } + + log.Infof("waiting for tx %s with global index %s to success or fail", claim.TxID, globalIndex.String()) + status, err2 := c.waitTxToBeSuccessOrFail(ctx, claim.TxID) + if err2 != nil { + err = err2 + log.Errorf("error calling waitTxToBeMinedOrFail for tx %s: %v", claim.TxID, err) + continue + } + log.Infof("tx %s with global index %s concluded with status: %s", claim.TxID, globalIndex.String(), status) + tx, err2 = c.db.BeginRw(ctx) + if err2 != nil { + err = err2 + log.Errorf("error calling BeginRw: %v", err) + continue + } + claim.Status = status + err2 = putClaim(tx, claim) + if err2 != nil { + err = err2 + tx.Rollback() + log.Errorf("error calling putClaim with globalIndex %s: %v", globalIndex.String(), err) + continue + } + err2 = tx.Delete(queueTable, dbCommon.Uint64ToBytes(queueIndex)) + if err2 != nil { + err = err2 + tx.Rollback() + log.Errorf("error calling delete on the queue table with index %d: %v", queueIndex, err) + continue + } + err2 = tx.Commit() + if err2 != nil { + err = err2 + log.Errorf("error calling tx.Commit after putting claim: %v", err) + continue + } + + attempts = 0 + } +} + +func (c *ClaimSponsor) waitTxToBeSuccessOrFail(ctx context.Context, txID string) (ClaimStatus, error) { + t := time.NewTicker(c.waitTxToBeMinedPeriod) + for { + select { + case <-ctx.Done(): + return "", errors.New("context cancelled") + case <-t.C: + status, err := c.sender.claimStatus(txID) + if err != nil { + return "", err + } + if status == FailedClaimStatus || status == SuccessClaimStatus { + return status, nil + } + } + } +} + +func (c *ClaimSponsor) AddClaimToQueue(ctx context.Context, claim *Claim) error { + if claim.GlobalIndex == nil { + return ErrInvalidClaim + } + claim.Status = PendingClaimStatus + tx, err := c.db.BeginRw(ctx) + if err != nil { + return err + } + + _, err = getClaim(tx, claim.GlobalIndex) + if err != ErrNotFound { + if err != nil { + return err + } else { + return errors.New("claim already added") + } + } + + err = putClaim(tx, claim) + if err != nil { + tx.Rollback() + return err + } + + lastQueuePosition, _, err := getLastQueueIndex(tx) + if err != nil { + tx.Rollback() + return err + } + queuePosition := 
lastQueuePosition + 1 + err = tx.Put(queueTable, dbCommon.Uint64ToBytes(queuePosition), claim.Key()) + if err != nil { + tx.Rollback() + return err + } + + return tx.Commit() +} + +func putClaim(tx kv.RwTx, claim *Claim) error { + value, err := json.Marshal(claim) + if err != nil { + return err + } + return tx.Put(claimTable, claim.Key(), value) +} + +func (c *ClaimSponsor) getClaimByQueueIndex(ctx context.Context, queueIndex uint64) (*Claim, error) { + tx, err := c.db.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + + globalIndexBytes, err := tx.GetOne(queueTable, dbCommon.Uint64ToBytes(queueIndex)) + if err != nil { + return nil, err + } + if globalIndexBytes == nil { + return nil, ErrNotFound + } + + return getClaim(tx, new(big.Int).SetBytes(globalIndexBytes)) +} + +func getLastQueueIndex(tx kv.Tx) (uint64, *big.Int, error) { + iter, err := tx.RangeDescend( + queueTable, + dbCommon.Uint64ToBytes(math.MaxUint64), + dbCommon.Uint64ToBytes(0), 1, + ) + if err != nil { + return 0, nil, err + } + return getIndex(iter) +} + +func getFirstQueueIndex(tx kv.Tx) (uint64, *big.Int, error) { + iter, err := tx.RangeAscend( + queueTable, + dbCommon.Uint64ToBytes(0), + nil, 1, + ) + if err != nil { + return 0, nil, err + } + return getIndex(iter) +} + +func getIndex(iter iter.KV) (uint64, *big.Int, error) { + k, v, err := iter.Next() + if err != nil { + return 0, nil, err + } + if k == nil { + return 0, nil, ErrNotFound + } + globalIndex := new(big.Int).SetBytes(v) + return dbCommon.BytesToUint64(k), globalIndex, nil +} + +func (c *ClaimSponsor) GetClaim(ctx context.Context, globalIndex *big.Int) (*Claim, error) { + tx, err := c.db.BeginRo(ctx) + if err != nil { + return nil, err + } + return getClaim(tx, globalIndex) +} + +func getClaim(tx kv.Tx, globalIndex *big.Int) (*Claim, error) { + claimBytes, err := tx.GetOne(claimTable, globalIndex.Bytes()) + if err != nil { + return nil, err + } + if claimBytes == nil { + return nil, ErrNotFound + } + claim := &Claim{} + err = json.Unmarshal(claimBytes, claim) + return claim, err +} diff --git a/claimsponsor/e2e_test.go b/claimsponsor/e2e_test.go new file mode 100644 index 000000000..0eda6e82d --- /dev/null +++ b/claimsponsor/e2e_test.go @@ -0,0 +1,79 @@ +package claimsponsor_test + +import ( + "context" + "fmt" + "math/big" + "testing" + "time" + + "github.com/0xPolygon/cdk/bridgesync" + "github.com/0xPolygon/cdk/claimsponsor" + "github.com/0xPolygon/cdk/etherman" + "github.com/0xPolygon/cdk/test/helpers" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func TestE2EL1toEVML2(t *testing.T) { + // start other needed components + ctx := context.Background() + env := helpers.SetupAggoracleWithEVMChain(t) + dbPathBridgeSyncL1 := t.TempDir() + bridgeSyncL1, err := bridgesync.NewL1(ctx, dbPathBridgeSyncL1, env.BridgeL1Addr, 10, etherman.LatestBlock, env.ReorgDetector, env.L1Client.Client(), 0) + require.NoError(t, err) + go bridgeSyncL1.Start(ctx) + + // start claim sponsor + dbPathClaimSponsor := t.TempDir() + ethTxMock := helpers.NewEthTxManagerMock(t) + claimer, err := claimsponsor.NewEVMClaimSponsor(dbPathClaimSponsor, env.L2Client.Client(), env.BridgeL2Addr, env.AuthL2.From, 100_000, 0, ethTxMock) + require.NoError(t, err) + go claimer.Start(ctx) + + // test + for i := 0; i < 10; i++ { + // Send bridges to L2, wait for GER to be injected on L2 + amount := big.NewInt(int64(i) + 1) + _, err := env.BridgeL1Contract.BridgeAsset(env.AuthL1, 
env.NetworkIDL2, env.AuthL2.From, amount, common.Address{}, true, nil) + require.NoError(t, err) + env.L1Client.Commit() + time.Sleep(time.Millisecond * 50) + expectedGER, err := env.GERL1Contract.GetLastGlobalExitRoot(&bind.CallOpts{Pending: false}) + require.NoError(t, err) + isInjected, err := env.AggOracleSender.IsGERAlreadyInjected(expectedGER) + require.NoError(t, err) + require.True(t, isInjected, fmt.Sprintf("iteration %d, GER: %s", i, common.Bytes2Hex(expectedGER[:]))) + + // Build MP using bridgeSyncL1 & env.L1InfoTreeSync + info, err := env.L1InfoTreeSync.GetInfoByIndex(ctx, uint32(i)) + require.NoError(t, err) + localProof, err := bridgeSyncL1.GetProof(ctx, uint32(i), info.MainnetExitRoot) + require.NoError(t, err) + rollupProof, err := env.L1InfoTreeSync.GetRollupExitTreeMerkleProof(ctx, 0, common.Hash{}) + + // Request to sponsor claim + claimer.AddClaimToQueue(ctx, &claimsponsor.Claim{ + LeafType: 0, + ProofLocalExitRoot: localProof, + ProofRollupExitRoot: rollupProof, + GlobalIndex: nil, // TODO + MainnetExitRoot: info.MainnetExitRoot, + RollupExitRoot: info.RollupExitRoot, + OriginNetwork: 0, + OriginTokenAddress: common.Address{}, + DestinationNetwork: env.NetworkIDL2, + DestinationAddress: env.AuthL2.From, + Amount: amount, + Metadata: nil, + }) + + // TODO: Wait until success + + // Check on contract that is claimed + isClaimed, err := env.BridgeL2Contract.IsClaimed(&bind.CallOpts{Pending: false}, uint32(i), 0) + require.NoError(t, err) + require.True(t, isClaimed) + } +} diff --git a/claimsponsor/evmclaimsponsor.go b/claimsponsor/evmclaimsponsor.go new file mode 100644 index 000000000..111a5ebdc --- /dev/null +++ b/claimsponsor/evmclaimsponsor.go @@ -0,0 +1,168 @@ +package claimsponsor + +import ( + "context" + "fmt" + "math/big" + + "github.com/0xPolygon/cdk-contracts-tooling/contracts/etrog/polygonzkevmbridgev2" + "github.com/0xPolygonHermez/zkevm-ethtx-manager/ethtxmanager" + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" +) + +const ( + // LeafTypeAsset represents a bridge asset + LeafTypeAsset uint8 = 0 + // LeafTypeMessage represents a bridge message + LeafTypeMessage uint8 = 1 + gasTooHighErrTemplate = "Claim tx estimated to consume more gas than the maximum allowed by the service. 
Estimated %d, maximum allowed: %d" +) + +type EthClienter interface { + ethereum.GasEstimator + bind.ContractBackend +} + +type EthTxManager interface { + Remove(ctx context.Context, id common.Hash) error + ResultsByStatus(ctx context.Context, statuses []ethtxmanager.MonitoredTxStatus) ([]ethtxmanager.MonitoredTxResult, error) + Result(ctx context.Context, id common.Hash) (ethtxmanager.MonitoredTxResult, error) + Add(ctx context.Context, to *common.Address, forcedNonce *uint64, value *big.Int, data []byte, gasOffset uint64, sidecar *types.BlobTxSidecar) (common.Hash, error) +} + +type EVMClaimSponsor struct { + *ClaimSponsor + l2Client EthClienter + bridgeABI *abi.ABI + bridgeAddr common.Address + bridgeContract *polygonzkevmbridgev2.Polygonzkevmbridgev2 + ethTxManager EthTxManager + sender common.Address + gasOffest uint64 + maxGas uint64 +} + +func NewEVMClaimSponsor( + dbPath string, + l2Client EthClienter, + bridge common.Address, + sender common.Address, + maxGas, gasOffset uint64, + ethTxManager EthTxManager, +) (*EVMClaimSponsor, error) { + contract, err := polygonzkevmbridgev2.NewPolygonzkevmbridgev2(bridge, l2Client) + if err != nil { + return nil, err + } + abi, err := polygonzkevmbridgev2.Polygonzkevmbridgev2MetaData.GetAbi() + if err != nil { + return nil, err + } + sponsor, err := newClaimSponsor(dbPath) + if err != nil { + return nil, err + } + return &EVMClaimSponsor{ + ClaimSponsor: sponsor, + l2Client: l2Client, + bridgeABI: abi, + bridgeAddr: bridge, + bridgeContract: contract, + sender: sender, + gasOffest: gasOffset, + maxGas: maxGas, + ethTxManager: ethTxManager, + }, nil +} + +func (c *EVMClaimSponsor) checkClaim(ctx context.Context, claim *Claim) error { + data, err := c.buildClaimTxData(claim) + if err != nil { + return err + } + gas, err := c.l2Client.EstimateGas(ctx, ethereum.CallMsg{ + From: c.sender, + To: &c.bridgeAddr, + Data: data, + }) + if err != nil { + return err + } + if gas > c.maxGas { + return fmt.Errorf(gasTooHighErrTemplate, gas, c.maxGas) + } + return nil +} + +func (c *EVMClaimSponsor) sendTx(ctx context.Context, claim *Claim) (string, error) { + data, err := c.buildClaimTxData(claim) + if err != nil { + return "", err + } + id, err := c.ethTxManager.Add(ctx, &c.bridgeAddr, nil, big.NewInt(0), data, c.gasOffest, nil) + if err != nil { + return "", err + } + return id.Hex(), nil +} + +func (c *EVMClaimSponsor) claimStatus(ctx context.Context, id string) (ClaimStatus, error) { + res, err := c.ethTxManager.Result(ctx, common.HexToHash(id)) + if err != nil { + return "", err + } + switch res.Status { + case ethtxmanager.MonitoredTxStatusCreated, + ethtxmanager.MonitoredTxStatusSent: + return WIPStatus, nil + case ethtxmanager.MonitoredTxStatusFailed: + return FailedClaimStatus, nil + case ethtxmanager.MonitoredTxStatusMined, + ethtxmanager.MonitoredTxStatusSafe, + ethtxmanager.MonitoredTxStatusFinalized: + return SuccessClaimStatus, nil + default: + return "", fmt.Errorf("unexpected tx status: %v", res.Status) + } +} + +func (c *EVMClaimSponsor) buildClaimTxData(claim *Claim) ([]byte, error) { + switch claim.LeafType { + case LeafTypeAsset: + return c.bridgeABI.Pack( + "claimAsset", + claim.ProofLocalExitRoot, // bytes32[32] smtProofLocalExitRoot + claim.ProofRollupExitRoot, // bytes32[32] smtProofRollupExitRoot + claim.GlobalIndex, // uint256 globalIndex + claim.MainnetExitRoot, // bytes32 mainnetExitRoot + claim.RollupExitRoot, // bytes32 rollupExitRoot + claim.OriginNetwork, // uint32 originNetwork + claim.OriginTokenAddress, // address 
originTokenAddress, + claim.DestinationNetwork, // uint32 destinationNetwork + claim.DestinationAddress, // address destinationAddress + claim.Amount, // uint256 amount + claim.Metadata, // bytes metadata + ) + case LeafTypeMessage: + return c.bridgeABI.Pack( + "claimMessage", + claim.ProofLocalExitRoot, // bytes32[32] smtProofLocalExitRoot + claim.ProofRollupExitRoot, // bytes32[32] smtProofRollupExitRoot + claim.GlobalIndex, // uint256 globalIndex + claim.MainnetExitRoot, // bytes32 mainnetExitRoot + claim.RollupExitRoot, // bytes32 rollupExitRoot + claim.OriginNetwork, // uint32 originNetwork + claim.OriginTokenAddress, // address originTokenAddress, + claim.DestinationNetwork, // uint32 destinationNetwork + claim.DestinationAddress, // address destinationAddress + claim.Amount, // uint256 amount + claim.Metadata, // bytes metadata + ) + default: + return nil, fmt.Errorf("unexpected leaf type %d", claim.LeafType) + } +} diff --git a/l1infotreesync/l1infotreesync.go b/l1infotreesync/l1infotreesync.go index 32db87513..0063f6588 100644 --- a/l1infotreesync/l1infotreesync.go +++ b/l1infotreesync/l1infotreesync.go @@ -7,6 +7,7 @@ import ( "github.com/0xPolygon/cdk/config/types" "github.com/0xPolygon/cdk/etherman" "github.com/0xPolygon/cdk/sync" + "github.com/0xPolygon/cdk/tree" "github.com/ethereum/go-ethereum/common" ) @@ -104,10 +105,18 @@ func (s *L1InfoTreeSync) Start(ctx context.Context) { } // GetL1InfoTreeMerkleProof creates a merkle proof for the L1 Info tree -func (s *L1InfoTreeSync) GetL1InfoTreeMerkleProof(ctx context.Context, index uint32) ([]common.Hash, common.Hash, error) { +func (s *L1InfoTreeSync) GetL1InfoTreeMerkleProof(ctx context.Context, index uint32) ([32]common.Hash, common.Hash, error) { return s.processor.GetL1InfoTreeMerkleProof(ctx, index) } +// GetRollupExitTreeMerkleProof creates a merkle proof for the rollup exit tree +func (s *L1InfoTreeSync) GetRollupExitTreeMerkleProof(ctx context.Context, networkID uint32, root common.Hash) ([32]common.Hash, error) { + if networkID == 0 { + return tree.EmptyProof, nil + } + return s.processor.rollupExitTree.GetProof(ctx, networkID-1, root) +} + // GetLatestInfoUntilBlock returns the most recent L1InfoTreeLeaf that occurred before or at blockNum. 
// If the blockNum has not been processed yet the error ErrBlockNotProcessed will be returned func (s *L1InfoTreeSync) GetLatestInfoUntilBlock(ctx context.Context, blockNum uint64) (*L1InfoTreeLeaf, error) { diff --git a/l1infotreesync/processor.go b/l1infotreesync/processor.go index 1d2588890..1f02038c2 100644 --- a/l1infotreesync/processor.go +++ b/l1infotreesync/processor.go @@ -153,21 +153,21 @@ func newProcessor(ctx context.Context, dbPath string) (*processor, error) { } // GetL1InfoTreeMerkleProof creates a merkle proof for the L1 Info tree -func (p *processor) GetL1InfoTreeMerkleProof(ctx context.Context, index uint32) ([]ethCommon.Hash, ethCommon.Hash, error) { +func (p *processor) GetL1InfoTreeMerkleProof(ctx context.Context, index uint32) ([32]ethCommon.Hash, ethCommon.Hash, error) { tx, err := p.db.BeginRo(ctx) if err != nil { - return nil, ethCommon.Hash{}, err + return [32]ethCommon.Hash{}, ethCommon.Hash{}, err } defer tx.Rollback() root, err := p.l1InfoTree.GetRootByIndex(tx, index) if err != nil { - return nil, ethCommon.Hash{}, err + return [32]ethCommon.Hash{}, ethCommon.Hash{}, err } proof, err := p.l1InfoTree.GetProof(ctx, index, root) if err != nil { - return nil, ethCommon.Hash{}, err + return [32]ethCommon.Hash{}, ethCommon.Hash{}, err } // TODO: check if we need to return root or wat diff --git a/test/helpers/aggoracle_e2e.go b/test/helpers/aggoracle_e2e.go index 27c3027b6..86db8a455 100644 --- a/test/helpers/aggoracle_e2e.go +++ b/test/helpers/aggoracle_e2e.go @@ -7,6 +7,7 @@ import ( "testing" "time" + "github.com/0xPolygon/cdk-contracts-tooling/contracts/etrog/polygonzkevmbridgev2" gerContractL1 "github.com/0xPolygon/cdk-contracts-tooling/contracts/manual/globalexitrootnopush0" gerContractEVMChain "github.com/0xPolygon/cdk-contracts-tooling/contracts/manual/pessimisticglobalexitrootnopush0" "github.com/0xPolygon/cdk/aggoracle" @@ -15,6 +16,7 @@ import ( "github.com/0xPolygon/cdk/l1infotreesync" "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/reorgdetector" + "github.com/0xPolygon/cdk/test/contracts/transparentupgradableproxy" "github.com/0xPolygonHermez/zkevm-ethtx-manager/ethtxmanager" "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi/bind" @@ -26,42 +28,55 @@ import ( "github.com/stretchr/testify/require" ) +const ( + NetworkIDL2 = uint32(1) +) + type AggoracleWithEVMChainEnv struct { - L1Client *simulated.Backend - L2Client *simulated.Backend - L1InfoTreeSync *l1infotreesync.L1InfoTreeSync - GERL1Contract *gerContractL1.Globalexitrootnopush0 - GERL1Addr common.Address - GERL2Contract *gerContractEVMChain.Pessimisticglobalexitrootnopush0 - GERL2Addr common.Address - AuthL1 *bind.TransactOpts - AuthL2 *bind.TransactOpts - AggOracle *aggoracle.AggOracle - AggOracleSender aggoracle.ChainSender - ReorgDetector *reorgdetector.ReorgDetector + L1Client *simulated.Backend + L2Client *simulated.Backend + L1InfoTreeSync *l1infotreesync.L1InfoTreeSync + GERL1Contract *gerContractL1.Globalexitrootnopush0 + GERL1Addr common.Address + GERL2Contract *gerContractEVMChain.Pessimisticglobalexitrootnopush0 + GERL2Addr common.Address + AuthL1 *bind.TransactOpts + AuthL2 *bind.TransactOpts + AggOracle *aggoracle.AggOracle + AggOracleSender aggoracle.ChainSender + ReorgDetector *reorgdetector.ReorgDetector + BridgeL1Contract *polygonzkevmbridgev2.Polygonzkevmbridgev2 + BridgeL1Addr common.Address + BridgeL2Contract *polygonzkevmbridgev2.Polygonzkevmbridgev2 + BridgeL2Addr common.Address + NetworkIDL2 uint32 } func SetupAggoracleWithEVMChain(t 
*testing.T) *AggoracleWithEVMChainEnv { ctx := context.Background() - l1Client, syncer, gerL1Contract, gerL1Addr, authL1, rd := CommonSetup(t) - sender, l2Client, gerL2Contract, gerL2Addr, authL2 := EVMSetup(t) + l1Client, syncer, gerL1Contract, gerL1Addr, bridgeL1Contract, bridgeL1Addr, authL1, rd := CommonSetup(t) + sender, l2Client, gerL2Contract, gerL2Addr, bridgeL2Contract, bridgeL2Addr, authL2 := EVMSetup(t) oracle, err := aggoracle.New(sender, l1Client.Client(), syncer, etherman.LatestBlock, time.Millisecond) require.NoError(t, err) go oracle.Start(ctx) return &AggoracleWithEVMChainEnv{ - L1Client: l1Client, - L2Client: l2Client, - L1InfoTreeSync: syncer, - GERL1Contract: gerL1Contract, - GERL1Addr: gerL1Addr, - GERL2Contract: gerL2Contract, - GERL2Addr: gerL2Addr, - AuthL1: authL1, - AuthL2: authL2, - AggOracle: oracle, - AggOracleSender: sender, - ReorgDetector: rd, + L1Client: l1Client, + L2Client: l2Client, + L1InfoTreeSync: syncer, + GERL1Contract: gerL1Contract, + GERL1Addr: gerL1Addr, + GERL2Contract: gerL2Contract, + GERL2Addr: gerL2Addr, + AuthL1: authL1, + AuthL2: authL2, + AggOracle: oracle, + AggOracleSender: sender, + ReorgDetector: rd, + BridgeL1Contract: bridgeL1Contract, + BridgeL1Addr: bridgeL1Addr, + BridgeL2Contract: bridgeL2Contract, + BridgeL2Addr: bridgeL2Addr, } } @@ -70,6 +85,8 @@ func CommonSetup(t *testing.T) ( *l1infotreesync.L1InfoTreeSync, *gerContractL1.Globalexitrootnopush0, common.Address, + *polygonzkevmbridgev2.Polygonzkevmbridgev2, + common.Address, *bind.TransactOpts, *reorgdetector.ReorgDetector, ) { @@ -80,7 +97,7 @@ func CommonSetup(t *testing.T) ( require.NoError(t, err) authL1, err := bind.NewKeyedTransactorWithChainID(privateKeyL1, big.NewInt(1337)) require.NoError(t, err) - l1Client, gerL1Addr, gerL1Contract, err := newSimulatedL1(authL1) + l1Client, gerL1Addr, gerL1Contract, bridgeL1Addr, bridgeL1Contract, err := newSimulatedL1(authL1) require.NoError(t, err) // Reorg detector dbPathReorgDetector := t.TempDir() @@ -92,7 +109,7 @@ func CommonSetup(t *testing.T) ( require.NoError(t, err) go syncer.Start(ctx) - return l1Client, syncer, gerL1Contract, gerL1Addr, authL1, reorg + return l1Client, syncer, gerL1Contract, gerL1Addr, bridgeL1Contract, bridgeL1Addr, authL1, reorg } func EVMSetup(t *testing.T) ( @@ -100,13 +117,15 @@ func EVMSetup(t *testing.T) ( *simulated.Backend, *gerContractEVMChain.Pessimisticglobalexitrootnopush0, common.Address, + *polygonzkevmbridgev2.Polygonzkevmbridgev2, + common.Address, *bind.TransactOpts, ) { privateKeyL2, err := crypto.GenerateKey() require.NoError(t, err) authL2, err := bind.NewKeyedTransactorWithChainID(privateKeyL2, big.NewInt(1337)) require.NoError(t, err) - l2Client, gerL2Addr, gerL2Sc, err := newSimulatedEVMAggSovereignChain(authL2) + l2Client, gerL2Addr, gerL2Sc, bridgeL2Addr, bridgeL2Sc, err := newSimulatedEVMAggSovereignChain(authL2) require.NoError(t, err) ethTxManMock := NewEthTxManagerMock(t) // id, err := c.ethTxMan.Add(ctx, &c.gerAddr, nil, big.NewInt(0), tx.Data(), c.gasOffset, nil) @@ -170,15 +189,23 @@ func EVMSetup(t *testing.T) ( sender, err := chaingersender.NewEVMChainGERSender(gerL2Addr, authL2.From, l2Client.Client(), ethTxManMock, 0, time.Millisecond*50) require.NoError(t, err) - return sender, l2Client, gerL2Sc, gerL2Addr, authL2 + return sender, l2Client, gerL2Sc, gerL2Addr, bridgeL2Sc, bridgeL2Addr, authL2 } func newSimulatedL1(auth *bind.TransactOpts) ( client *simulated.Backend, gerAddr common.Address, gerContract *gerContractL1.Globalexitrootnopush0, + bridgeAddr 
common.Address, + bridgeContract *polygonzkevmbridgev2.Polygonzkevmbridgev2, err error, ) { + ctx := context.Background() + privateKeyL1, err := crypto.GenerateKey() + if err != nil { + return + } + authDeployer, err := bind.NewKeyedTransactorWithChainID(privateKeyL1, big.NewInt(1337)) balance, _ := new(big.Int).SetString("10000000000000000000000000", 10) //nolint:gomnd address := auth.From genesisAlloc := map[common.Address]types.Account{ @@ -189,9 +216,65 @@ func newSimulatedL1(auth *bind.TransactOpts) ( blockGasLimit := uint64(999999999999999999) //nolint:gomnd client = simulated.NewBackend(genesisAlloc, simulated.WithBlockGasLimit(blockGasLimit)) + bridgeImplementationAddr, _, _, err := polygonzkevmbridgev2.DeployPolygonzkevmbridgev2(authDeployer, client.Client()) + if err != nil { + return + } + client.Commit() + + nonce, err := client.Client().PendingNonceAt(ctx, authDeployer.From) + if err != nil { + return + } + precalculatedAddr := crypto.CreateAddress(authDeployer.From, nonce+1) + bridgeABI, err := polygonzkevmbridgev2.Polygonzkevmbridgev2MetaData.GetAbi() + if err != nil { + return + } + if bridgeABI == nil { + err = errors.New("GetABI returned nil") + return + } + dataCallProxy, err := bridgeABI.Pack("initialize", + uint32(0), // networkIDMainnet + common.Address{}, // gasTokenAddressMainnet" + uint32(0), // gasTokenNetworkMainnet + precalculatedAddr, + common.Address{}, + []byte{}, // gasTokenMetadata + ) + if err != nil { + return + } + bridgeAddr, _, _, err = transparentupgradableproxy.DeployTransparentupgradableproxy( + authDeployer, + client.Client(), + bridgeImplementationAddr, + authDeployer.From, + dataCallProxy, + ) + if err != nil { + return + } + client.Commit() + bridgeContract, err = polygonzkevmbridgev2.NewPolygonzkevmbridgev2(bridgeAddr, client.Client()) + if err != nil { + return + } + checkGERAddr, err := bridgeContract.GlobalExitRootManager(&bind.CallOpts{}) + if err != nil { + return + } + if precalculatedAddr != checkGERAddr { + err = errors.New("error deploying bridge") + } + gerAddr, _, gerContract, err = gerContractL1.DeployGlobalexitrootnopush0(auth, client.Client(), auth.From, auth.From) client.Commit() + if precalculatedAddr != gerAddr { + err = errors.New("error calculating addr") + } return } @@ -199,8 +282,16 @@ func newSimulatedEVMAggSovereignChain(auth *bind.TransactOpts) ( client *simulated.Backend, gerAddr common.Address, gerContract *gerContractEVMChain.Pessimisticglobalexitrootnopush0, + bridgeAddr common.Address, + bridgeContract *polygonzkevmbridgev2.Polygonzkevmbridgev2, err error, ) { + ctx := context.Background() + privateKeyL1, err := crypto.GenerateKey() + if err != nil { + return + } + authDeployer, err := bind.NewKeyedTransactorWithChainID(privateKeyL1, big.NewInt(1337)) balance, _ := new(big.Int).SetString("10000000000000000000000000", 10) //nolint:gomnd address := auth.From genesisAlloc := map[common.Address]types.Account{ @@ -211,6 +302,59 @@ func newSimulatedEVMAggSovereignChain(auth *bind.TransactOpts) ( blockGasLimit := uint64(999999999999999999) //nolint:gomnd client = simulated.NewBackend(genesisAlloc, simulated.WithBlockGasLimit(blockGasLimit)) + bridgeImplementationAddr, _, _, err := polygonzkevmbridgev2.DeployPolygonzkevmbridgev2(authDeployer, client.Client()) + if err != nil { + return + } + client.Commit() + + nonce, err := client.Client().PendingNonceAt(ctx, authDeployer.From) + if err != nil { + return + } + precalculatedAddr := crypto.CreateAddress(authDeployer.From, nonce+1) + bridgeABI, err := 
polygonzkevmbridgev2.Polygonzkevmbridgev2MetaData.GetAbi() + if err != nil { + return + } + if bridgeABI == nil { + err = errors.New("GetABI returned nil") + return + } + dataCallProxy, err := bridgeABI.Pack("initialize", + NetworkIDL2, + common.Address{}, // gasTokenAddressMainnet" + uint32(0), // gasTokenNetworkMainnet + precalculatedAddr, + common.Address{}, + []byte{}, // gasTokenMetadata + ) + if err != nil { + return + } + bridgeAddr, _, _, err = transparentupgradableproxy.DeployTransparentupgradableproxy( + authDeployer, + client.Client(), + bridgeImplementationAddr, + authDeployer.From, + dataCallProxy, + ) + if err != nil { + return + } + client.Commit() + bridgeContract, err = polygonzkevmbridgev2.NewPolygonzkevmbridgev2(bridgeAddr, client.Client()) + if err != nil { + return + } + checkGERAddr, err := bridgeContract.GlobalExitRootManager(&bind.CallOpts{}) + if err != nil { + return + } + if precalculatedAddr != checkGERAddr { + err = errors.New("error deploying bridge") + } + gerAddr, _, gerContract, err = gerContractEVMChain.DeployPessimisticglobalexitrootnopush0(auth, client.Client(), auth.From) if err != nil { return @@ -224,5 +368,8 @@ func newSimulatedEVMAggSovereignChain(auth *bind.TransactOpts) ( if !hasRole { err = errors.New("failed to set role") } + if precalculatedAddr != gerAddr { + err = errors.New("error calculating addr") + } return } diff --git a/tree/appendonlytree.go b/tree/appendonlytree.go index 376f4c058..d97405132 100644 --- a/tree/appendonlytree.go +++ b/tree/appendonlytree.go @@ -12,7 +12,7 @@ import ( // AppendOnlyTree is a tree where leaves are added sequentially (by index) type AppendOnlyTree struct { *Tree - lastLeftCache []common.Hash + lastLeftCache [defaultHeight]common.Hash lastIndex int64 } @@ -36,8 +36,8 @@ func (t *AppendOnlyTree) AddLeaves(tx kv.RwTx, leaves []Leaf) (func(), error) { } backupIndx := t.lastIndex - backupCache := make([]common.Hash, len(t.lastLeftCache)) - copy(backupCache, t.lastLeftCache) + backupCache := [defaultHeight]common.Hash{} + copy(backupCache[:], t.lastLeftCache[:]) rollback := func() { t.lastIndex = backupIndx t.lastLeftCache = backupCache @@ -62,7 +62,7 @@ func (t *AppendOnlyTree) addLeaf(tx kv.RwTx, leaf Leaf) error { // Calculate new tree nodes currentChildHash := leaf.Hash newNodes := []treeNode{} - for h := uint8(0); h < t.height; h++ { + for h := uint8(0); h < defaultHeight; h++ { var parent treeNode if leaf.Index&(1< 0 { // Add child to the right @@ -152,7 +152,7 @@ func (t *AppendOnlyTree) initLastIndex(tx kv.Tx) (common.Hash, error) { return root, nil } func (t *AppendOnlyTree) initLastLeftCache(tx kv.Tx, lastIndex int64, lastRoot common.Hash) error { - siblings := make([]common.Hash, t.height, t.height) + siblings := [defaultHeight]common.Hash{} if lastIndex == -1 { t.lastLeftCache = siblings return nil @@ -161,7 +161,7 @@ func (t *AppendOnlyTree) initLastLeftCache(tx kv.Tx, lastIndex int64, lastRoot c currentNodeHash := lastRoot // It starts in height-1 because 0 is the level of the leafs - for h := int(t.height - 1); h >= 0; h-- { + for h := int(defaultHeight - 1); h >= 0; h-- { currentNode, err := t.getRHTNode(tx, currentNodeHash) if err != nil { return fmt.Errorf( @@ -172,7 +172,7 @@ func (t *AppendOnlyTree) initLastLeftCache(tx kv.Tx, lastIndex int64, lastRoot c if currentNode == nil { return ErrNotFound } - siblings = append(siblings, currentNode.left) + siblings[h] = currentNode.left if index&(1< 0 { currentNodeHash = currentNode.right } else { diff --git a/tree/tree.go b/tree/tree.go index 
30f846db8..ba6a8ea0f 100644 --- a/tree/tree.go +++ b/tree/tree.go @@ -20,6 +20,7 @@ const ( ) var ( + EmptyProof = [32]common.Hash{} ErrNotFound = errors.New("not found") ) @@ -33,7 +34,6 @@ type Tree struct { rhtTable string rootTable string indexTable string - height uint8 zeroHashes []common.Hash } @@ -83,7 +83,6 @@ func newTree(db kv.RwDB, dbPrefix string) *Tree { rootTable: rootTable, indexTable: indexTable, db: db, - height: defaultHeight, zeroHashes: generateZeroHashes(defaultHeight), } @@ -113,21 +112,19 @@ func (t *Tree) getIndexByRoot(tx kv.Tx, root common.Hash) (uint64, error) { } func (t *Tree) getSiblings(tx kv.Tx, index uint32, root common.Hash) ( - siblings []common.Hash, + siblings [32]common.Hash, hasUsedZeroHashes bool, err error, ) { - siblings = make([]common.Hash, int(t.height)) - currentNodeHash := root // It starts in height-1 because 0 is the level of the leafs - for h := int(t.height - 1); h >= 0; h-- { + for h := int(defaultHeight - 1); h >= 0; h-- { var currentNode *treeNode currentNode, err = t.getRHTNode(tx, currentNodeHash) if err != nil { if err == ErrNotFound { hasUsedZeroHashes = true - siblings = append(siblings, t.zeroHashes[h]) + siblings[h] = t.zeroHashes[h] err = nil continue } else { @@ -160,35 +157,30 @@ func (t *Tree) getSiblings(tx kv.Tx, index uint32, root common.Hash) ( * Now, let's do AND operation => 100&100=100 which is higher than 0 so we need the left sibling (O5) */ if index&(1< 0 { - siblings = append(siblings, currentNode.left) + siblings[h] = currentNode.left currentNodeHash = currentNode.right } else { - siblings = append(siblings, currentNode.right) + siblings[h] = currentNode.right currentNodeHash = currentNode.left } } - // Reverse siblings to go from leafs to root - for i, j := 0, len(siblings)-1; i < j; i, j = i+1, j-1 { - siblings[i], siblings[j] = siblings[j], siblings[i] - } - return } // GetProof returns the merkle proof for a given index and root. -func (t *Tree) GetProof(ctx context.Context, index uint32, root common.Hash) ([]common.Hash, error) { +func (t *Tree) GetProof(ctx context.Context, index uint32, root common.Hash) ([defaultHeight]common.Hash, error) { tx, err := t.db.BeginRw(ctx) if err != nil { - return nil, err + return [defaultHeight]common.Hash{}, err } defer tx.Rollback() siblings, isErrNotFound, err := t.getSiblings(tx, index, root) if err != nil { - return nil, err + return [defaultHeight]common.Hash{}, err } if isErrNotFound { - return nil, ErrNotFound + return [defaultHeight]common.Hash{}, ErrNotFound } return siblings, nil } diff --git a/tree/updatabletree.go b/tree/updatabletree.go index ddebd5dfc..48365ee21 100644 --- a/tree/updatabletree.go +++ b/tree/updatabletree.go @@ -29,7 +29,7 @@ func NewUpdatableTree(ctx context.Context, db kv.RwDB, dbPrefix string) (*Updata return nil, err } if rootIndex == -1 { - root = t.zeroHashes[t.height] + root = t.zeroHashes[defaultHeight] } ut := &UpdatableTree{ Tree: t, @@ -70,7 +70,7 @@ func (t *UpdatableTree) upsertLeaf(tx kv.RwTx, leaf Leaf) error { } currentChildHash := leaf.Hash newNodes := []treeNode{} - for h := uint8(0); h < t.height; h++ { + for h := uint8(0); h < defaultHeight; h++ { var parent treeNode if leaf.Index&(1< 0 { // Add child to the right @@ -130,7 +130,7 @@ func (t *UpdatableTree) Reorg(tx kv.RwTx, firstReorgedIndex uint64) (func(), err } // no root found after reorg, going back to empty tree - t.lastRoot = t.zeroHashes[t.height] + t.lastRoot = t.zeroHashes[defaultHeight] return rollback, nil }
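
Usage note (illustrative, not part of the patch): a minimal sketch of how a caller might drive the claim sponsor introduced above, assuming the sponsor was created with NewEVMClaimSponsor, its Start loop is already running, and the Claim (proofs, global index, amounts) was assembled elsewhere, e.g. via bridgesync.GetProof and l1infotreesync as in the e2e test. The package name, helper name and polling interval below are hypothetical.

package example

import (
	"context"
	"errors"
	"time"

	"github.com/0xPolygon/cdk/claimsponsor"
)

// sponsorAndWait queues a claim with the sponsor and polls its DB until the
// claim reaches a final status (success or failed), or the context is cancelled.
func sponsorAndWait(ctx context.Context, claimer *claimsponsor.EVMClaimSponsor, claim *claimsponsor.Claim) error {
	// Queue the claim; the sponsor's Start loop processes queued claims in FIFO order.
	if err := claimer.AddClaimToQueue(ctx, claim); err != nil {
		return err
	}
	ticker := time.NewTicker(100 * time.Millisecond) // polling period chosen arbitrarily for the sketch
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-ticker.C:
			c, err := claimer.GetClaim(ctx, claim.GlobalIndex)
			if err != nil {
				return err
			}
			switch c.Status {
			case claimsponsor.SuccessClaimStatus:
				return nil
			case claimsponsor.FailedClaimStatus:
				return errors.New("sponsored claim failed on L2")
			}
			// PendingClaimStatus / WIPStatus: keep waiting.
		}
	}
}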