From cebea6fb233da8ba7b348c68525df7d7e3068757 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Tue, 18 Jul 2023 02:24:52 +0000 Subject: [PATCH 01/64] add storage keys caching --- arbos/addressSet/addressSet.go | 4 +- arbos/addressTable/addressTable.go | 2 +- arbos/arbosState/arbosstate.go | 32 +++++------ arbos/arbosState/arbosstate_test.go | 2 +- arbos/blockhash/blockhash.go | 2 +- arbos/l1pricing/batchPoster.go | 8 +-- arbos/l1pricing/l1pricing.go | 4 +- arbos/queue_test.go | 2 +- arbos/retryables/retryable.go | 14 ++--- arbos/storage/queue.go | 2 +- arbos/storage/storage.go | 63 ++++++++++++++++++---- util/containers/safe_lru.go | 82 +++++++++++++++++++++++++++++ 12 files changed, 171 insertions(+), 46 deletions(-) create mode 100644 util/containers/safe_lru.go diff --git a/arbos/addressSet/addressSet.go b/arbos/addressSet/addressSet.go index ae2e6a34c1..72200c0373 100644 --- a/arbos/addressSet/addressSet.go +++ b/arbos/addressSet/addressSet.go @@ -24,9 +24,9 @@ func Initialize(sto *storage.Storage) error { func OpenAddressSet(sto *storage.Storage) *AddressSet { return &AddressSet{ - sto, + sto.NoCacheCopy(), sto.OpenStorageBackedUint64(0), - sto.OpenSubStorage([]byte{0}), + sto.OpenSubStorage([]byte{0}, false), } } diff --git a/arbos/addressTable/addressTable.go b/arbos/addressTable/addressTable.go index 220c2700f4..56f04badff 100644 --- a/arbos/addressTable/addressTable.go +++ b/arbos/addressTable/addressTable.go @@ -25,7 +25,7 @@ func Initialize(sto *storage.Storage) { func Open(sto *storage.Storage) *AddressTable { numItems := sto.OpenStorageBackedUint64(0) - return &AddressTable{sto, sto.OpenSubStorage([]byte{}), numItems} + return &AddressTable{sto.NoCacheCopy(), sto.OpenSubStorage([]byte{}, false), numItems} } func (atab *AddressTable) Register(addr common.Address) (uint64, error) { diff --git a/arbos/arbosState/arbosstate.go b/arbos/arbosState/arbosstate.go index 2bea8f7c54..93ec1ec469 100644 --- a/arbos/arbosState/arbosstate.go +++ 
b/arbos/arbosState/arbosstate.go @@ -71,13 +71,13 @@ func OpenArbosState(stateDB vm.StateDB, burner burn.Burner) (*ArbosState, error) backingStorage.OpenStorageBackedUint64(uint64(upgradeVersionOffset)), backingStorage.OpenStorageBackedUint64(uint64(upgradeTimestampOffset)), backingStorage.OpenStorageBackedAddress(uint64(networkFeeAccountOffset)), - l1pricing.OpenL1PricingState(backingStorage.OpenSubStorage(l1PricingSubspace)), - l2pricing.OpenL2PricingState(backingStorage.OpenSubStorage(l2PricingSubspace)), - retryables.OpenRetryableState(backingStorage.OpenSubStorage(retryablesSubspace), stateDB), - addressTable.Open(backingStorage.OpenSubStorage(addressTableSubspace)), - addressSet.OpenAddressSet(backingStorage.OpenSubStorage(chainOwnerSubspace)), - merkleAccumulator.OpenMerkleAccumulator(backingStorage.OpenSubStorage(sendMerkleSubspace)), - blockhash.OpenBlockhashes(backingStorage.OpenSubStorage(blockhashesSubspace)), + l1pricing.OpenL1PricingState(backingStorage.OpenSubStorage(l1PricingSubspace, true)), + l2pricing.OpenL2PricingState(backingStorage.OpenSubStorage(l2PricingSubspace, true)), + retryables.OpenRetryableState(backingStorage.OpenSubStorage(retryablesSubspace, true), stateDB), + addressTable.Open(backingStorage.OpenSubStorage(addressTableSubspace, true)), + addressSet.OpenAddressSet(backingStorage.OpenSubStorage(chainOwnerSubspace, true)), + merkleAccumulator.OpenMerkleAccumulator(backingStorage.OpenSubStorage(sendMerkleSubspace, true)), + blockhash.OpenBlockhashes(backingStorage.OpenSubStorage(blockhashesSubspace, true)), backingStorage.OpenStorageBackedBigInt(uint64(chainIdOffset)), backingStorage.OpenStorageBackedBytes(chainConfigSubspace), backingStorage.OpenStorageBackedUint64(uint64(genesisBlockNumOffset)), @@ -220,14 +220,14 @@ func InitializeArbosState(stateDB vm.StateDB, burner burn.Burner, chainConfig *p if desiredArbosVersion >= 2 { initialRewardsRecipient = initialChainOwner } - _ = 
l1pricing.InitializeL1PricingState(sto.OpenSubStorage(l1PricingSubspace), initialRewardsRecipient, initMessage.InitialL1BaseFee) - _ = l2pricing.InitializeL2PricingState(sto.OpenSubStorage(l2PricingSubspace)) - _ = retryables.InitializeRetryableState(sto.OpenSubStorage(retryablesSubspace)) - addressTable.Initialize(sto.OpenSubStorage(addressTableSubspace)) - merkleAccumulator.InitializeMerkleAccumulator(sto.OpenSubStorage(sendMerkleSubspace)) - blockhash.InitializeBlockhashes(sto.OpenSubStorage(blockhashesSubspace)) - - ownersStorage := sto.OpenSubStorage(chainOwnerSubspace) + _ = l1pricing.InitializeL1PricingState(sto.OpenSubStorage(l1PricingSubspace, true), initialRewardsRecipient, initMessage.InitialL1BaseFee) + _ = l2pricing.InitializeL2PricingState(sto.OpenSubStorage(l2PricingSubspace, true)) + _ = retryables.InitializeRetryableState(sto.OpenSubStorage(retryablesSubspace, true)) + addressTable.Initialize(sto.OpenSubStorage(addressTableSubspace, true)) + merkleAccumulator.InitializeMerkleAccumulator(sto.OpenSubStorage(sendMerkleSubspace, true)) + blockhash.InitializeBlockhashes(sto.OpenSubStorage(blockhashesSubspace, true)) + + ownersStorage := sto.OpenSubStorage(chainOwnerSubspace, true) _ = addressSet.Initialize(ownersStorage) _ = addressSet.OpenAddressSet(ownersStorage).Add(initialChainOwner) @@ -385,7 +385,7 @@ func (state *ArbosState) ChainOwners() *addressSet.AddressSet { func (state *ArbosState) SendMerkleAccumulator() *merkleAccumulator.MerkleAccumulator { if state.sendMerkle == nil { - state.sendMerkle = merkleAccumulator.OpenMerkleAccumulator(state.backingStorage.OpenSubStorage(sendMerkleSubspace)) + state.sendMerkle = merkleAccumulator.OpenMerkleAccumulator(state.backingStorage.OpenSubStorage(sendMerkleSubspace, true)) } return state.sendMerkle } diff --git a/arbos/arbosState/arbosstate_test.go b/arbos/arbosState/arbosstate_test.go index c4643c9183..384fc9c72f 100644 --- a/arbos/arbosState/arbosstate_test.go +++ b/arbos/arbosState/arbosstate_test.go 
@@ -64,7 +64,7 @@ func TestStorageBackedInt64(t *testing.T) { func TestStorageSlots(t *testing.T) { state, _ := NewArbosMemoryBackedArbOSState() - sto := state.BackingStorage().OpenSubStorage([]byte{}) + sto := state.BackingStorage().OpenSubStorage([]byte{}, true) println("nil address", colors.Blue, storage.NilAddressRepresentation.String(), colors.Clear) diff --git a/arbos/blockhash/blockhash.go b/arbos/blockhash/blockhash.go index 2eedf7f5bb..99fb3ef470 100644 --- a/arbos/blockhash/blockhash.go +++ b/arbos/blockhash/blockhash.go @@ -21,7 +21,7 @@ func InitializeBlockhashes(backingStorage *storage.Storage) { } func OpenBlockhashes(backingStorage *storage.Storage) *Blockhashes { - return &Blockhashes{backingStorage, backingStorage.OpenStorageBackedUint64(0)} + return &Blockhashes{backingStorage.NoCacheCopy(), backingStorage.OpenStorageBackedUint64(0)} } func (bh *Blockhashes) L1BlockNumber() (uint64, error) { diff --git a/arbos/l1pricing/batchPoster.go b/arbos/l1pricing/batchPoster.go index 97b7b16234..4ac86307ec 100644 --- a/arbos/l1pricing/batchPoster.go +++ b/arbos/l1pricing/batchPoster.go @@ -42,13 +42,13 @@ func InitializeBatchPostersTable(storage *storage.Storage) error { if err := totalFundsDue.SetChecked(common.Big0); err != nil { return err } - return addressSet.Initialize(storage.OpenSubStorage(PosterAddrsKey)) + return addressSet.Initialize(storage.OpenSubStorage(PosterAddrsKey, true)) } func OpenBatchPostersTable(storage *storage.Storage) *BatchPostersTable { return &BatchPostersTable{ - posterAddrs: addressSet.OpenAddressSet(storage.OpenSubStorage(PosterAddrsKey)), - posterInfo: storage.OpenSubStorage(PosterInfoKey), + posterAddrs: addressSet.OpenAddressSet(storage.OpenSubStorage(PosterAddrsKey, true)), + posterInfo: storage.OpenSubStorage(PosterInfoKey, false), totalFundsDue: storage.OpenStorageBackedBigInt(totalFundsDueOffset), } } @@ -68,7 +68,7 @@ func (bpt *BatchPostersTable) OpenPoster(poster common.Address, createIfNotExist } func (bpt 
*BatchPostersTable) internalOpen(poster common.Address) *BatchPosterState { - bpStorage := bpt.posterInfo.OpenSubStorage(poster.Bytes()) + bpStorage := bpt.posterInfo.OpenSubStorage(poster.Bytes(), false) return &BatchPosterState{ fundsDue: bpStorage.OpenStorageBackedBigInt(0), payTo: bpStorage.OpenStorageBackedAddress(1), diff --git a/arbos/l1pricing/l1pricing.go b/arbos/l1pricing/l1pricing.go index 9772ac028b..0d6bca7bf9 100644 --- a/arbos/l1pricing/l1pricing.go +++ b/arbos/l1pricing/l1pricing.go @@ -82,7 +82,7 @@ var InitialEquilibrationUnitsV0 = arbmath.UintToBig(60 * params.TxDataNonZeroGas var InitialEquilibrationUnitsV6 = arbmath.UintToBig(params.TxDataNonZeroGasEIP2028 * 10000000) func InitializeL1PricingState(sto *storage.Storage, initialRewardsRecipient common.Address, initialL1BaseFee *big.Int) error { - bptStorage := sto.OpenSubStorage(BatchPosterTableKey) + bptStorage := sto.OpenSubStorage(BatchPosterTableKey, true) if err := InitializeBatchPostersTable(bptStorage); err != nil { return err } @@ -117,7 +117,7 @@ func InitializeL1PricingState(sto *storage.Storage, initialRewardsRecipient comm func OpenL1PricingState(sto *storage.Storage) *L1PricingState { return &L1PricingState{ sto, - OpenBatchPostersTable(sto.OpenSubStorage(BatchPosterTableKey)), + OpenBatchPostersTable(sto.OpenSubStorage(BatchPosterTableKey, true)), sto.OpenStorageBackedAddress(payRewardsToOffset), sto.OpenStorageBackedBigUint(equilibrationUnitsOffset), sto.OpenStorageBackedUint64(inertiaOffset), diff --git a/arbos/queue_test.go b/arbos/queue_test.go index d8d491bdb0..abeec49a93 100644 --- a/arbos/queue_test.go +++ b/arbos/queue_test.go @@ -14,7 +14,7 @@ import ( func TestQueue(t *testing.T) { state, statedb := arbosState.NewArbosMemoryBackedArbOSState() - sto := state.BackingStorage().OpenSubStorage([]byte{}) + sto := state.BackingStorage().OpenSubStorage([]byte{}, true) Require(t, storage.InitializeQueue(sto)) q := storage.OpenQueue(sto) diff --git a/arbos/retryables/retryable.go 
b/arbos/retryables/retryable.go index abea2ab7bd..0322938541 100644 --- a/arbos/retryables/retryable.go +++ b/arbos/retryables/retryable.go @@ -31,13 +31,13 @@ var ( ) func InitializeRetryableState(sto *storage.Storage) error { - return storage.InitializeQueue(sto.OpenSubStorage(timeoutQueueKey)) + return storage.InitializeQueue(sto.OpenSubStorage(timeoutQueueKey, true)) } func OpenRetryableState(sto *storage.Storage, statedb vm.StateDB) *RetryableState { return &RetryableState{ sto, - storage.OpenQueue(sto.OpenSubStorage(timeoutQueueKey)), + storage.OpenQueue(sto.OpenSubStorage(timeoutQueueKey, true)), } } @@ -73,7 +73,7 @@ func (rs *RetryableState) CreateRetryable( beneficiary common.Address, calldata []byte, ) (*Retryable, error) { - sto := rs.retryables.OpenSubStorage(id.Bytes()) + sto := rs.retryables.OpenSubStorage(id.Bytes(), false) ret := &Retryable{ id, sto, @@ -100,7 +100,7 @@ func (rs *RetryableState) CreateRetryable( } func (rs *RetryableState) OpenRetryable(id common.Hash, currentTimestamp uint64) (*Retryable, error) { - sto := rs.retryables.OpenSubStorage(id.Bytes()) + sto := rs.retryables.OpenSubStorage(id.Bytes(), false) timeoutStorage := sto.OpenStorageBackedUint64(timeoutOffset) timeout, err := timeoutStorage.Get() if timeout == 0 || timeout < currentTimestamp || err != nil { @@ -134,7 +134,7 @@ func (rs *RetryableState) RetryableSizeBytes(id common.Hash, currentTime uint64) } func (rs *RetryableState) DeleteRetryable(id common.Hash, evm *vm.EVM, scenario util.TracingScenario) (bool, error) { - retStorage := rs.retryables.OpenSubStorage(id.Bytes()) + retStorage := rs.retryables.OpenSubStorage(id.Bytes(), false) timeout, err := retStorage.GetByUint64(timeoutOffset) if timeout == (common.Hash{}) || err != nil { return false, err @@ -157,7 +157,7 @@ func (rs *RetryableState) DeleteRetryable(id common.Hash, evm *vm.EVM, scenario _ = retStorage.ClearByUint64(beneficiaryOffset) _ = retStorage.ClearByUint64(timeoutOffset) _ = 
retStorage.ClearByUint64(timeoutWindowsLeftOffset) - err = retStorage.OpenSubStorage(calldataKey).ClearBytes() + err = retStorage.OpenSubStorage(calldataKey, false).ClearBytes() return true, err } @@ -291,7 +291,7 @@ func (rs *RetryableState) TryToReapOneRetryable(currentTimestamp uint64, evm *vm if err != nil || id == nil { return err } - retryableStorage := rs.retryables.OpenSubStorage(id.Bytes()) + retryableStorage := rs.retryables.OpenSubStorage(id.Bytes(), false) timeoutStorage := retryableStorage.OpenStorageBackedUint64(timeoutOffset) timeout, err := timeoutStorage.Get() if err != nil { diff --git a/arbos/storage/queue.go b/arbos/storage/queue.go index 55231d3a90..032ac11aad 100644 --- a/arbos/storage/queue.go +++ b/arbos/storage/queue.go @@ -25,7 +25,7 @@ func InitializeQueue(sto *Storage) error { func OpenQueue(sto *Storage) *Queue { return &Queue{ - sto, + sto.NoCacheCopy(), sto.OpenStorageBackedUint64(0), sto.OpenStorageBackedUint64(1), } diff --git a/arbos/storage/storage.go b/arbos/storage/storage.go index 478ad68f8f..b93c835ff0 100644 --- a/arbos/storage/storage.go +++ b/arbos/storage/storage.go @@ -4,6 +4,7 @@ package storage import ( + "bytes" "fmt" "math/big" @@ -17,6 +18,7 @@ import ( "github.com/offchainlabs/nitro/arbos/burn" "github.com/offchainlabs/nitro/arbos/util" "github.com/offchainlabs/nitro/util/arbmath" + "github.com/offchainlabs/nitro/util/containers" ) // Storage allows ArbOS to store data persistently in the Ethereum-compatible stateDB. This is represented in @@ -43,12 +45,18 @@ type Storage struct { db vm.StateDB storageKey []byte burner burn.Burner + hashCache *containers.SafeLruCache[string, []byte] } const StorageReadCost = params.SloadGasEIP2200 const StorageWriteCost = params.SstoreSetGasEIP2200 const StorageWriteZeroCost = params.SstoreResetGasEIP2200 +const storageKeyCacheSize = 1024 + +// TODO(magic) rename? 
+var storageHashCache = containers.NewSafeLruCache[string, []byte](storageKeyCacheSize) + // NewGeth uses a Geth database to create an evm key-value store func NewGeth(statedb vm.StateDB, burner burn.Burner) *Storage { account := common.HexToAddress("0xA4B05FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF") @@ -58,6 +66,7 @@ func NewGeth(statedb vm.StateDB, burner burn.Burner) *Storage { db: statedb, storageKey: []byte{}, burner: burner, + hashCache: storageHashCache, } } @@ -81,15 +90,16 @@ func NewMemoryBackedStateDB() vm.StateDB { // a page, to preserve contiguity within a page. This will reduce cost if/when Ethereum switches to storage // representations that reward contiguity. // Because page numbers are 248 bits, this gives us 124-bit security against collision attacks, which is good enough. -func mapAddress(storageKey []byte, key common.Hash) common.Hash { +func (store *Storage) mapAddress(storageKey []byte, key common.Hash) common.Hash { keyBytes := key.Bytes() boundary := common.HashLength - 1 - return common.BytesToHash( + mapped := common.BytesToHash( append( - crypto.Keccak256(storageKey, keyBytes[:boundary])[:boundary], + store.cachedKeccak(storageKey, keyBytes[:boundary])[:boundary], keyBytes[boundary], ), ) + return mapped } func writeCost(value common.Hash) uint64 { @@ -111,11 +121,11 @@ func (store *Storage) Get(key common.Hash) (common.Hash, error) { if info := store.burner.TracingInfo(); info != nil { info.RecordStorageGet(key) } - return store.db.GetState(store.account, mapAddress(store.storageKey, key)), nil + return store.db.GetState(store.account, store.mapAddress(store.storageKey, key)), nil } func (store *Storage) GetStorageSlot(key common.Hash) common.Hash { - return mapAddress(store.storageKey, key) + return store.mapAddress(store.storageKey, key) } func (store *Storage) GetUint64(key common.Hash) (uint64, error) { @@ -143,7 +153,7 @@ func (store *Storage) Set(key common.Hash, value common.Hash) error { if info := store.burner.TracingInfo(); info != 
nil { info.RecordStorageSet(key, value) } - store.db.SetState(store.account, mapAddress(store.storageKey, key), value) + store.db.SetState(store.account, store.mapAddress(store.storageKey, key), value) return nil } @@ -171,12 +181,27 @@ func (store *Storage) Swap(key common.Hash, newValue common.Hash) (common.Hash, return oldValue, store.Set(key, newValue) } -func (store *Storage) OpenSubStorage(id []byte) *Storage { +func (store *Storage) OpenSubStorage(id []byte, cacheKeys bool) *Storage { + var hashCache *containers.SafeLruCache[string, []byte] + if cacheKeys { + hashCache = storageHashCache + } return &Storage{ store.account, store.db, - crypto.Keccak256(store.storageKey, id), + store.cachedKeccak(store.storageKey, id), store.burner, + hashCache, + } +} + +func (store *Storage) NoCacheCopy() *Storage { + return &Storage{ + store.account, + store.db, + store.storageKey, + store.burner, + nil, } } @@ -266,6 +291,24 @@ func (store *Storage) Keccak(data ...[]byte) ([]byte, error) { return crypto.Keccak256(data...), nil } +func (store *Storage) cachedKeccak(data ...[]byte) []byte { + if store.hashCache == nil { + return crypto.Keccak256(data...) + } + keyString := string(bytes.Join(data, []byte{})) + hash, isCached := store.hashCache.Get(keyString) + if isCached { + return hash + } + // TODO(magic) we might miss the warning if concurrent Add will be before + if store.hashCache.Size()-store.hashCache.Len() == 1 { + log.Warn("Hash cache almost full, but we didn't expect that. We may be caching some non-static keys.") + } + hash = crypto.Keccak256(data...) + store.hashCache.Add(keyString, hash) + return hash +} + func (store *Storage) KeccakHash(data ...[]byte) (common.Hash, error) { bytes, err := store.Keccak(data...) 
return common.BytesToHash(bytes), err @@ -279,7 +322,7 @@ type StorageSlot struct { } func (store *Storage) NewSlot(offset uint64) StorageSlot { - return StorageSlot{store.account, store.db, mapAddress(store.storageKey, util.UintToHash(offset)), store.burner} + return StorageSlot{store.account, store.db, store.mapAddress(store.storageKey, util.UintToHash(offset)), store.burner} } func (ss *StorageSlot) Get() (common.Hash, error) { @@ -590,7 +633,7 @@ type StorageBackedBytes struct { func (store *Storage) OpenStorageBackedBytes(id []byte) StorageBackedBytes { return StorageBackedBytes{ - *store.OpenSubStorage(id), + *store.OpenSubStorage(id, false), } } diff --git a/util/containers/safe_lru.go b/util/containers/safe_lru.go new file mode 100644 index 0000000000..40e7d993ec --- /dev/null +++ b/util/containers/safe_lru.go @@ -0,0 +1,82 @@ +package containers + +import ( + "sync" +) + +// thread safe version of containers.LruCache +type SafeLruCache[K comparable, V any] struct { + inner *LruCache[K, V] + mutex sync.RWMutex +} + +func NewSafeLruCache[K comparable, V any](size int) *SafeLruCache[K, V] { + return NewSafeLruCacheWithOnEvict[K, V](size, nil) +} + +func NewSafeLruCacheWithOnEvict[K comparable, V any](size int, onEvict func(K, V)) *SafeLruCache[K, V] { + return &SafeLruCache[K, V]{ + inner: NewLruCacheWithOnEvict(size, onEvict), + } +} + +// Returns true if an item was evicted +func (c *SafeLruCache[K, V]) Add(key K, value V) bool { + c.mutex.Lock() + defer c.mutex.Unlock() + return c.inner.Add(key, value) +} + +func (c *SafeLruCache[K, V]) Get(key K) (V, bool) { + c.mutex.Lock() + defer c.mutex.Unlock() + return c.inner.Get(key) +} + +func (c *SafeLruCache[K, V]) Contains(key K) bool { + c.mutex.RLock() + defer c.mutex.RUnlock() + return c.inner.Contains(key) +} + +func (c *SafeLruCache[K, V]) Remove(key K) { + c.mutex.Lock() + defer c.mutex.Unlock() + c.inner.Remove(key) +} + +func (c *SafeLruCache[K, V]) GetOldest() (K, V, bool) { + c.mutex.RLock() + defer 
c.mutex.RUnlock() + return c.inner.GetOldest() +} + +func (c *SafeLruCache[K, V]) RemoveOldest() { + c.mutex.Lock() + defer c.mutex.Unlock() + c.inner.RemoveOldest() +} + +func (c *SafeLruCache[K, V]) Len() int { + c.mutex.RLock() + defer c.mutex.RUnlock() + return c.inner.Len() +} + +func (c *SafeLruCache[K, V]) Size() int { + c.mutex.RLock() + defer c.mutex.RUnlock() + return c.inner.Size() +} + +func (c *SafeLruCache[K, V]) Clear() { + c.mutex.Lock() + defer c.mutex.Unlock() + c.inner.Clear() +} + +func (c *SafeLruCache[K, V]) Resize(newSize int) { + c.mutex.Lock() + defer c.mutex.Unlock() + c.inner.Resize(newSize) +} From 691a213eb0127a20d32114dfcd73a652a84c2ea4 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Tue, 18 Jul 2023 16:42:33 +0000 Subject: [PATCH 02/64] add onetime warning when hash cache unexpectedly fills up --- arbos/storage/storage.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/arbos/storage/storage.go b/arbos/storage/storage.go index b93c835ff0..39af072f0a 100644 --- a/arbos/storage/storage.go +++ b/arbos/storage/storage.go @@ -7,6 +7,7 @@ import ( "bytes" "fmt" "math/big" + "sync/atomic" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/rawdb" @@ -54,8 +55,8 @@ const StorageWriteZeroCost = params.SstoreResetGasEIP2200 const storageKeyCacheSize = 1024 -// TODO(magic) rename? var storageHashCache = containers.NewSafeLruCache[string, []byte](storageKeyCacheSize) +var cacheFullLogged atomic.Bool // NewGeth uses a Geth database to create an evm key-value store func NewGeth(statedb vm.StateDB, burner burn.Burner) *Storage { @@ -300,12 +301,11 @@ func (store *Storage) cachedKeccak(data ...[]byte) []byte { if isCached { return hash } - // TODO(magic) we might miss the warning if concurrent Add will be before - if store.hashCache.Size()-store.hashCache.Len() == 1 { - log.Warn("Hash cache almost full, but we didn't expect that. 
We may be caching some non-static keys.") - } hash = crypto.Keccak256(data...) - store.hashCache.Add(keyString, hash) + evicted := store.hashCache.Add(keyString, hash) + if evicted && cacheFullLogged.CompareAndSwap(false, true) { + log.Warn("Hash cache full, we didn't expect that. Some non-static storage keys may fill up the cache.") + } return hash } From f802560c966b27eeba1b68a51585e1238ecb5787 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Tue, 18 Jul 2023 18:44:13 +0000 Subject: [PATCH 03/64] fix race in mapAddress --- arbos/storage/storage.go | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/arbos/storage/storage.go b/arbos/storage/storage.go index 39af072f0a..f234e69d5a 100644 --- a/arbos/storage/storage.go +++ b/arbos/storage/storage.go @@ -94,13 +94,10 @@ func NewMemoryBackedStateDB() vm.StateDB { func (store *Storage) mapAddress(storageKey []byte, key common.Hash) common.Hash { keyBytes := key.Bytes() boundary := common.HashLength - 1 - mapped := common.BytesToHash( - append( - store.cachedKeccak(storageKey, keyBytes[:boundary])[:boundary], - keyBytes[boundary], - ), - ) - return mapped + mapped := make([]byte, 0, common.HashLength) + mapped = append(mapped, store.cachedKeccak(storageKey, keyBytes[:boundary])[:boundary]...) + mapped = append(mapped, keyBytes[boundary]) + return common.BytesToHash(mapped) } func writeCost(value common.Hash) uint64 { @@ -292,13 +289,14 @@ func (store *Storage) Keccak(data ...[]byte) ([]byte, error) { return crypto.Keccak256(data...), nil } +// note: returned slice is not thread-safe func (store *Storage) cachedKeccak(data ...[]byte) []byte { if store.hashCache == nil { return crypto.Keccak256(data...) } keyString := string(bytes.Join(data, []byte{})) - hash, isCached := store.hashCache.Get(keyString) - if isCached { + hash, wasCached := store.hashCache.Get(keyString) + if wasCached { return hash } hash = crypto.Keccak256(data...) 
From 0fd07c6dbbee64e8c80f45125aec670c52ce7114 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Tue, 18 Jul 2023 19:04:26 +0000 Subject: [PATCH 04/64] remove redundant mapAddress parameter --- arbos/storage/storage.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/arbos/storage/storage.go b/arbos/storage/storage.go index f234e69d5a..66f3d49473 100644 --- a/arbos/storage/storage.go +++ b/arbos/storage/storage.go @@ -91,11 +91,11 @@ func NewMemoryBackedStateDB() vm.StateDB { // a page, to preserve contiguity within a page. This will reduce cost if/when Ethereum switches to storage // representations that reward contiguity. // Because page numbers are 248 bits, this gives us 124-bit security against collision attacks, which is good enough. -func (store *Storage) mapAddress(storageKey []byte, key common.Hash) common.Hash { +func (store *Storage) mapAddress(key common.Hash) common.Hash { keyBytes := key.Bytes() boundary := common.HashLength - 1 mapped := make([]byte, 0, common.HashLength) - mapped = append(mapped, store.cachedKeccak(storageKey, keyBytes[:boundary])[:boundary]...) + mapped = append(mapped, store.cachedKeccak(store.storageKey, keyBytes[:boundary])[:boundary]...) 
mapped = append(mapped, keyBytes[boundary]) return common.BytesToHash(mapped) } @@ -119,11 +119,11 @@ func (store *Storage) Get(key common.Hash) (common.Hash, error) { if info := store.burner.TracingInfo(); info != nil { info.RecordStorageGet(key) } - return store.db.GetState(store.account, store.mapAddress(store.storageKey, key)), nil + return store.db.GetState(store.account, store.mapAddress(key)), nil } func (store *Storage) GetStorageSlot(key common.Hash) common.Hash { - return store.mapAddress(store.storageKey, key) + return store.mapAddress(key) } func (store *Storage) GetUint64(key common.Hash) (uint64, error) { @@ -151,7 +151,7 @@ func (store *Storage) Set(key common.Hash, value common.Hash) error { if info := store.burner.TracingInfo(); info != nil { info.RecordStorageSet(key, value) } - store.db.SetState(store.account, store.mapAddress(store.storageKey, key), value) + store.db.SetState(store.account, store.mapAddress(key), value) return nil } @@ -320,7 +320,7 @@ type StorageSlot struct { } func (store *Storage) NewSlot(offset uint64) StorageSlot { - return StorageSlot{store.account, store.db, store.mapAddress(store.storageKey, util.UintToHash(offset)), store.burner} + return StorageSlot{store.account, store.db, store.mapAddress(util.UintToHash(offset)), store.burner} } func (ss *StorageSlot) Get() (common.Hash, error) { From 48c6a705cd529763af17e91becb056aebe567e63 Mon Sep 17 00:00:00 2001 From: Goran Vladika Date: Fri, 22 Sep 2023 19:19:39 +0200 Subject: [PATCH 05/64] Add nativeToken flag to deployer and update createRollup call --- .gitmodules | 2 +- Dockerfile | 2 + arbnode/node.go | 77 +++++++++++++------------------------ cmd/chaininfo/chain_info.go | 1 + cmd/deploy/deploy.go | 3 ++ system_tests/common_test.go | 1 + 6 files changed, 35 insertions(+), 51 deletions(-) diff --git a/.gitmodules b/.gitmodules index 7c78791c78..444199c4ed 100644 --- a/.gitmodules +++ b/.gitmodules @@ -13,7 +13,7 @@ [submodule "contracts"] path = contracts url = 
https://github.com/OffchainLabs/nitro-contracts.git - branch = develop + branch = feature-orbit-bridge [submodule "arbitrator/wasm-testsuite/testsuite"] path = arbitrator/wasm-testsuite/testsuite url = https://github.com/WebAssembly/testsuite.git diff --git a/Dockerfile b/Dockerfile index c1a28760c4..88e239356c 100644 --- a/Dockerfile +++ b/Dockerfile @@ -76,6 +76,7 @@ COPY ./fastcache ./fastcache COPY ./go-ethereum ./go-ethereum COPY --from=brotli-wasm-export / target/ COPY --from=contracts-builder workspace/contracts/build/contracts/src/precompiles/ contracts/build/contracts/src/precompiles/ +COPY --from=contracts-builder workspace/contracts/node_modules/@offchainlabs/upgrade-executor/build/contracts/src/UpgradeExecutor.sol/UpgradeExecutor.json contracts/ COPY --from=contracts-builder workspace/.make/ .make/ RUN PATH="$PATH:/usr/local/go/bin" NITRO_BUILD_IGNORE_TIMESTAMPS=1 make build-wasm-bin @@ -179,6 +180,7 @@ COPY fastcache/go.mod fastcache/go.sum fastcache/ RUN go mod download COPY . 
./ COPY --from=contracts-builder workspace/contracts/build/ contracts/build/ +COPY --from=contracts-builder workspace/contracts/node_modules/@offchainlabs/upgrade-executor/build/contracts/src/UpgradeExecutor.sol/UpgradeExecutor.json contracts/node_modules/@offchainlabs/upgrade-executor/build/contracts/src/UpgradeExecutor.sol/ COPY --from=contracts-builder workspace/.make/ .make/ COPY --from=prover-header-export / target/ COPY --from=brotli-library-export / target/ diff --git a/arbnode/node.go b/arbnode/node.go index 5bdc716264..0509f17bb1 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -42,6 +42,7 @@ import ( "github.com/offchainlabs/nitro/solgen/go/ospgen" "github.com/offchainlabs/nitro/solgen/go/precompilesgen" "github.com/offchainlabs/nitro/solgen/go/rollupgen" + "github.com/offchainlabs/nitro/solgen/go/upgrade_executorgen" "github.com/offchainlabs/nitro/staker" "github.com/offchainlabs/nitro/util/contracts" "github.com/offchainlabs/nitro/util/headerreader" @@ -61,53 +62,6 @@ func andTxSucceeded(ctx context.Context, l1Reader *headerreader.HeaderReader, tx return nil } -func deployBridgeCreator(ctx context.Context, l1Reader *headerreader.HeaderReader, auth *bind.TransactOpts) (common.Address, error) { - client := l1Reader.Client() - bridgeTemplate, tx, _, err := bridgegen.DeployBridge(auth, client) - err = andTxSucceeded(ctx, l1Reader, tx, err) - if err != nil { - return common.Address{}, fmt.Errorf("bridge deploy error: %w", err) - } - - seqInboxTemplate, tx, _, err := bridgegen.DeploySequencerInbox(auth, client) - err = andTxSucceeded(ctx, l1Reader, tx, err) - if err != nil { - return common.Address{}, fmt.Errorf("sequencer inbox deploy error: %w", err) - } - - inboxTemplate, tx, _, err := bridgegen.DeployInbox(auth, client) - err = andTxSucceeded(ctx, l1Reader, tx, err) - if err != nil { - return common.Address{}, fmt.Errorf("inbox deploy error: %w", err) - } - - rollupEventBridgeTemplate, tx, _, err := rollupgen.DeployRollupEventInbox(auth, client) - 
err = andTxSucceeded(ctx, l1Reader, tx, err) - if err != nil { - return common.Address{}, fmt.Errorf("rollup event bridge deploy error: %w", err) - } - - outboxTemplate, tx, _, err := bridgegen.DeployOutbox(auth, client) - err = andTxSucceeded(ctx, l1Reader, tx, err) - if err != nil { - return common.Address{}, fmt.Errorf("outbox deploy error: %w", err) - } - - bridgeCreatorAddr, tx, bridgeCreator, err := rollupgen.DeployBridgeCreator(auth, client) - err = andTxSucceeded(ctx, l1Reader, tx, err) - if err != nil { - return common.Address{}, fmt.Errorf("bridge creator deploy error: %w", err) - } - - tx, err = bridgeCreator.UpdateTemplates(auth, bridgeTemplate, seqInboxTemplate, inboxTemplate, rollupEventBridgeTemplate, outboxTemplate) - err = andTxSucceeded(ctx, l1Reader, tx, err) - if err != nil { - return common.Address{}, fmt.Errorf("bridge creator update templates error: %w", err) - } - - return bridgeCreatorAddr, nil -} - func deployChallengeFactory(ctx context.Context, l1Reader *headerreader.HeaderReader, auth *bind.TransactOpts) (common.Address, common.Address, error) { client := l1Reader.Client() osp0, tx, _, err := ospgen.DeployOneStepProver0(auth, client) @@ -150,9 +104,12 @@ func deployChallengeFactory(ctx context.Context, l1Reader *headerreader.HeaderRe } func deployRollupCreator(ctx context.Context, l1Reader *headerreader.HeaderReader, auth *bind.TransactOpts) (*rollupgen.RollupCreator, common.Address, common.Address, common.Address, error) { - bridgeCreator, err := deployBridgeCreator(ctx, l1Reader, auth) + fmt.Println("Deploying bridge creator...") + auth.GasLimit = uint64(14183487) + bridgeCreator, tx, _, err := rollupgen.DeployBridgeCreator(auth, l1Reader.Client()) + err = andTxSucceeded(ctx, l1Reader, tx, err) if err != nil { - return nil, common.Address{}, common.Address{}, common.Address{}, err + return nil, common.Address{}, common.Address{}, common.Address{}, fmt.Errorf("bridge creator deploy error: %w", err) } ospEntryAddr, challengeManagerAddr, 
err := deployChallengeFactory(ctx, l1Reader, auth) @@ -178,6 +135,12 @@ func deployRollupCreator(ctx context.Context, l1Reader *headerreader.HeaderReade return nil, common.Address{}, common.Address{}, common.Address{}, fmt.Errorf("rollup creator deploy error: %w", err) } + upgradeExecutor, tx, _, err := upgrade_executorgen.DeployUpgradeExecutor(auth, l1Reader.Client()) + err = andTxSucceeded(ctx, l1Reader, tx, err) + if err != nil { + return nil, common.Address{}, common.Address{}, common.Address{}, fmt.Errorf("upgrade executor deploy error: %w", err) + } + validatorUtils, tx, _, err := rollupgen.DeployValidatorUtils(auth, l1Reader.Client()) err = andTxSucceeded(ctx, l1Reader, tx, err) if err != nil { @@ -190,6 +153,13 @@ func deployRollupCreator(ctx context.Context, l1Reader *headerreader.HeaderReade return nil, common.Address{}, common.Address{}, common.Address{}, fmt.Errorf("validator wallet creator deploy error: %w", err) } + l2FactoriesDeployHelper, tx, _, err := rollupgen.DeployDeployHelper(auth, l1Reader.Client()) + err = andTxSucceeded(ctx, l1Reader, tx, err) + if err != nil { + return nil, common.Address{}, common.Address{}, common.Address{}, fmt.Errorf("deploy helper creator deploy error: %w", err) + } + + tx, err = rollupCreator.SetTemplates( auth, bridgeCreator, @@ -197,8 +167,10 @@ func deployRollupCreator(ctx context.Context, l1Reader *headerreader.HeaderReade challengeManagerAddr, rollupAdminLogic, rollupUserLogic, + upgradeExecutor, validatorUtils, validatorWalletCreator, + l2FactoriesDeployHelper, ) err = andTxSucceeded(ctx, l1Reader, tx, err) if err != nil { @@ -235,7 +207,7 @@ func GenerateRollupConfig(prod bool, wasmModuleRoot common.Hash, rollupOwner com } } -func DeployOnL1(ctx context.Context, parentChainReader *headerreader.HeaderReader, deployAuth *bind.TransactOpts, batchPoster common.Address, authorizeValidators uint64, config rollupgen.Config) (*chaininfo.RollupAddresses, error) { +func DeployOnL1(ctx context.Context, parentChainReader 
*headerreader.HeaderReader, deployAuth *bind.TransactOpts, batchPoster common.Address, authorizeValidators uint64, config rollupgen.Config, nativeToken common.Address) (*chaininfo.RollupAddresses, error) { if config.WasmModuleRoot == (common.Hash{}) { return nil, errors.New("no machine specified") } @@ -250,11 +222,16 @@ func DeployOnL1(ctx context.Context, parentChainReader *headerreader.HeaderReade validatorAddrs = append(validatorAddrs, crypto.CreateAddress(validatorWalletCreator, i)) } + // 0.1 gwei + maxFeePerGas := big.NewInt(100000000) tx, err := rollupCreator.CreateRollup( deployAuth, config, batchPoster, validatorAddrs, + nativeToken, + false, + maxFeePerGas, ) if err != nil { return nil, fmt.Errorf("error submitting create rollup tx: %w", err) diff --git a/cmd/chaininfo/chain_info.go b/cmd/chaininfo/chain_info.go index f75779b4aa..2902e96115 100644 --- a/cmd/chaininfo/chain_info.go +++ b/cmd/chaininfo/chain_info.go @@ -106,6 +106,7 @@ type RollupAddresses struct { Inbox common.Address `json:"inbox"` SequencerInbox common.Address `json:"sequencer-inbox"` Rollup common.Address `json:"rollup"` + NativeToken common.Address `json:"native-token"` ValidatorUtils common.Address `json:"validator-utils"` ValidatorWalletCreator common.Address `json:"validator-wallet-creator"` DeployedAt uint64 `json:"deployed-at"` diff --git a/cmd/deploy/deploy.go b/cmd/deploy/deploy.go index d687821e8b..4da3ae7652 100644 --- a/cmd/deploy/deploy.go +++ b/cmd/deploy/deploy.go @@ -40,6 +40,7 @@ func main() { deployAccount := flag.String("l1DeployAccount", "", "l1 seq account to use (default is first account in keystore)") ownerAddressString := flag.String("ownerAddress", "", "the rollup owner's address") sequencerAddressString := flag.String("sequencerAddress", "", "the sequencer's address") + nativeTokenAddressString := flag.String("nativeTokenAddress", "", "address of the ERC20 token which is used as native L2 currency") loserEscrowAddressString := flag.String("loserEscrowAddress", 
"", "the address which half of challenge loser's funds accumulate at") wasmmoduleroot := flag.String("wasmmoduleroot", "", "WASM module root hash") wasmrootpath := flag.String("wasmrootpath", "", "path to machine folders") @@ -137,6 +138,7 @@ func main() { l1Reader.Start(ctx) defer l1Reader.StopAndWait() + nativeToken := common.HexToAddress(*nativeTokenAddressString) deployedAddresses, err := arbnode.DeployOnL1( ctx, l1Reader, @@ -144,6 +146,7 @@ func main() { sequencerAddress, *authorizevalidators, arbnode.GenerateRollupConfig(*prod, moduleRoot, ownerAddress, &chainConfig, chainConfigJson, loserEscrowAddress), + nativeToken, ) if err != nil { flag.Usage() diff --git a/system_tests/common_test.go b/system_tests/common_test.go index 9fd002bd94..1bdb291184 100644 --- a/system_tests/common_test.go +++ b/system_tests/common_test.go @@ -489,6 +489,7 @@ func DeployOnTestL1( l1info.GetAddress("Sequencer"), 0, arbnode.GenerateRollupConfig(false, locator.LatestWasmModuleRoot(), l1info.GetAddress("RollupOwner"), chainConfig, serializedChainConfig, common.Address{}), + common.Address{}, ) Require(t, err) l1info.SetContract("Bridge", addresses.Bridge) From e9751ab13e7b517ee10ddff99853baff01b5f5a3 Mon Sep 17 00:00:00 2001 From: Goran Vladika Date: Fri, 22 Sep 2023 19:20:11 +0200 Subject: [PATCH 06/64] Deploy UpgradeExecutor --- solgen/gen.go | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/solgen/gen.go b/solgen/gen.go index c29db93039..5d43946fa5 100644 --- a/solgen/gen.go +++ b/solgen/gen.go @@ -96,6 +96,27 @@ func main() { modInfo.addArtifact(artifact) } + // add upgrade executor module which is not compiled locally, but imported from 'nitro-contracts' depedencies + upgExecutorPath := filepath.Join(parent, "contracts", "node_modules", "@offchainlabs", "upgrade-executor", "build", "contracts", "src", "UpgradeExecutor.sol", "UpgradeExecutor.json") + _, err = os.Stat(upgExecutorPath) + if !os.IsNotExist(err) { + data, err := os.ReadFile(upgExecutorPath) 
+ if err != nil { + // log.Fatal(string(output)) + log.Fatal("could not read", upgExecutorPath, "for contract", "UpgradeExecutor", err) + } + artifact := HardHatArtifact{} + if err := json.Unmarshal(data, &artifact); err != nil { + log.Fatal("failed to parse contract", "UpgradeExecutor", err) + } + modInfo := modules["upgrade_executorgen"] + if modInfo == nil { + modInfo = &moduleInfo{} + modules["upgrade_executorgen"] = modInfo + } + modInfo.addArtifact(artifact) + } + for module, info := range modules { code, err := bind.Bind( From 452499424b67447adae34cd1b89204fc18206e25 Mon Sep 17 00:00:00 2001 From: Goran Vladika Date: Sat, 23 Sep 2023 08:40:42 +0200 Subject: [PATCH 07/64] Update testnode branch --- .gitmodules | 1 + arbnode/node.go | 4 ++-- nitro-testnode | 2 +- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/.gitmodules b/.gitmodules index 444199c4ed..75e66a648c 100644 --- a/.gitmodules +++ b/.gitmodules @@ -20,3 +20,4 @@ [submodule "nitro-testnode"] path = nitro-testnode url = https://github.com/OffchainLabs/nitro-testnode.git + branch = fee-token-support diff --git a/arbnode/node.go b/arbnode/node.go index 0509f17bb1..5e694898bd 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -104,8 +104,8 @@ func deployChallengeFactory(ctx context.Context, l1Reader *headerreader.HeaderRe } func deployRollupCreator(ctx context.Context, l1Reader *headerreader.HeaderReader, auth *bind.TransactOpts) (*rollupgen.RollupCreator, common.Address, common.Address, common.Address, error) { - fmt.Println("Deploying bridge creator...") - auth.GasLimit = uint64(14183487) + // deploying bridge creator takes ~14.2 million gas + auth.GasLimit = uint64(15000000) bridgeCreator, tx, _, err := rollupgen.DeployBridgeCreator(auth, l1Reader.Client()) err = andTxSucceeded(ctx, l1Reader, tx, err) if err != nil { diff --git a/nitro-testnode b/nitro-testnode index 7ad12c0f1b..441166624c 160000 --- a/nitro-testnode +++ b/nitro-testnode @@ -1 +1 @@ -Subproject commit 
7ad12c0f1be75a72c7360d5258e0090f8225594e +Subproject commit 441166624c54857a11c0a880f12e19d7dbeebc69 From 4ec56e30588328303c1f918195e616f49c35d49e Mon Sep 17 00:00:00 2001 From: Goran Vladika Date: Thu, 28 Sep 2023 20:15:28 +0200 Subject: [PATCH 08/64] Update nitro-contracts ref --- contracts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contracts b/contracts index 97cfbe00ff..7dc1aa4382 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit 97cfbe00ff0eea4d7f5f5f3afb01598c19ddabc4 +Subproject commit 7dc1aa43829d9f4afe7251ad28d96a5d0e1d48c7 From a2a47e85dd729c681101a572fb5be732bc10a2e6 Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Mon, 9 Oct 2023 15:35:45 -0500 Subject: [PATCH 09/64] add secondary feed to go relay --- broadcastclient/broadcastclient.go | 6 +- broadcastclients/broadcastclients.go | 135 ++++++++++++++++++++++----- 2 files changed, 116 insertions(+), 25 deletions(-) diff --git a/broadcastclient/broadcastclient.go b/broadcastclient/broadcastclient.go index 2649c88192..a7ee269a4c 100644 --- a/broadcastclient/broadcastclient.go +++ b/broadcastclient/broadcastclient.go @@ -69,6 +69,7 @@ type Config struct { RequireFeedVersion bool `koanf:"require-feed-version" reload:"hot"` Timeout time.Duration `koanf:"timeout" reload:"hot"` URL []string `koanf:"url"` + SecondaryURL []string `koanf:"secondary-url"` Verify signature.VerifierConfig `koanf:"verify"` EnableCompression bool `koanf:"enable-compression" reload:"hot"` } @@ -85,7 +86,8 @@ func ConfigAddOptions(prefix string, f *flag.FlagSet) { f.Bool(prefix+".require-chain-id", DefaultConfig.RequireChainId, "require chain id to be present on connect") f.Bool(prefix+".require-feed-version", DefaultConfig.RequireFeedVersion, "require feed version to be present on connect") f.Duration(prefix+".timeout", DefaultConfig.Timeout, "duration to wait before timing out connection to sequencer feed") - f.StringSlice(prefix+".url", DefaultConfig.URL, "URL of sequencer feed source") + 
f.StringSlice(prefix+".url", DefaultConfig.URL, "list of primary URLs of sequencer feed source") + f.StringSlice(prefix+".secondary-url", DefaultConfig.SecondaryURL, "list of secondary URLs of sequencer feed source") signature.FeedVerifierConfigAddOptions(prefix+".verify", f) f.Bool(prefix+".enable-compression", DefaultConfig.EnableCompression, "enable per message deflate compression support") } @@ -97,6 +99,7 @@ var DefaultConfig = Config{ RequireFeedVersion: false, Verify: signature.DefultFeedVerifierConfig, URL: []string{""}, + SecondaryURL: []string{}, Timeout: 20 * time.Second, EnableCompression: true, } @@ -108,6 +111,7 @@ var DefaultTestConfig = Config{ RequireFeedVersion: false, Verify: signature.DefultFeedVerifierConfig, URL: []string{""}, + SecondaryURL: []string{}, Timeout: 200 * time.Millisecond, EnableCompression: true, } diff --git a/broadcastclients/broadcastclients.go b/broadcastclients/broadcastclients.go index 74596bb08f..873d6be03b 100644 --- a/broadcastclients/broadcastclients.go +++ b/broadcastclients/broadcastclients.go @@ -6,16 +6,42 @@ package broadcastclients import ( "context" "sync/atomic" + "time" "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/broadcastclient" + "github.com/offchainlabs/nitro/broadcaster" "github.com/offchainlabs/nitro/util/contracts" + "github.com/offchainlabs/nitro/util/stopwaiter" ) +const MAX_FEED_INACTIVE_TIME = time.Second * 6 +const ROUTER_QUEUE_SIZE = 1024 + +type Router struct { + stopwaiter.StopWaiter + messageChan chan broadcaster.BroadcastFeedMessage + confirmedSequenceNumberChan chan arbutil.MessageIndex + + forwardTxStreamer broadcastclient.TransactionStreamerInterface + forwardConfirmationChan chan arbutil.MessageIndex +} + +func (r *Router) AddBroadcastMessages(feedMessages []*broadcaster.BroadcastFeedMessage) error { + for _, feedMessage := range feedMessages { + r.messageChan <- *feedMessage + } + return nil +} + type BroadcastClients struct 
{ - clients []*broadcastclient.BroadcastClient + primaryClients []*broadcastclient.BroadcastClient + secondaryClients []*broadcastclient.BroadcastClient + numOfStartedSecondary int + + router *Router // Use atomic access connected int32 @@ -31,34 +57,55 @@ func NewBroadcastClients( addrVerifier contracts.AddressVerifierInterface, ) (*BroadcastClients, error) { config := configFetcher() - urlCount := len(config.URL) - if urlCount <= 0 { + if len(config.URL) == 0 && len(config.SecondaryURL) == 0 { return nil, nil } - clients := BroadcastClients{} - clients.clients = make([]*broadcastclient.BroadcastClient, 0, urlCount) + clients := BroadcastClients{ + router: &Router{ + messageChan: make(chan broadcaster.BroadcastFeedMessage, ROUTER_QUEUE_SIZE), + confirmedSequenceNumberChan: make(chan arbutil.MessageIndex, ROUTER_QUEUE_SIZE), + forwardTxStreamer: txStreamer, + forwardConfirmationChan: confirmedSequenceNumberListener, + }, + } var lastClientErr error - for _, address := range config.URL { - client, err := broadcastclient.NewBroadcastClient( - configFetcher, - address, - l2ChainId, - currentMessageCount, - txStreamer, - confirmedSequenceNumberListener, - fatalErrChan, - addrVerifier, - func(delta int32) { clients.adjustCount(delta) }, - ) - if err != nil { - lastClientErr = err - log.Warn("init broadcast client failed", "address", address) + makeFeeds := func(url []string) []*broadcastclient.BroadcastClient { + feeds := make([]*broadcastclient.BroadcastClient, 0, len(url)) + for _, address := range url { + client, err := broadcastclient.NewBroadcastClient( + configFetcher, + address, + l2ChainId, + currentMessageCount, + clients.router, + clients.router.confirmedSequenceNumberChan, + fatalErrChan, + addrVerifier, + func(delta int32) { clients.adjustCount(delta) }, + ) + if err != nil { + lastClientErr = err + log.Warn("init broadcast client failed", "address", address) + continue + } + feeds = append(feeds, client) } - clients.clients = append(clients.clients, client) 
+ return feeds } - if len(clients.clients) == 0 { + + clients.primaryClients = makeFeeds(config.URL) + clients.secondaryClients = makeFeeds(config.SecondaryURL) + + if len(clients.primaryClients) == 0 && len(clients.secondaryClients) == 0 { log.Error("no connected feed on startup, last error: %w", lastClientErr) + return nil, nil + } + + // have atleast one primary client + if len(clients.primaryClients) == 0 { + clients.primaryClients = append(clients.primaryClients, clients.secondaryClients[0]) + clients.secondaryClients = clients.secondaryClients[1:] } return &clients, nil @@ -72,12 +119,52 @@ func (bcs *BroadcastClients) adjustCount(delta int32) { } func (bcs *BroadcastClients) Start(ctx context.Context) { - for _, client := range bcs.clients { + bcs.router.StopWaiter.Start(ctx, bcs.router) + + for _, client := range bcs.primaryClients { client.Start(ctx) } + + bcs.router.LaunchThread(func(ctx context.Context) { + startNewFeedTimer := time.NewTicker(MAX_FEED_INACTIVE_TIME) + defer startNewFeedTimer.Stop() + for { + select { + case <-ctx.Done(): + return + case cs := <-bcs.router.confirmedSequenceNumberChan: + startNewFeedTimer.Stop() + bcs.router.forwardConfirmationChan <- cs + startNewFeedTimer.Reset(MAX_FEED_INACTIVE_TIME) + case msg := <-bcs.router.messageChan: + startNewFeedTimer.Stop() + if err := bcs.router.forwardTxStreamer.AddBroadcastMessages([]*broadcaster.BroadcastFeedMessage{&msg}); err != nil { + log.Error("Error routing message from Sequencer Feed", "err", err) + } + startNewFeedTimer.Reset(MAX_FEED_INACTIVE_TIME) + case <-startNewFeedTimer.C: + // failed to get messages from primary feed for ~5 seconds, start a new feed + bcs.StartSecondaryFeed(ctx) + } + } + }) } + +func (bcs *BroadcastClients) StartSecondaryFeed(ctx context.Context) { + if bcs.numOfStartedSecondary < len(bcs.secondaryClients) { + client := bcs.secondaryClients[bcs.numOfStartedSecondary] + bcs.numOfStartedSecondary += 1 + client.Start(ctx) + } else { + log.Warn("failed to start 
a new secondary feed all available secondary feeds were started") + } +} + func (bcs *BroadcastClients) StopAndWait() { - for _, client := range bcs.clients { + for _, client := range bcs.primaryClients { client.StopAndWait() } + for i := 0; i < bcs.numOfStartedSecondary; i++ { + bcs.secondaryClients[i].StopAndWait() + } } From a65d5b58db52441cad55ea39a63248d27614099b Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Mon, 9 Oct 2023 18:25:34 -0500 Subject: [PATCH 10/64] code refactor --- arbnode/node.go | 2 ++ broadcastclients/broadcastclients.go | 6 +++--- relay/relay.go | 1 + 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/arbnode/node.go b/arbnode/node.go index bf57b1c004..1b42880d3b 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -36,6 +36,7 @@ import ( "github.com/offchainlabs/nitro/das" "github.com/offchainlabs/nitro/execution" "github.com/offchainlabs/nitro/execution/gethexec" + "github.com/offchainlabs/nitro/relay" "github.com/offchainlabs/nitro/solgen/go/bridgegen" "github.com/offchainlabs/nitro/solgen/go/challengegen" "github.com/offchainlabs/nitro/solgen/go/ospgen" @@ -643,6 +644,7 @@ func createNodeImpl( nil, fatalErrChan, bpVerifier, + relay.ConfigDefault.Queue, ) if err != nil { return nil, err diff --git a/broadcastclients/broadcastclients.go b/broadcastclients/broadcastclients.go index 873d6be03b..a92c3b736d 100644 --- a/broadcastclients/broadcastclients.go +++ b/broadcastclients/broadcastclients.go @@ -18,7 +18,6 @@ import ( ) const MAX_FEED_INACTIVE_TIME = time.Second * 6 -const ROUTER_QUEUE_SIZE = 1024 type Router struct { stopwaiter.StopWaiter @@ -55,6 +54,7 @@ func NewBroadcastClients( confirmedSequenceNumberListener chan arbutil.MessageIndex, fatalErrChan chan error, addrVerifier contracts.AddressVerifierInterface, + queueCapcity int, ) (*BroadcastClients, error) { config := configFetcher() if len(config.URL) == 0 && len(config.SecondaryURL) == 0 { @@ -63,8 +63,8 @@ func NewBroadcastClients( clients := BroadcastClients{ 
router: &Router{ - messageChan: make(chan broadcaster.BroadcastFeedMessage, ROUTER_QUEUE_SIZE), - confirmedSequenceNumberChan: make(chan arbutil.MessageIndex, ROUTER_QUEUE_SIZE), + messageChan: make(chan broadcaster.BroadcastFeedMessage, queueCapcity), + confirmedSequenceNumberChan: make(chan arbutil.MessageIndex, queueCapcity), forwardTxStreamer: txStreamer, forwardConfirmationChan: confirmedSequenceNumberListener, }, diff --git a/relay/relay.go b/relay/relay.go index bb07251190..26894c0a3d 100644 --- a/relay/relay.go +++ b/relay/relay.go @@ -58,6 +58,7 @@ func NewRelay(config *Config, feedErrChan chan error) (*Relay, error) { confirmedSequenceNumberListener, feedErrChan, nil, + config.Queue, ) if err != nil { return nil, err From 226bd1cd129fb34fbb17291327cba43b8aba3a38 Mon Sep 17 00:00:00 2001 From: Goran Vladika Date: Tue, 10 Oct 2023 15:30:41 +0200 Subject: [PATCH 11/64] Align with latest RollupCretor api --- Dockerfile | 2 +- arbnode/node.go | 106 ++++++++++++++++++++++++++++++++---- cmd/chaininfo/chain_info.go | 1 + cmd/deploy/deploy.go | 5 +- 4 files changed, 101 insertions(+), 13 deletions(-) diff --git a/Dockerfile b/Dockerfile index 0698fb60e4..b05cb3d4d9 100644 --- a/Dockerfile +++ b/Dockerfile @@ -29,7 +29,7 @@ RUN apt-get update && \ apt-get install -y git python3 make g++ WORKDIR /workspace COPY contracts/package.json contracts/yarn.lock contracts/ -RUN cd contracts && yarn install --ignore-optional +RUN cd contracts && yarn install COPY contracts contracts/ COPY Makefile . 
RUN NITRO_BUILD_IGNORE_TIMESTAMPS=1 make build-solidity diff --git a/arbnode/node.go b/arbnode/node.go index 4ee4547273..9d43112364 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -64,6 +64,91 @@ func andTxSucceeded(ctx context.Context, l1Reader *headerreader.HeaderReader, tx return nil } +func deployBridgeCreator(ctx context.Context, l1Reader *headerreader.HeaderReader, auth *bind.TransactOpts, maxDataSize *big.Int) (common.Address, error) { + client := l1Reader.Client() + + /// deploy eth based templates + bridgeTemplate, tx, _, err := bridgegen.DeployBridge(auth, client) + err = andTxSucceeded(ctx, l1Reader, tx, err) + if err != nil { + return common.Address{}, fmt.Errorf("bridge deploy error: %w", err) + } + + // maxDataSize := big.NewInt(117964) + seqInboxTemplate, tx, _, err := bridgegen.DeploySequencerInbox(auth, client, maxDataSize) + err = andTxSucceeded(ctx, l1Reader, tx, err) + if err != nil { + return common.Address{}, fmt.Errorf("sequencer inbox deploy error: %w", err) + } + + inboxTemplate, tx, _, err := bridgegen.DeployInbox(auth, client, maxDataSize) + err = andTxSucceeded(ctx, l1Reader, tx, err) + if err != nil { + return common.Address{}, fmt.Errorf("inbox deploy error: %w", err) + } + + rollupEventBridgeTemplate, tx, _, err := rollupgen.DeployRollupEventInbox(auth, client) + err = andTxSucceeded(ctx, l1Reader, tx, err) + if err != nil { + return common.Address{}, fmt.Errorf("rollup event bridge deploy error: %w", err) + } + + outboxTemplate, tx, _, err := bridgegen.DeployOutbox(auth, client) + err = andTxSucceeded(ctx, l1Reader, tx, err) + if err != nil { + return common.Address{}, fmt.Errorf("outbox deploy error: %w", err) + } + + ethBasedTemplates := rollupgen.BridgeCreatorBridgeContracts{ + Bridge: bridgeTemplate, + SequencerInbox: seqInboxTemplate, + Inbox: inboxTemplate, + RollupEventInbox:rollupEventBridgeTemplate, + Outbox: outboxTemplate, + } + + /// deploy ERC20 based templates + erc20BridgeTemplate, tx, _, err := 
bridgegen.DeployERC20Bridge(auth, client) + err = andTxSucceeded(ctx, l1Reader, tx, err) + if err != nil { + return common.Address{}, fmt.Errorf("bridge deploy error: %w", err) + } + + erc20InboxTemplate, tx, _, err := bridgegen.DeployERC20Inbox(auth, client, maxDataSize) + err = andTxSucceeded(ctx, l1Reader, tx, err) + if err != nil { + return common.Address{}, fmt.Errorf("inbox deploy error: %w", err) + } + + erc20RollupEventBridgeTemplate, tx, _, err := rollupgen.DeployERC20RollupEventInbox(auth, client) + err = andTxSucceeded(ctx, l1Reader, tx, err) + if err != nil { + return common.Address{}, fmt.Errorf("rollup event bridge deploy error: %w", err) + } + + erc20OutboxTemplate, tx, _, err := bridgegen.DeployERC20Outbox(auth, client) + err = andTxSucceeded(ctx, l1Reader, tx, err) + if err != nil { + return common.Address{}, fmt.Errorf("outbox deploy error: %w", err) + } + + erc20BasedTemplates := rollupgen.BridgeCreatorBridgeContracts{ + Bridge: erc20BridgeTemplate, + SequencerInbox: seqInboxTemplate, + Inbox: erc20InboxTemplate, + RollupEventInbox:erc20RollupEventBridgeTemplate, + Outbox: erc20OutboxTemplate, + } + + bridgeCreatorAddr, tx, _, err := rollupgen.DeployBridgeCreator(auth, client, ethBasedTemplates, erc20BasedTemplates) + err = andTxSucceeded(ctx, l1Reader, tx, err) + if err != nil { + return common.Address{}, fmt.Errorf("bridge creator deploy error: %w", err) + } + + return bridgeCreatorAddr, nil +} + func deployChallengeFactory(ctx context.Context, l1Reader *headerreader.HeaderReader, auth *bind.TransactOpts) (common.Address, common.Address, error) { client := l1Reader.Client() osp0, tx, _, err := ospgen.DeployOneStepProver0(auth, client) @@ -105,11 +190,8 @@ func deployChallengeFactory(ctx context.Context, l1Reader *headerreader.HeaderRe return ospEntryAddr, challengeManagerAddr, nil } -func deployRollupCreator(ctx context.Context, l1Reader *headerreader.HeaderReader, auth *bind.TransactOpts) (*rollupgen.RollupCreator, common.Address, 
common.Address, common.Address, error) { - // deploying bridge creator takes ~14.2 million gas - auth.GasLimit = uint64(15000000) - bridgeCreator, tx, _, err := rollupgen.DeployBridgeCreator(auth, l1Reader.Client()) - err = andTxSucceeded(ctx, l1Reader, tx, err) +func deployRollupCreator(ctx context.Context, l1Reader *headerreader.HeaderReader, auth *bind.TransactOpts, maxDataSize *big.Int) (*rollupgen.RollupCreator, common.Address, common.Address, common.Address, error) { + bridgeCreator, err := deployBridgeCreator(ctx, l1Reader, auth, maxDataSize) if err != nil { return nil, common.Address{}, common.Address{}, common.Address{}, fmt.Errorf("bridge creator deploy error: %w", err) } @@ -161,7 +243,6 @@ func deployRollupCreator(ctx context.Context, l1Reader *headerreader.HeaderReade return nil, common.Address{}, common.Address{}, common.Address{}, fmt.Errorf("deploy helper creator deploy error: %w", err) } - tx, err = rollupCreator.SetTemplates( auth, bridgeCreator, @@ -209,12 +290,12 @@ func GenerateRollupConfig(prod bool, wasmModuleRoot common.Hash, rollupOwner com } } -func DeployOnL1(ctx context.Context, parentChainReader *headerreader.HeaderReader, deployAuth *bind.TransactOpts, batchPoster common.Address, authorizeValidators uint64, config rollupgen.Config, nativeToken common.Address) (*chaininfo.RollupAddresses, error) { +func DeployOnL1(ctx context.Context, parentChainReader *headerreader.HeaderReader, deployAuth *bind.TransactOpts, batchPoster common.Address, authorizeValidators uint64, config rollupgen.Config, nativeToken common.Address, maxDataSize *big.Int) (*chaininfo.RollupAddresses, error) { if config.WasmModuleRoot == (common.Hash{}) { return nil, errors.New("no machine specified") } - rollupCreator, _, validatorUtils, validatorWalletCreator, err := deployRollupCreator(ctx, parentChainReader, deployAuth) + rollupCreator, _, validatorUtils, validatorWalletCreator, err := deployRollupCreator(ctx, parentChainReader, deployAuth, maxDataSize) if err != nil 
{ return nil, fmt.Errorf("error deploying rollup creator: %w", err) } @@ -224,15 +305,16 @@ func DeployOnL1(ctx context.Context, parentChainReader *headerreader.HeaderReade validatorAddrs = append(validatorAddrs, crypto.CreateAddress(validatorWalletCreator, i)) } - // 0.1 gwei - maxFeePerGas := big.NewInt(100000000) + deployUtilityFactories := false + maxFeePerGas := big.NewInt(0) // needed when utility factories are deployed tx, err := rollupCreator.CreateRollup( deployAuth, config, batchPoster, validatorAddrs, + maxDataSize, nativeToken, - false, + deployUtilityFactories, maxFeePerGas, ) if err != nil { @@ -253,6 +335,8 @@ func DeployOnL1(ctx context.Context, parentChainReader *headerreader.HeaderReade SequencerInbox: info.SequencerInbox, DeployedAt: receipt.BlockNumber.Uint64(), Rollup: info.RollupAddress, + NativeToken: nativeToken, + UpgradeExecutor: info.UpgradeExecutor, ValidatorUtils: validatorUtils, ValidatorWalletCreator: validatorWalletCreator, }, nil diff --git a/cmd/chaininfo/chain_info.go b/cmd/chaininfo/chain_info.go index 2902e96115..cc13321513 100644 --- a/cmd/chaininfo/chain_info.go +++ b/cmd/chaininfo/chain_info.go @@ -107,6 +107,7 @@ type RollupAddresses struct { SequencerInbox common.Address `json:"sequencer-inbox"` Rollup common.Address `json:"rollup"` NativeToken common.Address `json:"native-token"` + UpgradeExecutor common.Address `json:"upgrade-executor"` ValidatorUtils common.Address `json:"validator-utils"` ValidatorWalletCreator common.Address `json:"validator-wallet-creator"` DeployedAt uint64 `json:"deployed-at"` diff --git a/cmd/deploy/deploy.go b/cmd/deploy/deploy.go index 4da3ae7652..d726ad21f4 100644 --- a/cmd/deploy/deploy.go +++ b/cmd/deploy/deploy.go @@ -40,7 +40,8 @@ func main() { deployAccount := flag.String("l1DeployAccount", "", "l1 seq account to use (default is first account in keystore)") ownerAddressString := flag.String("ownerAddress", "", "the rollup owner's address") sequencerAddressString := 
flag.String("sequencerAddress", "", "the sequencer's address") - nativeTokenAddressString := flag.String("nativeTokenAddress", "", "address of the ERC20 token which is used as native L2 currency") + nativeTokenAddressString := flag.String("nativeTokenAddress", "0x0000000000000000000000000000000000000000", "address of the ERC20 token which is used as native L2 currency") + maxDataSizeUint := flag.Uint64("maxDataSize", 117964, "maximum size of data") loserEscrowAddressString := flag.String("loserEscrowAddress", "", "the address which half of challenge loser's funds accumulate at") wasmmoduleroot := flag.String("wasmmoduleroot", "", "WASM module root hash") wasmrootpath := flag.String("wasmrootpath", "", "path to machine folders") @@ -56,6 +57,7 @@ func main() { prod := flag.Bool("prod", false, "Whether to configure the rollup for production or testing") flag.Parse() l1ChainId := new(big.Int).SetUint64(*l1ChainIdUint) + maxDataSize := new(big.Int).SetUint64(*maxDataSizeUint) if *prod { if *wasmmoduleroot == "" { @@ -147,6 +149,7 @@ func main() { *authorizevalidators, arbnode.GenerateRollupConfig(*prod, moduleRoot, ownerAddress, &chainConfig, chainConfigJson, loserEscrowAddress), nativeToken, + maxDataSize, ) if err != nil { flag.Usage() From 6a3c33741c5224ac0d9dd9299c79f7e1216e7c0e Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Tue, 10 Oct 2023 09:52:18 -0500 Subject: [PATCH 12/64] Revert "code refactor" This reverts commit a65d5b58db52441cad55ea39a63248d27614099b. 
--- arbnode/node.go | 2 -- broadcastclients/broadcastclients.go | 6 +++--- relay/relay.go | 1 - 3 files changed, 3 insertions(+), 6 deletions(-) diff --git a/arbnode/node.go b/arbnode/node.go index 1b42880d3b..bf57b1c004 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -36,7 +36,6 @@ import ( "github.com/offchainlabs/nitro/das" "github.com/offchainlabs/nitro/execution" "github.com/offchainlabs/nitro/execution/gethexec" - "github.com/offchainlabs/nitro/relay" "github.com/offchainlabs/nitro/solgen/go/bridgegen" "github.com/offchainlabs/nitro/solgen/go/challengegen" "github.com/offchainlabs/nitro/solgen/go/ospgen" @@ -644,7 +643,6 @@ func createNodeImpl( nil, fatalErrChan, bpVerifier, - relay.ConfigDefault.Queue, ) if err != nil { return nil, err diff --git a/broadcastclients/broadcastclients.go b/broadcastclients/broadcastclients.go index a92c3b736d..873d6be03b 100644 --- a/broadcastclients/broadcastclients.go +++ b/broadcastclients/broadcastclients.go @@ -18,6 +18,7 @@ import ( ) const MAX_FEED_INACTIVE_TIME = time.Second * 6 +const ROUTER_QUEUE_SIZE = 1024 type Router struct { stopwaiter.StopWaiter @@ -54,7 +55,6 @@ func NewBroadcastClients( confirmedSequenceNumberListener chan arbutil.MessageIndex, fatalErrChan chan error, addrVerifier contracts.AddressVerifierInterface, - queueCapcity int, ) (*BroadcastClients, error) { config := configFetcher() if len(config.URL) == 0 && len(config.SecondaryURL) == 0 { @@ -63,8 +63,8 @@ func NewBroadcastClients( clients := BroadcastClients{ router: &Router{ - messageChan: make(chan broadcaster.BroadcastFeedMessage, queueCapcity), - confirmedSequenceNumberChan: make(chan arbutil.MessageIndex, queueCapcity), + messageChan: make(chan broadcaster.BroadcastFeedMessage, ROUTER_QUEUE_SIZE), + confirmedSequenceNumberChan: make(chan arbutil.MessageIndex, ROUTER_QUEUE_SIZE), forwardTxStreamer: txStreamer, forwardConfirmationChan: confirmedSequenceNumberListener, }, diff --git a/relay/relay.go b/relay/relay.go index 
26894c0a3d..bb07251190 100644 --- a/relay/relay.go +++ b/relay/relay.go @@ -58,7 +58,6 @@ func NewRelay(config *Config, feedErrChan chan error) (*Relay, error) { confirmedSequenceNumberListener, feedErrChan, nil, - config.Queue, ) if err != nil { return nil, err From 001ae3179abb304ee87b3e36cdfc03414cefaf02 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Thu, 12 Oct 2023 22:05:10 +0200 Subject: [PATCH 13/64] address review comments --- arbos/addressSet/addressSet.go | 90 ++++++------- arbos/addressTable/addressTable.go | 2 +- arbos/arbosState/arbosstate.go | 32 ++--- arbos/arbosState/arbosstate_test.go | 2 +- arbos/blockhash/blockhash.go | 2 +- arbos/l1pricing/batchPoster.go | 8 +- arbos/l1pricing/l1pricing.go | 4 +- arbos/queue_test.go | 2 +- arbos/retryables/retryable.go | 14 +- arbos/storage/queue.go | 2 +- arbos/storage/storage.go | 200 +++++++++++++++------------- 11 files changed, 184 insertions(+), 174 deletions(-) diff --git a/arbos/addressSet/addressSet.go b/arbos/addressSet/addressSet.go index 38b07f7a1b..50a43c36cf 100644 --- a/arbos/addressSet/addressSet.go +++ b/arbos/addressSet/addressSet.go @@ -26,49 +26,49 @@ func Initialize(sto *storage.Storage) error { func OpenAddressSet(sto *storage.Storage) *AddressSet { return &AddressSet{ - sto.NoCacheCopy(), - sto.OpenStorageBackedUint64(0), - sto.OpenSubStorage([]byte{0}, false), + backingStorage: sto.WithoutCache(), + size: sto.OpenStorageBackedUint64(0), + byAddress: sto.OpenSubStorage([]byte{0}), } } -func (aset *AddressSet) Size() (uint64, error) { - return aset.size.Get() +func (as *AddressSet) Size() (uint64, error) { + return as.size.Get() } -func (aset *AddressSet) IsMember(addr common.Address) (bool, error) { - value, err := aset.byAddress.Get(util.AddressToHash(addr)) +func (as *AddressSet) IsMember(addr common.Address) (bool, error) { + value, err := as.byAddress.Get(util.AddressToHash(addr)) return value != (common.Hash{}), err } -func (aset *AddressSet) GetAnyMember() (*common.Address, 
error) { - size, err := aset.size.Get() +func (as *AddressSet) GetAnyMember() (*common.Address, error) { + size, err := as.size.Get() if err != nil || size == 0 { return nil, err } - sba := aset.backingStorage.OpenStorageBackedAddressOrNil(1) + sba := as.backingStorage.OpenStorageBackedAddressOrNil(1) addr, err := sba.Get() return addr, err } -func (aset *AddressSet) Clear() error { - size, err := aset.size.Get() +func (as *AddressSet) Clear() error { + size, err := as.size.Get() if err != nil || size == 0 { return err } for i := uint64(1); i <= size; i++ { - contents, _ := aset.backingStorage.GetByUint64(i) - _ = aset.backingStorage.ClearByUint64(i) - err = aset.byAddress.Clear(contents) + contents, _ := as.backingStorage.GetByUint64(i) + _ = as.backingStorage.ClearByUint64(i) + err = as.byAddress.Clear(contents) if err != nil { return err } } - return aset.size.Clear() + return as.size.Clear() } -func (aset *AddressSet) AllMembers(maxNumToReturn uint64) ([]common.Address, error) { - size, err := aset.size.Get() +func (as *AddressSet) AllMembers(maxNumToReturn uint64) ([]common.Address, error) { + size, err := as.size.Get() if err != nil { return nil, err } @@ -77,7 +77,7 @@ func (aset *AddressSet) AllMembers(maxNumToReturn uint64) ([]common.Address, err } ret := make([]common.Address, size) for i := range ret { - sba := aset.backingStorage.OpenStorageBackedAddress(uint64(i + 1)) + sba := as.backingStorage.OpenStorageBackedAddress(uint64(i + 1)) ret[i], err = sba.Get() if err != nil { return nil, err @@ -86,22 +86,22 @@ func (aset *AddressSet) AllMembers(maxNumToReturn uint64) ([]common.Address, err return ret, nil } -func (aset *AddressSet) ClearList() error { - size, err := aset.size.Get() +func (as *AddressSet) ClearList() error { + size, err := as.size.Get() if err != nil || size == 0 { return err } for i := uint64(1); i <= size; i++ { - err = aset.backingStorage.ClearByUint64(i) + err = as.backingStorage.ClearByUint64(i) if err != nil { return err } } - 
return aset.size.Clear() + return as.size.Clear() } -func (aset *AddressSet) RectifyMapping(addr common.Address) error { - isOwner, err := aset.IsMember(addr) +func (as *AddressSet) RectifyMapping(addr common.Address) error { + isOwner, err := as.IsMember(addr) if !isOwner || err != nil { return errors.New("RectifyMapping: Address is not an owner") } @@ -109,15 +109,15 @@ func (aset *AddressSet) RectifyMapping(addr common.Address) error { // If the mapping is correct, RectifyMapping shouldn't do anything // Additional safety check to avoid corruption of mapping after the initial fix addrAsHash := common.BytesToHash(addr.Bytes()) - slot, err := aset.byAddress.GetUint64(addrAsHash) + slot, err := as.byAddress.GetUint64(addrAsHash) if err != nil { return err } - atSlot, err := aset.backingStorage.GetByUint64(slot) + atSlot, err := as.backingStorage.GetByUint64(slot) if err != nil { return err } - size, err := aset.size.Get() + size, err := as.size.Get() if err != nil { return err } @@ -126,72 +126,72 @@ func (aset *AddressSet) RectifyMapping(addr common.Address) error { } // Remove the owner from map and add them as a new owner - err = aset.byAddress.Clear(addrAsHash) + err = as.byAddress.Clear(addrAsHash) if err != nil { return err } - return aset.Add(addr) + return as.Add(addr) } -func (aset *AddressSet) Add(addr common.Address) error { - present, err := aset.IsMember(addr) +func (as *AddressSet) Add(addr common.Address) error { + present, err := as.IsMember(addr) if present || err != nil { return err } - size, err := aset.size.Get() + size, err := as.size.Get() if err != nil { return err } slot := util.UintToHash(1 + size) addrAsHash := common.BytesToHash(addr.Bytes()) - err = aset.byAddress.Set(addrAsHash, slot) + err = as.byAddress.Set(addrAsHash, slot) if err != nil { return err } - sba := aset.backingStorage.OpenStorageBackedAddress(1 + size) + sba := as.backingStorage.OpenStorageBackedAddress(1 + size) err = sba.Set(addr) if err != nil { return err } - _, err 
= aset.size.Increment() + _, err = as.size.Increment() return err } -func (aset *AddressSet) Remove(addr common.Address, arbosVersion uint64) error { +func (as *AddressSet) Remove(addr common.Address, arbosVersion uint64) error { addrAsHash := common.BytesToHash(addr.Bytes()) - slot, err := aset.byAddress.GetUint64(addrAsHash) + slot, err := as.byAddress.GetUint64(addrAsHash) if slot == 0 || err != nil { return err } - err = aset.byAddress.Clear(addrAsHash) + err = as.byAddress.Clear(addrAsHash) if err != nil { return err } - size, err := aset.size.Get() + size, err := as.size.Get() if err != nil { return err } if slot < size { - atSize, err := aset.backingStorage.GetByUint64(size) + atSize, err := as.backingStorage.GetByUint64(size) if err != nil { return err } - err = aset.backingStorage.SetByUint64(slot, atSize) + err = as.backingStorage.SetByUint64(slot, atSize) if err != nil { return err } if arbosVersion >= 11 { - err = aset.byAddress.Set(atSize, util.UintToHash(slot)) + err = as.byAddress.Set(atSize, util.UintToHash(slot)) if err != nil { return err } } } - err = aset.backingStorage.ClearByUint64(size) + err = as.backingStorage.ClearByUint64(size) if err != nil { return err } - _, err = aset.size.Decrement() + _, err = as.size.Decrement() return err } diff --git a/arbos/addressTable/addressTable.go b/arbos/addressTable/addressTable.go index 56f04badff..f44e7f3dcf 100644 --- a/arbos/addressTable/addressTable.go +++ b/arbos/addressTable/addressTable.go @@ -25,7 +25,7 @@ func Initialize(sto *storage.Storage) { func Open(sto *storage.Storage) *AddressTable { numItems := sto.OpenStorageBackedUint64(0) - return &AddressTable{sto.NoCacheCopy(), sto.OpenSubStorage([]byte{}, false), numItems} + return &AddressTable{sto.WithoutCache(), sto.OpenSubStorage([]byte{}), numItems} } func (atab *AddressTable) Register(addr common.Address) (uint64, error) { diff --git a/arbos/arbosState/arbosstate.go b/arbos/arbosState/arbosstate.go index 750c796e33..8702c62d16 100644 --- 
a/arbos/arbosState/arbosstate.go +++ b/arbos/arbosState/arbosstate.go @@ -73,13 +73,13 @@ func OpenArbosState(stateDB vm.StateDB, burner burn.Burner) (*ArbosState, error) backingStorage.OpenStorageBackedUint64(uint64(upgradeVersionOffset)), backingStorage.OpenStorageBackedUint64(uint64(upgradeTimestampOffset)), backingStorage.OpenStorageBackedAddress(uint64(networkFeeAccountOffset)), - l1pricing.OpenL1PricingState(backingStorage.OpenSubStorage(l1PricingSubspace, true)), - l2pricing.OpenL2PricingState(backingStorage.OpenSubStorage(l2PricingSubspace, true)), - retryables.OpenRetryableState(backingStorage.OpenSubStorage(retryablesSubspace, true), stateDB), - addressTable.Open(backingStorage.OpenSubStorage(addressTableSubspace, true)), - addressSet.OpenAddressSet(backingStorage.OpenSubStorage(chainOwnerSubspace, true)), - merkleAccumulator.OpenMerkleAccumulator(backingStorage.OpenSubStorage(sendMerkleSubspace, true)), - blockhash.OpenBlockhashes(backingStorage.OpenSubStorage(blockhashesSubspace, true)), + l1pricing.OpenL1PricingState(backingStorage.OpenCachedSubStorage(l1PricingSubspace)), + l2pricing.OpenL2PricingState(backingStorage.OpenCachedSubStorage(l2PricingSubspace)), + retryables.OpenRetryableState(backingStorage.OpenCachedSubStorage(retryablesSubspace), stateDB), + addressTable.Open(backingStorage.OpenCachedSubStorage(addressTableSubspace)), + addressSet.OpenAddressSet(backingStorage.OpenCachedSubStorage(chainOwnerSubspace)), + merkleAccumulator.OpenMerkleAccumulator(backingStorage.OpenCachedSubStorage(sendMerkleSubspace)), + blockhash.OpenBlockhashes(backingStorage.OpenCachedSubStorage(blockhashesSubspace)), backingStorage.OpenStorageBackedBigInt(uint64(chainIdOffset)), backingStorage.OpenStorageBackedBytes(chainConfigSubspace), backingStorage.OpenStorageBackedUint64(uint64(genesisBlockNumOffset)), @@ -225,14 +225,14 @@ func InitializeArbosState(stateDB vm.StateDB, burner burn.Burner, chainConfig *p if desiredArbosVersion >= 2 { initialRewardsRecipient = 
initialChainOwner } - _ = l1pricing.InitializeL1PricingState(sto.OpenSubStorage(l1PricingSubspace, true), initialRewardsRecipient, initMessage.InitialL1BaseFee) - _ = l2pricing.InitializeL2PricingState(sto.OpenSubStorage(l2PricingSubspace, true)) - _ = retryables.InitializeRetryableState(sto.OpenSubStorage(retryablesSubspace, true)) - addressTable.Initialize(sto.OpenSubStorage(addressTableSubspace, true)) - merkleAccumulator.InitializeMerkleAccumulator(sto.OpenSubStorage(sendMerkleSubspace, true)) - blockhash.InitializeBlockhashes(sto.OpenSubStorage(blockhashesSubspace, true)) - - ownersStorage := sto.OpenSubStorage(chainOwnerSubspace, true) + _ = l1pricing.InitializeL1PricingState(sto.OpenCachedSubStorage(l1PricingSubspace), initialRewardsRecipient, initMessage.InitialL1BaseFee) + _ = l2pricing.InitializeL2PricingState(sto.OpenCachedSubStorage(l2PricingSubspace)) + _ = retryables.InitializeRetryableState(sto.OpenCachedSubStorage(retryablesSubspace)) + addressTable.Initialize(sto.OpenCachedSubStorage(addressTableSubspace)) + merkleAccumulator.InitializeMerkleAccumulator(sto.OpenCachedSubStorage(sendMerkleSubspace)) + blockhash.InitializeBlockhashes(sto.OpenCachedSubStorage(blockhashesSubspace)) + + ownersStorage := sto.OpenCachedSubStorage(chainOwnerSubspace) _ = addressSet.Initialize(ownersStorage) _ = addressSet.OpenAddressSet(ownersStorage).Add(initialChainOwner) @@ -428,7 +428,7 @@ func (state *ArbosState) ChainOwners() *addressSet.AddressSet { func (state *ArbosState) SendMerkleAccumulator() *merkleAccumulator.MerkleAccumulator { if state.sendMerkle == nil { - state.sendMerkle = merkleAccumulator.OpenMerkleAccumulator(state.backingStorage.OpenSubStorage(sendMerkleSubspace, true)) + state.sendMerkle = merkleAccumulator.OpenMerkleAccumulator(state.backingStorage.OpenCachedSubStorage(sendMerkleSubspace)) } return state.sendMerkle } diff --git a/arbos/arbosState/arbosstate_test.go b/arbos/arbosState/arbosstate_test.go index 384fc9c72f..ef63c23386 100644 --- 
a/arbos/arbosState/arbosstate_test.go +++ b/arbos/arbosState/arbosstate_test.go @@ -64,7 +64,7 @@ func TestStorageBackedInt64(t *testing.T) { func TestStorageSlots(t *testing.T) { state, _ := NewArbosMemoryBackedArbOSState() - sto := state.BackingStorage().OpenSubStorage([]byte{}, true) + sto := state.BackingStorage().OpenCachedSubStorage([]byte{}) println("nil address", colors.Blue, storage.NilAddressRepresentation.String(), colors.Clear) diff --git a/arbos/blockhash/blockhash.go b/arbos/blockhash/blockhash.go index 99fb3ef470..34c907207c 100644 --- a/arbos/blockhash/blockhash.go +++ b/arbos/blockhash/blockhash.go @@ -21,7 +21,7 @@ func InitializeBlockhashes(backingStorage *storage.Storage) { } func OpenBlockhashes(backingStorage *storage.Storage) *Blockhashes { - return &Blockhashes{backingStorage.NoCacheCopy(), backingStorage.OpenStorageBackedUint64(0)} + return &Blockhashes{backingStorage.WithoutCache(), backingStorage.OpenStorageBackedUint64(0)} } func (bh *Blockhashes) L1BlockNumber() (uint64, error) { diff --git a/arbos/l1pricing/batchPoster.go b/arbos/l1pricing/batchPoster.go index 4ac86307ec..a3428c441c 100644 --- a/arbos/l1pricing/batchPoster.go +++ b/arbos/l1pricing/batchPoster.go @@ -42,13 +42,13 @@ func InitializeBatchPostersTable(storage *storage.Storage) error { if err := totalFundsDue.SetChecked(common.Big0); err != nil { return err } - return addressSet.Initialize(storage.OpenSubStorage(PosterAddrsKey, true)) + return addressSet.Initialize(storage.OpenCachedSubStorage(PosterAddrsKey)) } func OpenBatchPostersTable(storage *storage.Storage) *BatchPostersTable { return &BatchPostersTable{ - posterAddrs: addressSet.OpenAddressSet(storage.OpenSubStorage(PosterAddrsKey, true)), - posterInfo: storage.OpenSubStorage(PosterInfoKey, false), + posterAddrs: addressSet.OpenAddressSet(storage.OpenCachedSubStorage(PosterAddrsKey)), + posterInfo: storage.OpenSubStorage(PosterInfoKey), totalFundsDue: storage.OpenStorageBackedBigInt(totalFundsDueOffset), } } @@ 
-68,7 +68,7 @@ func (bpt *BatchPostersTable) OpenPoster(poster common.Address, createIfNotExist } func (bpt *BatchPostersTable) internalOpen(poster common.Address) *BatchPosterState { - bpStorage := bpt.posterInfo.OpenSubStorage(poster.Bytes(), false) + bpStorage := bpt.posterInfo.OpenSubStorage(poster.Bytes()) return &BatchPosterState{ fundsDue: bpStorage.OpenStorageBackedBigInt(0), payTo: bpStorage.OpenStorageBackedAddress(1), diff --git a/arbos/l1pricing/l1pricing.go b/arbos/l1pricing/l1pricing.go index 1956eab29b..58c1042ab7 100644 --- a/arbos/l1pricing/l1pricing.go +++ b/arbos/l1pricing/l1pricing.go @@ -83,7 +83,7 @@ var InitialEquilibrationUnitsV0 = arbmath.UintToBig(60 * params.TxDataNonZeroGas var InitialEquilibrationUnitsV6 = arbmath.UintToBig(params.TxDataNonZeroGasEIP2028 * 10000000) func InitializeL1PricingState(sto *storage.Storage, initialRewardsRecipient common.Address, initialL1BaseFee *big.Int) error { - bptStorage := sto.OpenSubStorage(BatchPosterTableKey, true) + bptStorage := sto.OpenCachedSubStorage(BatchPosterTableKey) if err := InitializeBatchPostersTable(bptStorage); err != nil { return err } @@ -118,7 +118,7 @@ func InitializeL1PricingState(sto *storage.Storage, initialRewardsRecipient comm func OpenL1PricingState(sto *storage.Storage) *L1PricingState { return &L1PricingState{ sto, - OpenBatchPostersTable(sto.OpenSubStorage(BatchPosterTableKey, true)), + OpenBatchPostersTable(sto.OpenCachedSubStorage(BatchPosterTableKey)), sto.OpenStorageBackedAddress(payRewardsToOffset), sto.OpenStorageBackedBigUint(equilibrationUnitsOffset), sto.OpenStorageBackedUint64(inertiaOffset), diff --git a/arbos/queue_test.go b/arbos/queue_test.go index abeec49a93..ff993a233f 100644 --- a/arbos/queue_test.go +++ b/arbos/queue_test.go @@ -14,7 +14,7 @@ import ( func TestQueue(t *testing.T) { state, statedb := arbosState.NewArbosMemoryBackedArbOSState() - sto := state.BackingStorage().OpenSubStorage([]byte{}, true) + sto := 
state.BackingStorage().OpenCachedSubStorage([]byte{}) Require(t, storage.InitializeQueue(sto)) q := storage.OpenQueue(sto) diff --git a/arbos/retryables/retryable.go b/arbos/retryables/retryable.go index 0322938541..1121de01f4 100644 --- a/arbos/retryables/retryable.go +++ b/arbos/retryables/retryable.go @@ -31,13 +31,13 @@ var ( ) func InitializeRetryableState(sto *storage.Storage) error { - return storage.InitializeQueue(sto.OpenSubStorage(timeoutQueueKey, true)) + return storage.InitializeQueue(sto.OpenCachedSubStorage(timeoutQueueKey)) } func OpenRetryableState(sto *storage.Storage, statedb vm.StateDB) *RetryableState { return &RetryableState{ sto, - storage.OpenQueue(sto.OpenSubStorage(timeoutQueueKey, true)), + storage.OpenQueue(sto.OpenCachedSubStorage(timeoutQueueKey)), } } @@ -73,7 +73,7 @@ func (rs *RetryableState) CreateRetryable( beneficiary common.Address, calldata []byte, ) (*Retryable, error) { - sto := rs.retryables.OpenSubStorage(id.Bytes(), false) + sto := rs.retryables.OpenSubStorage(id.Bytes()) ret := &Retryable{ id, sto, @@ -100,7 +100,7 @@ func (rs *RetryableState) CreateRetryable( } func (rs *RetryableState) OpenRetryable(id common.Hash, currentTimestamp uint64) (*Retryable, error) { - sto := rs.retryables.OpenSubStorage(id.Bytes(), false) + sto := rs.retryables.OpenSubStorage(id.Bytes()) timeoutStorage := sto.OpenStorageBackedUint64(timeoutOffset) timeout, err := timeoutStorage.Get() if timeout == 0 || timeout < currentTimestamp || err != nil { @@ -134,7 +134,7 @@ func (rs *RetryableState) RetryableSizeBytes(id common.Hash, currentTime uint64) } func (rs *RetryableState) DeleteRetryable(id common.Hash, evm *vm.EVM, scenario util.TracingScenario) (bool, error) { - retStorage := rs.retryables.OpenSubStorage(id.Bytes(), false) + retStorage := rs.retryables.OpenSubStorage(id.Bytes()) timeout, err := retStorage.GetByUint64(timeoutOffset) if timeout == (common.Hash{}) || err != nil { return false, err @@ -157,7 +157,7 @@ func (rs *RetryableState) 
DeleteRetryable(id common.Hash, evm *vm.EVM, scenario _ = retStorage.ClearByUint64(beneficiaryOffset) _ = retStorage.ClearByUint64(timeoutOffset) _ = retStorage.ClearByUint64(timeoutWindowsLeftOffset) - err = retStorage.OpenSubStorage(calldataKey, false).ClearBytes() + err = retStorage.OpenSubStorage(calldataKey).ClearBytes() return true, err } @@ -291,7 +291,7 @@ func (rs *RetryableState) TryToReapOneRetryable(currentTimestamp uint64, evm *vm if err != nil || id == nil { return err } - retryableStorage := rs.retryables.OpenSubStorage(id.Bytes(), false) + retryableStorage := rs.retryables.OpenSubStorage(id.Bytes()) timeoutStorage := retryableStorage.OpenStorageBackedUint64(timeoutOffset) timeout, err := timeoutStorage.Get() if err != nil { diff --git a/arbos/storage/queue.go b/arbos/storage/queue.go index 032ac11aad..9c02dc1ee7 100644 --- a/arbos/storage/queue.go +++ b/arbos/storage/queue.go @@ -25,7 +25,7 @@ func InitializeQueue(sto *Storage) error { func OpenQueue(sto *Storage) *Queue { return &Queue{ - sto.NoCacheCopy(), + sto.WithoutCache(), sto.OpenStorageBackedUint64(0), sto.OpenStorageBackedUint64(1), } diff --git a/arbos/storage/storage.go b/arbos/storage/storage.go index 66f3d49473..47ca66445d 100644 --- a/arbos/storage/storage.go +++ b/arbos/storage/storage.go @@ -91,11 +91,11 @@ func NewMemoryBackedStateDB() vm.StateDB { // a page, to preserve contiguity within a page. This will reduce cost if/when Ethereum switches to storage // representations that reward contiguity. // Because page numbers are 248 bits, this gives us 124-bit security against collision attacks, which is good enough. -func (store *Storage) mapAddress(key common.Hash) common.Hash { +func (s *Storage) mapAddress(key common.Hash) common.Hash { keyBytes := key.Bytes() boundary := common.HashLength - 1 mapped := make([]byte, 0, common.HashLength) - mapped = append(mapped, store.cachedKeccak(store.storageKey, keyBytes[:boundary])[:boundary]...) 
+ mapped = append(mapped, s.cachedKeccak(s.storageKey, keyBytes[:boundary])[:boundary]...) mapped = append(mapped, keyBytes[boundary]) return common.BytesToHash(mapped) } @@ -107,132 +107,139 @@ func writeCost(value common.Hash) uint64 { return StorageWriteCost } -func (store *Storage) Account() common.Address { - return store.account +func (s *Storage) Account() common.Address { + return s.account } -func (store *Storage) Get(key common.Hash) (common.Hash, error) { - err := store.burner.Burn(StorageReadCost) +func (s *Storage) Get(key common.Hash) (common.Hash, error) { + err := s.burner.Burn(StorageReadCost) if err != nil { return common.Hash{}, err } - if info := store.burner.TracingInfo(); info != nil { + if info := s.burner.TracingInfo(); info != nil { info.RecordStorageGet(key) } - return store.db.GetState(store.account, store.mapAddress(key)), nil + return s.db.GetState(s.account, s.mapAddress(key)), nil } -func (store *Storage) GetStorageSlot(key common.Hash) common.Hash { - return store.mapAddress(key) +func (s *Storage) GetStorageSlot(key common.Hash) common.Hash { + return s.mapAddress(key) } -func (store *Storage) GetUint64(key common.Hash) (uint64, error) { - value, err := store.Get(key) +func (s *Storage) GetUint64(key common.Hash) (uint64, error) { + value, err := s.Get(key) return value.Big().Uint64(), err } -func (store *Storage) GetByUint64(key uint64) (common.Hash, error) { - return store.Get(util.UintToHash(key)) +func (s *Storage) GetByUint64(key uint64) (common.Hash, error) { + return s.Get(util.UintToHash(key)) } -func (store *Storage) GetUint64ByUint64(key uint64) (uint64, error) { - return store.GetUint64(util.UintToHash(key)) +func (s *Storage) GetUint64ByUint64(key uint64) (uint64, error) { + return s.GetUint64(util.UintToHash(key)) } -func (store *Storage) Set(key common.Hash, value common.Hash) error { - if store.burner.ReadOnly() { +func (s *Storage) Set(key common.Hash, value common.Hash) error { + if s.burner.ReadOnly() { 
log.Error("Read-only burner attempted to mutate state", "key", key, "value", value) return vm.ErrWriteProtection } - err := store.burner.Burn(writeCost(value)) + err := s.burner.Burn(writeCost(value)) if err != nil { return err } - if info := store.burner.TracingInfo(); info != nil { + if info := s.burner.TracingInfo(); info != nil { info.RecordStorageSet(key, value) } - store.db.SetState(store.account, store.mapAddress(key), value) + s.db.SetState(s.account, s.mapAddress(key), value) return nil } -func (store *Storage) SetByUint64(key uint64, value common.Hash) error { - return store.Set(util.UintToHash(key), value) +func (s *Storage) SetByUint64(key uint64, value common.Hash) error { + return s.Set(util.UintToHash(key), value) } -func (store *Storage) SetUint64ByUint64(key uint64, value uint64) error { - return store.Set(util.UintToHash(key), util.UintToHash(value)) +func (s *Storage) SetUint64ByUint64(key uint64, value uint64) error { + return s.Set(util.UintToHash(key), util.UintToHash(value)) } -func (store *Storage) Clear(key common.Hash) error { - return store.Set(key, common.Hash{}) +func (s *Storage) Clear(key common.Hash) error { + return s.Set(key, common.Hash{}) } -func (store *Storage) ClearByUint64(key uint64) error { - return store.Set(util.UintToHash(key), common.Hash{}) +func (s *Storage) ClearByUint64(key uint64) error { + return s.Set(util.UintToHash(key), common.Hash{}) } -func (store *Storage) Swap(key common.Hash, newValue common.Hash) (common.Hash, error) { - oldValue, err := store.Get(key) +func (s *Storage) Swap(key common.Hash, newValue common.Hash) (common.Hash, error) { + oldValue, err := s.Get(key) if err != nil { return common.Hash{}, err } - return oldValue, store.Set(key, newValue) + return oldValue, s.Set(key, newValue) } -func (store *Storage) OpenSubStorage(id []byte, cacheKeys bool) *Storage { - var hashCache *containers.SafeLruCache[string, []byte] - if cacheKeys { - hashCache = storageHashCache +func (s *Storage) 
OpenCachedSubStorage(id []byte) *Storage { + return &Storage{ + account: s.account, + db: s.db, + storageKey: s.cachedKeccak(s.storageKey, id), + burner: s.burner, + hashCache: storageHashCache, } +} +func (s *Storage) OpenSubStorage(id []byte) *Storage { return &Storage{ - store.account, - store.db, - store.cachedKeccak(store.storageKey, id), - store.burner, - hashCache, + account: s.account, + db: s.db, + storageKey: s.cachedKeccak(s.storageKey, id), + burner: s.burner, + hashCache: nil, } } -func (store *Storage) NoCacheCopy() *Storage { +// Returns shallow copy of Storage that won't use storage key hash cache. +// The storage space represented by the returned Storage is kept the same. +func (s *Storage) WithoutCache() *Storage { return &Storage{ - store.account, - store.db, - store.storageKey, - store.burner, - nil, + account: s.account, + db: s.db, + storageKey: s.storageKey, + burner: s.burner, + hashCache: nil, } } -func (store *Storage) SetBytes(b []byte) error { - err := store.ClearBytes() +func (s *Storage) SetBytes(b []byte) error { + err := s.ClearBytes() if err != nil { return err } - err = store.SetUint64ByUint64(0, uint64(len(b))) + err = s.SetUint64ByUint64(0, uint64(len(b))) if err != nil { return err } offset := uint64(1) for len(b) >= 32 { - err = store.SetByUint64(offset, common.BytesToHash(b[:32])) + err = s.SetByUint64(offset, common.BytesToHash(b[:32])) if err != nil { return err } b = b[32:] offset++ } - return store.SetByUint64(offset, common.BytesToHash(b)) + return s.SetByUint64(offset, common.BytesToHash(b)) } -func (store *Storage) GetBytes() ([]byte, error) { - bytesLeft, err := store.GetUint64ByUint64(0) +func (s *Storage) GetBytes() ([]byte, error) { + bytesLeft, err := s.GetUint64ByUint64(0) if err != nil { return nil, err } ret := []byte{} offset := uint64(1) for bytesLeft >= 32 { - next, err := store.GetByUint64(offset) + next, err := s.GetByUint64(offset) if err != nil { return nil, err } @@ -240,7 +247,7 @@ func (store *Storage) 
GetBytes() ([]byte, error) { bytesLeft -= 32 offset++ } - next, err := store.GetByUint64(offset) + next, err := s.GetByUint64(offset) if err != nil { return nil, err } @@ -248,18 +255,18 @@ func (store *Storage) GetBytes() ([]byte, error) { return ret, nil } -func (store *Storage) GetBytesSize() (uint64, error) { - return store.GetUint64ByUint64(0) +func (s *Storage) GetBytesSize() (uint64, error) { + return s.GetUint64ByUint64(0) } -func (store *Storage) ClearBytes() error { - bytesLeft, err := store.GetUint64ByUint64(0) +func (s *Storage) ClearBytes() error { + bytesLeft, err := s.GetUint64ByUint64(0) if err != nil { return err } offset := uint64(1) for bytesLeft > 0 { - err := store.ClearByUint64(offset) + err := s.ClearByUint64(offset) if err != nil { return err } @@ -270,48 +277,51 @@ func (store *Storage) ClearBytes() error { bytesLeft -= 32 } } - return store.ClearByUint64(0) + return s.ClearByUint64(0) } -func (store *Storage) Burner() burn.Burner { - return store.burner // not public because these should never be changed once set +func (s *Storage) Burner() burn.Burner { + return s.burner // not public because these should never be changed once set } -func (store *Storage) Keccak(data ...[]byte) ([]byte, error) { +func (s *Storage) Keccak(data ...[]byte) ([]byte, error) { byteCount := 0 for _, part := range data { byteCount += len(part) } cost := 30 + 6*arbmath.WordsForBytes(uint64(byteCount)) - if err := store.burner.Burn(cost); err != nil { + if err := s.burner.Burn(cost); err != nil { return nil, err } return crypto.Keccak256(data...), nil } +func (s *Storage) KeccakHash(data ...[]byte) (common.Hash, error) { + bytes, err := s.Keccak(data...) 
+ return common.BytesToHash(bytes), err +} + +// Returns crypto.Keccak256 result for the given data +// If available the result is taken from hash cache +// otherwise crypto.Keccak256 is executed and its result is added to the cache and returned +// note: the method doesn't burn gas, as it's only intended for generating storage subspace keys and mapping slot addresses // note: returned slice is not thread-safe -func (store *Storage) cachedKeccak(data ...[]byte) []byte { - if store.hashCache == nil { +func (s *Storage) cachedKeccak(data ...[]byte) []byte { + if s.hashCache == nil { return crypto.Keccak256(data...) } keyString := string(bytes.Join(data, []byte{})) - hash, wasCached := store.hashCache.Get(keyString) - if wasCached { + if hash, wasCached := s.hashCache.Get(keyString); wasCached { return hash } - hash = crypto.Keccak256(data...) - evicted := store.hashCache.Add(keyString, hash) + hash := crypto.Keccak256(data...) + evicted := s.hashCache.Add(keyString, hash) if evicted && cacheFullLogged.CompareAndSwap(false, true) { log.Warn("Hash cache full, we didn't expect that. Some non-static storage keys may fill up the cache.") } return hash } -func (store *Storage) KeccakHash(data ...[]byte) (common.Hash, error) { - bytes, err := store.Keccak(data...) 
- return common.BytesToHash(bytes), err -} - type StorageSlot struct { account common.Address db vm.StateDB @@ -319,8 +329,8 @@ type StorageSlot struct { burner burn.Burner } -func (store *Storage) NewSlot(offset uint64) StorageSlot { - return StorageSlot{store.account, store.db, store.mapAddress(util.UintToHash(offset)), store.burner} +func (s *Storage) NewSlot(offset uint64) StorageSlot { + return StorageSlot{s.account, s.db, s.mapAddress(util.UintToHash(offset)), s.burner} } func (ss *StorageSlot) Get() (common.Hash, error) { @@ -359,8 +369,8 @@ type StorageBackedInt64 struct { StorageSlot } -func (store *Storage) OpenStorageBackedInt64(offset uint64) StorageBackedInt64 { - return StorageBackedInt64{store.NewSlot(offset)} +func (s *Storage) OpenStorageBackedInt64(offset uint64) StorageBackedInt64 { + return StorageBackedInt64{s.NewSlot(offset)} } func (sbu *StorageBackedInt64) Get() (int64, error) { @@ -380,8 +390,8 @@ type StorageBackedBips struct { backing StorageBackedInt64 } -func (store *Storage) OpenStorageBackedBips(offset uint64) StorageBackedBips { - return StorageBackedBips{StorageBackedInt64{store.NewSlot(offset)}} +func (s *Storage) OpenStorageBackedBips(offset uint64) StorageBackedBips { + return StorageBackedBips{StorageBackedInt64{s.NewSlot(offset)}} } func (sbu *StorageBackedBips) Get() (arbmath.Bips, error) { @@ -397,8 +407,8 @@ type StorageBackedUint64 struct { StorageSlot } -func (store *Storage) OpenStorageBackedUint64(offset uint64) StorageBackedUint64 { - return StorageBackedUint64{store.NewSlot(offset)} +func (s *Storage) OpenStorageBackedUint64(offset uint64) StorageBackedUint64 { + return StorageBackedUint64{s.NewSlot(offset)} } func (sbu *StorageBackedUint64) Get() (uint64, error) { @@ -485,8 +495,8 @@ type StorageBackedBigUint struct { StorageSlot } -func (store *Storage) OpenStorageBackedBigUint(offset uint64) StorageBackedBigUint { - return StorageBackedBigUint{store.NewSlot(offset)} +func (s *Storage) OpenStorageBackedBigUint(offset 
uint64) StorageBackedBigUint { + return StorageBackedBigUint{s.NewSlot(offset)} } func (sbbu *StorageBackedBigUint) Get() (*big.Int, error) { @@ -524,8 +534,8 @@ type StorageBackedBigInt struct { StorageSlot } -func (store *Storage) OpenStorageBackedBigInt(offset uint64) StorageBackedBigInt { - return StorageBackedBigInt{store.NewSlot(offset)} +func (s *Storage) OpenStorageBackedBigInt(offset uint64) StorageBackedBigInt { + return StorageBackedBigInt{s.NewSlot(offset)} } func (sbbi *StorageBackedBigInt) Get() (*big.Int, error) { @@ -581,8 +591,8 @@ type StorageBackedAddress struct { StorageSlot } -func (store *Storage) OpenStorageBackedAddress(offset uint64) StorageBackedAddress { - return StorageBackedAddress{store.NewSlot(offset)} +func (s *Storage) OpenStorageBackedAddress(offset uint64) StorageBackedAddress { + return StorageBackedAddress{s.NewSlot(offset)} } func (sba *StorageBackedAddress) Get() (common.Address, error) { @@ -604,8 +614,8 @@ func init() { NilAddressRepresentation = common.BigToHash(new(big.Int).Lsh(big.NewInt(1), 255)) } -func (store *Storage) OpenStorageBackedAddressOrNil(offset uint64) StorageBackedAddressOrNil { - return StorageBackedAddressOrNil{store.NewSlot(offset)} +func (s *Storage) OpenStorageBackedAddressOrNil(offset uint64) StorageBackedAddressOrNil { + return StorageBackedAddressOrNil{s.NewSlot(offset)} } func (sba *StorageBackedAddressOrNil) Get() (*common.Address, error) { @@ -629,9 +639,9 @@ type StorageBackedBytes struct { Storage } -func (store *Storage) OpenStorageBackedBytes(id []byte) StorageBackedBytes { +func (s *Storage) OpenStorageBackedBytes(id []byte) StorageBackedBytes { return StorageBackedBytes{ - *store.OpenSubStorage(id, false), + *s.OpenSubStorage(id), } } From eeb327c3b7d3e81ff057f58e0fb366739610dee7 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Thu, 12 Oct 2023 23:45:59 +0200 Subject: [PATCH 14/64] replace SafeLruCache with lru.Cache from geth --- arbos/storage/storage.go | 6 +-- 
util/containers/safe_lru.go | 82 ------------------------------------- 2 files changed, 3 insertions(+), 85 deletions(-) delete mode 100644 util/containers/safe_lru.go diff --git a/arbos/storage/storage.go b/arbos/storage/storage.go index 47ca66445d..63987b91f8 100644 --- a/arbos/storage/storage.go +++ b/arbos/storage/storage.go @@ -10,6 +10,7 @@ import ( "sync/atomic" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/lru" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/vm" @@ -19,7 +20,6 @@ import ( "github.com/offchainlabs/nitro/arbos/burn" "github.com/offchainlabs/nitro/arbos/util" "github.com/offchainlabs/nitro/util/arbmath" - "github.com/offchainlabs/nitro/util/containers" ) // Storage allows ArbOS to store data persistently in the Ethereum-compatible stateDB. This is represented in @@ -46,7 +46,7 @@ type Storage struct { db vm.StateDB storageKey []byte burner burn.Burner - hashCache *containers.SafeLruCache[string, []byte] + hashCache *lru.Cache[string, []byte] } const StorageReadCost = params.SloadGasEIP2200 @@ -55,7 +55,7 @@ const StorageWriteZeroCost = params.SstoreResetGasEIP2200 const storageKeyCacheSize = 1024 -var storageHashCache = containers.NewSafeLruCache[string, []byte](storageKeyCacheSize) +var storageHashCache = lru.NewCache[string, []byte](storageKeyCacheSize) var cacheFullLogged atomic.Bool // NewGeth uses a Geth database to create an evm key-value store diff --git a/util/containers/safe_lru.go b/util/containers/safe_lru.go deleted file mode 100644 index 40e7d993ec..0000000000 --- a/util/containers/safe_lru.go +++ /dev/null @@ -1,82 +0,0 @@ -package containers - -import ( - "sync" -) - -// thread safe version of containers.LruCache -type SafeLruCache[K comparable, V any] struct { - inner *LruCache[K, V] - mutex sync.RWMutex -} - -func NewSafeLruCache[K comparable, V any](size int) *SafeLruCache[K, V] { - return 
NewSafeLruCacheWithOnEvict[K, V](size, nil) -} - -func NewSafeLruCacheWithOnEvict[K comparable, V any](size int, onEvict func(K, V)) *SafeLruCache[K, V] { - return &SafeLruCache[K, V]{ - inner: NewLruCacheWithOnEvict(size, onEvict), - } -} - -// Returns true if an item was evicted -func (c *SafeLruCache[K, V]) Add(key K, value V) bool { - c.mutex.Lock() - defer c.mutex.Unlock() - return c.inner.Add(key, value) -} - -func (c *SafeLruCache[K, V]) Get(key K) (V, bool) { - c.mutex.Lock() - defer c.mutex.Unlock() - return c.inner.Get(key) -} - -func (c *SafeLruCache[K, V]) Contains(key K) bool { - c.mutex.RLock() - defer c.mutex.RUnlock() - return c.inner.Contains(key) -} - -func (c *SafeLruCache[K, V]) Remove(key K) { - c.mutex.Lock() - defer c.mutex.Unlock() - c.inner.Remove(key) -} - -func (c *SafeLruCache[K, V]) GetOldest() (K, V, bool) { - c.mutex.RLock() - defer c.mutex.RUnlock() - return c.inner.GetOldest() -} - -func (c *SafeLruCache[K, V]) RemoveOldest() { - c.mutex.Lock() - defer c.mutex.Unlock() - c.inner.RemoveOldest() -} - -func (c *SafeLruCache[K, V]) Len() int { - c.mutex.RLock() - defer c.mutex.RUnlock() - return c.inner.Len() -} - -func (c *SafeLruCache[K, V]) Size() int { - c.mutex.RLock() - defer c.mutex.RUnlock() - return c.inner.Size() -} - -func (c *SafeLruCache[K, V]) Clear() { - c.mutex.Lock() - defer c.mutex.Unlock() - c.inner.Clear() -} - -func (c *SafeLruCache[K, V]) Resize(newSize int) { - c.mutex.Lock() - defer c.mutex.Unlock() - c.inner.Resize(newSize) -} From 6f1c79fca98a5a2eec1c9302f4bb751c2599064f Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Fri, 13 Oct 2023 00:20:15 +0200 Subject: [PATCH 15/64] add todos for renaming packages --- arbos/addressSet/addressSet.go | 2 ++ arbos/addressTable/addressTable.go | 2 ++ 2 files changed, 4 insertions(+) diff --git a/arbos/addressSet/addressSet.go b/arbos/addressSet/addressSet.go index 50a43c36cf..6d2db26941 100644 --- a/arbos/addressSet/addressSet.go +++ b/arbos/addressSet/addressSet.go @@ 
-3,6 +3,8 @@ package addressSet +// TODO rename this package to lowercase + import ( "errors" diff --git a/arbos/addressTable/addressTable.go b/arbos/addressTable/addressTable.go index f44e7f3dcf..bfc526d39d 100644 --- a/arbos/addressTable/addressTable.go +++ b/arbos/addressTable/addressTable.go @@ -3,6 +3,8 @@ package addressTable +// TODO rename this package to lowercase + import ( "bytes" "errors" From 9ec92bc55121f2c54571268b567a9249d5680060 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Fri, 13 Oct 2023 00:22:01 +0200 Subject: [PATCH 16/64] rephrase todos --- arbos/addressSet/addressSet.go | 2 +- arbos/addressTable/addressTable.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/arbos/addressSet/addressSet.go b/arbos/addressSet/addressSet.go index 6d2db26941..1f09ff1440 100644 --- a/arbos/addressSet/addressSet.go +++ b/arbos/addressSet/addressSet.go @@ -3,7 +3,7 @@ package addressSet -// TODO rename this package to lowercase +// TODO lowercase this package name import ( "errors" diff --git a/arbos/addressTable/addressTable.go b/arbos/addressTable/addressTable.go index bfc526d39d..3fbb7b3782 100644 --- a/arbos/addressTable/addressTable.go +++ b/arbos/addressTable/addressTable.go @@ -3,7 +3,7 @@ package addressTable -// TODO rename this package to lowercase +// TODO lowercase this package name import ( "bytes" From e7f1155548699efef9c4c0f8282336355aac9521 Mon Sep 17 00:00:00 2001 From: Goran Vladika Date: Fri, 13 Oct 2023 10:38:09 +0200 Subject: [PATCH 17/64] Use develop branch --- .gitmodules | 2 +- contracts | 2 +- nitro-testnode | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.gitmodules b/.gitmodules index 75e66a648c..977f7a4772 100644 --- a/.gitmodules +++ b/.gitmodules @@ -13,7 +13,7 @@ [submodule "contracts"] path = contracts url = https://github.com/OffchainLabs/nitro-contracts.git - branch = feature-orbit-bridge + branch = develop [submodule "arbitrator/wasm-testsuite/testsuite"] path = 
arbitrator/wasm-testsuite/testsuite url = https://github.com/WebAssembly/testsuite.git diff --git a/contracts b/contracts index 7dc1aa4382..46d1767ce2 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit 7dc1aa43829d9f4afe7251ad28d96a5d0e1d48c7 +Subproject commit 46d1767ce2181c501f6b1f8651d6f1ace3da1c41 diff --git a/nitro-testnode b/nitro-testnode index 441166624c..82a072dde9 160000 --- a/nitro-testnode +++ b/nitro-testnode @@ -1 +1 @@ -Subproject commit 441166624c54857a11c0a880f12e19d7dbeebc69 +Subproject commit 82a072dde9f82ac57bd9ff4306dae7c36c980dba From 1539963fd93443ed37397397818ccd3c46ebe59b Mon Sep 17 00:00:00 2001 From: Goran Vladika Date: Fri, 13 Oct 2023 15:13:26 +0200 Subject: [PATCH 18/64] Fix maxDataSize leftovers --- arbnode/node.go | 20 ++++++++++---------- system_tests/common_test.go | 1 + system_tests/full_challenge_impl_test.go | 1 + 3 files changed, 12 insertions(+), 10 deletions(-) diff --git a/arbnode/node.go b/arbnode/node.go index ea09aab253..675aca3361 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -98,11 +98,11 @@ func deployBridgeCreator(ctx context.Context, l1Reader *headerreader.HeaderReade } ethBasedTemplates := rollupgen.BridgeCreatorBridgeContracts{ - Bridge: bridgeTemplate, - SequencerInbox: seqInboxTemplate, - Inbox: inboxTemplate, - RollupEventInbox:rollupEventBridgeTemplate, - Outbox: outboxTemplate, + Bridge: bridgeTemplate, + SequencerInbox: seqInboxTemplate, + Inbox: inboxTemplate, + RollupEventInbox: rollupEventBridgeTemplate, + Outbox: outboxTemplate, } /// deploy ERC20 based templates @@ -131,11 +131,11 @@ func deployBridgeCreator(ctx context.Context, l1Reader *headerreader.HeaderReade } erc20BasedTemplates := rollupgen.BridgeCreatorBridgeContracts{ - Bridge: erc20BridgeTemplate, - SequencerInbox: seqInboxTemplate, - Inbox: erc20InboxTemplate, - RollupEventInbox:erc20RollupEventBridgeTemplate, - Outbox: erc20OutboxTemplate, + Bridge: erc20BridgeTemplate, + SequencerInbox: seqInboxTemplate, + Inbox: 
erc20InboxTemplate, + RollupEventInbox: erc20RollupEventBridgeTemplate, + Outbox: erc20OutboxTemplate, } bridgeCreatorAddr, tx, _, err := rollupgen.DeployBridgeCreator(auth, client, ethBasedTemplates, erc20BasedTemplates) diff --git a/system_tests/common_test.go b/system_tests/common_test.go index b3965c5c85..525dada685 100644 --- a/system_tests/common_test.go +++ b/system_tests/common_test.go @@ -490,6 +490,7 @@ func DeployOnTestL1( 0, arbnode.GenerateRollupConfig(false, locator.LatestWasmModuleRoot(), l1info.GetAddress("RollupOwner"), chainConfig, serializedChainConfig, common.Address{}), common.Address{}, + big.NewInt(117964), ) Require(t, err) l1info.SetContract("Bridge", addresses.Bridge) diff --git a/system_tests/full_challenge_impl_test.go b/system_tests/full_challenge_impl_test.go index 99064d1913..f22960c39d 100644 --- a/system_tests/full_challenge_impl_test.go +++ b/system_tests/full_challenge_impl_test.go @@ -207,6 +207,7 @@ func setupSequencerInboxStub(ctx context.Context, t *testing.T, l1Info *Blockcha bridgeAddr, l1Info.GetAddress("sequencer"), timeBounds, + big.NewInt(117964), ) Require(t, err) _, err = EnsureTxSucceeded(ctx, l1Client, tx) From 41b9517ec40c1a72e130dbe53940952910a14283 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Fri, 13 Oct 2023 18:21:29 +0200 Subject: [PATCH 19/64] add comment to skipped error checks --- arbos/retryables/retryable.go | 1 + 1 file changed, 1 insertion(+) diff --git a/arbos/retryables/retryable.go b/arbos/retryables/retryable.go index 1121de01f4..6984e41904 100644 --- a/arbos/retryables/retryable.go +++ b/arbos/retryables/retryable.go @@ -150,6 +150,7 @@ func (rs *RetryableState) DeleteRetryable(id common.Hash, evm *vm.EVM, scenario return false, err } + // we ignore returned error as we expect that if one ClearByUint64 fails, than all consecutive calls to ClearByUint64 will fail with the same error (not modifying state), and then ClearBytes will also fail with the same error (also not modifying state) - and 
this one we check and return _ = retStorage.ClearByUint64(numTriesOffset) _ = retStorage.ClearByUint64(fromOffset) _ = retStorage.ClearByUint64(toOffset) From df6f73e1b9c34a2fe5b272a4259ddbe63020c34a Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Mon, 16 Oct 2023 09:38:51 -0500 Subject: [PATCH 20/64] test failure --- .github/workflows/waitForNitro.sh | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/.github/workflows/waitForNitro.sh b/.github/workflows/waitForNitro.sh index e196b38d88..422083e38e 100755 --- a/.github/workflows/waitForNitro.sh +++ b/.github/workflows/waitForNitro.sh @@ -1,10 +1,14 @@ -# poll the nitro endpoint until we get a 0 return code -while true -do +# poll the nitro endpoint until we get a 0 return code or 30mins have passed, in that case exit 1 +start_time=$(date +%s) +timeout=20 + +while (( $(date +%s) - start_time <= timeout )); do curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":45678,"method":"eth_chainId","params":[]}' 'http://localhost:8547' if [ "$?" 
-eq "0" ]; then exit 0 else sleep 20 fi -done \ No newline at end of file +done + +exit 1 \ No newline at end of file From 82663481ad99b6ea6eef6440aed735e3cdedd243 Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Mon, 16 Oct 2023 09:58:24 -0500 Subject: [PATCH 21/64] test success --- .github/workflows/waitForNitro.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/waitForNitro.sh b/.github/workflows/waitForNitro.sh index 422083e38e..a1b7f2ad0f 100755 --- a/.github/workflows/waitForNitro.sh +++ b/.github/workflows/waitForNitro.sh @@ -1,6 +1,6 @@ # poll the nitro endpoint until we get a 0 return code or 30mins have passed, in that case exit 1 start_time=$(date +%s) -timeout=20 +timeout=1800 while (( $(date +%s) - start_time <= timeout )); do curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":45678,"method":"eth_chainId","params":[]}' 'http://localhost:8547' From 0bb425a4de6139d17c55ba0824770f3ae350ccc3 Mon Sep 17 00:00:00 2001 From: Nodar Ambroladze Date: Mon, 16 Oct 2023 17:20:37 +0200 Subject: [PATCH 22/64] Add client certificate, require to verify client cert --- arbnode/dataposter/data_poster.go | 33 ++++++++++++---- arbnode/dataposter/dataposter_test.go | 31 ++++++++++++--- arbnode/dataposter/testdata/client.cnf | 52 ++++++++++++++++++++++++++ arbnode/dataposter/testdata/client.crt | 28 ++++++++++++++ arbnode/dataposter/testdata/client.key | 28 ++++++++++++++ 5 files changed, 159 insertions(+), 13 deletions(-) create mode 100644 arbnode/dataposter/testdata/client.cnf create mode 100644 arbnode/dataposter/testdata/client.crt create mode 100644 arbnode/dataposter/testdata/client.key diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index 56e87e3b29..02a3548660 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -175,21 +175,32 @@ func NewDataPoster(ctx context.Context, opts *DataPosterOpts) (*DataPoster, erro } func rpcClient(ctx 
context.Context, opts *ExternalSignerCfg) (*rpc.Client, error) { - rootCrt, err := os.ReadFile(opts.RootCA) + clientCert, err := tls.LoadX509KeyPair(opts.ClientCert, opts.ClientPrivateKey) if err != nil { - return nil, fmt.Errorf("error reading external signer root CA: %w", err) + return nil, fmt.Errorf("error loading client certificate and private key: %w", err) } - pool := x509.NewCertPool() - pool.AppendCertsFromPEM(rootCrt) + + tlsCfg := &tls.Config{ + MinVersion: tls.VersionTLS12, + Certificates: []tls.Certificate{clientCert}, + } + if opts.RootCA != "" { + rootCrt, err := os.ReadFile(opts.RootCA) + if err != nil { + return nil, fmt.Errorf("error reading external signer root CA: %w", err) + } + rootCertPool := x509.NewCertPool() + rootCertPool.AppendCertsFromPEM(rootCrt) + tlsCfg.RootCAs = rootCertPool + } + return rpc.DialOptions( ctx, opts.URL, rpc.WithHTTPClient( &http.Client{ Transport: &http.Transport{ - TLSClientConfig: &tls.Config{ - RootCAs: pool, - }, + TLSClientConfig: tlsCfg, }, }, ), @@ -742,9 +753,13 @@ type ExternalSignerCfg struct { Address string `koanf:"address"` // API method name (e.g. eth_signTransaction). Method string `koanf:"method"` - // Path to the external signer root CA certificate. + // (Optional) Path to the external signer root CA certificate. // This allows us to use self-signed certificats on the external signer. RootCA string `koanf:"root-ca"` + // Client certificate for mtls. + ClientCert string `koanf:"client-cert"` + // Client certificate key for mtls. 
+ ClientPrivateKey string `koanf:"client-private-key"` } type DangerousConfig struct { @@ -787,6 +802,8 @@ func addExternalSignerOptions(prefix string, f *pflag.FlagSet) { f.String(prefix+".address", DefaultDataPosterConfig.ExternalSigner.Address, "external signer address") f.String(prefix+".method", DefaultDataPosterConfig.ExternalSigner.Method, "external signer method") f.String(prefix+".root-ca", DefaultDataPosterConfig.ExternalSigner.RootCA, "external signer root CA") + f.String(prefix+".client-cert", DefaultDataPosterConfig.ExternalSigner.ClientCert, "rpc client cert") + f.String(prefix+".client-private-key", DefaultDataPosterConfig.ExternalSigner.ClientPrivateKey, "rpc client private key") } var DefaultDataPosterConfig = DataPosterConfig{ diff --git a/arbnode/dataposter/dataposter_test.go b/arbnode/dataposter/dataposter_test.go index 0b3b8cba5c..d21390bccd 100644 --- a/arbnode/dataposter/dataposter_test.go +++ b/arbnode/dataposter/dataposter_test.go @@ -2,6 +2,8 @@ package dataposter import ( "context" + "crypto/tls" + "crypto/x509" "encoding/json" "fmt" "io" @@ -74,10 +76,12 @@ func TestExternalSigner(t *testing.T) { }() signer, addr, err := externalSigner(ctx, &ExternalSignerCfg{ - Address: srv.address.Hex(), - URL: "https://localhost:1234", - Method: "test_signTransaction", - RootCA: cert, + Address: srv.address.Hex(), + URL: "https://localhost:1234", + Method: "test_signTransaction", + RootCA: cert, + ClientCert: "./testdata/client.crt", + ClientPrivateKey: "./testdata/client.key", }) if err != nil { t.Fatalf("Error getting external signer: %v", err) @@ -129,7 +133,24 @@ func newServer(ctx context.Context, t *testing.T) (*http.Server, *server) { "test_signTransaction": s.signTransaction, } m := http.NewServeMux() - httpSrv := &http.Server{Addr: ":1234", Handler: m, ReadTimeout: 5 * time.Second} + + clientCert, err := os.ReadFile("./testdata/client.crt") + if err != nil { + panic(err) + } + pool := x509.NewCertPool() + pool.AppendCertsFromPEM(clientCert) + 
+ httpSrv := &http.Server{ + Addr: ":1234", + Handler: m, + ReadTimeout: 5 * time.Second, + TLSConfig: &tls.Config{ + MinVersion: tls.VersionTLS12, + ClientAuth: tls.RequireAndVerifyClientCert, + ClientCAs: pool, + }, + } m.HandleFunc("/", s.mux) return httpSrv, s } diff --git a/arbnode/dataposter/testdata/client.cnf b/arbnode/dataposter/testdata/client.cnf new file mode 100644 index 0000000000..8c15cc3dbc --- /dev/null +++ b/arbnode/dataposter/testdata/client.cnf @@ -0,0 +1,52 @@ +[req] +default_bits = 2048 +default_keyfile = server-key.pem +distinguished_name = subject +req_extensions = req_ext +x509_extensions = x509_ext +string_mask = utf8only + +[subject] +countryName = CH +countryName_default = CH + +stateOrProvinceName = Zurich +stateOrProvinceName_default = ZH + +localityName = city +localityName_default = Zurich + +organizationName = Offchain Labs +organizationName_default = Offchain Labs + +commonName = offchainlabs.ch +commonName_default = localhost + +emailAddress = Email Address +emailAddress_default = notabigdeal@offchainlabs.ch + +[x509_ext] +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid,issuer + +basicConstraints = CA:FALSE +keyUsage = digitalSignature, keyEncipherment +subjectAltName = @alternate_names +nsComment = "OpenSSL Generated Certificate" + +[req_ext] +subjectKeyIdentifier = hash + +basicConstraints = CA:FALSE +keyUsage = digitalSignature, keyEncipherment +subjectAltName = @alternate_names +nsComment = "OpenSSL Generated Certificate" + +[alternate_names] +DNS.1 = localhost +DNS.2 = 127.0.0.1 + +[alternate_names] +DNS.1 = localhost +DNS.2 = 127.0.0.1 + diff --git a/arbnode/dataposter/testdata/client.crt b/arbnode/dataposter/testdata/client.crt new file mode 100644 index 0000000000..3d494be820 --- /dev/null +++ b/arbnode/dataposter/testdata/client.crt @@ -0,0 +1,28 @@ +-----BEGIN CERTIFICATE----- +MIIE0jCCA7qgAwIBAgIUPaBB3/hHMpZfGB3VOw1+mHG4LnUwDQYJKoZIhvcNAQEL +BQAwgYMxCzAJBgNVBAYTAkNIMQswCQYDVQQIDAJaSDEPMA0GA1UEBwwGWnVyaWNo 
+MRYwFAYDVQQKDA1PZmZjaGFpbiBMYWJzMRIwEAYDVQQDDAlsb2NhbGhvc3QxKjAo +BgkqhkiG9w0BCQEWG25vdGFiaWdkZWFsQG9mZmNoYWlubGFicy5jaDAeFw0yMzEw +MTYxNDU2MjhaFw0yNDEwMTUxNDU2MjhaMIGDMQswCQYDVQQGEwJDSDELMAkGA1UE +CAwCWkgxDzANBgNVBAcMBlp1cmljaDEWMBQGA1UECgwNT2ZmY2hhaW4gTGFiczES +MBAGA1UEAwwJbG9jYWxob3N0MSowKAYJKoZIhvcNAQkBFhtub3RhYmlnZGVhbEBv +ZmZjaGFpbmxhYnMuY2gwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC1 +1asfUzv07QTVwlM4o3g51ilIFEApPkpdQej/GIItLEVRQW+GI9jYuEM07wdwMhSH +JPFNbZB3dmBuqDLx13hY03ufyeY+nab0/sO6x13kXChvIqgPRyJtkEAoYkMM3W0D +S6HeL/6DFoTQ2xAlZb/7i/9deuUwDL3MNVSjPCm9PjFzSOFgAQQud2uUT7aENGuG +Whw3oXz9gU/8gv3keLzcIa2PHyEW5M7jeGSYMjfW3wr0d+Z5mSNRc/U6kncKi06c +QrMKrgFfF7a5kHgxUL7bRCGgCMemXe7VfrW6oKT11JcLWDKhe+uo6bNXUptek55H +HfQi6x8cbM46/h3riZA3AgMBAAGjggE6MIIBNjAdBgNVHQ4EFgQUQD2BOems0+JQ +br234cW5noMmXRIwga0GA1UdIwSBpTCBoqGBiaSBhjCBgzELMAkGA1UEBhMCQ0gx +CzAJBgNVBAgMAlpIMQ8wDQYDVQQHDAZadXJpY2gxFjAUBgNVBAoMDU9mZmNoYWlu +IExhYnMxEjAQBgNVBAMMCWxvY2FsaG9zdDEqMCgGCSqGSIb3DQEJARYbbm90YWJp +Z2RlYWxAb2ZmY2hhaW5sYWJzLmNoghQ9oEHf+Ecyll8YHdU7DX6YcbgudTAJBgNV +HRMEAjAAMAsGA1UdDwQEAwIFoDAfBgNVHREEGDAWgglsb2NhbGhvc3SCCTEyNy4w +LjAuMTAsBglghkgBhvhCAQ0EHxYdT3BlblNTTCBHZW5lcmF0ZWQgQ2VydGlmaWNh +dGUwDQYJKoZIhvcNAQELBQADggEBAF4EVkOZZeMIvv0JViP7NsmIl2ke/935x6Hd +hQiLUw13XHYXzMa5/8Y5fnKjttBODpFoQlwjgI18vzuYzItYMBc2cabQJcpfG+Wq +M3m/wl1TC2XOuHj1E4RA/nU3tslntahtXG+vkks9RN+f9irHUhDRR6AGSnSB2Gi/ +B2OGmXn7S4Qge8+fGHAjN+tlu+tOoEWP6R3if/a9UIe5EGM8QTe4zw6lr+iPrOhC +M94pK5IEWn5IIGhr3zJIYkm/Dp+rFqhV1sqPOjjFLVCA7KJ3jVVVHlcm4Xa/+fyk +CIm7/VAmnbeUNlMbkXNOfQMeku8Iwsu80pvf3kjhU/PgO/5oojk= +-----END CERTIFICATE----- diff --git a/arbnode/dataposter/testdata/client.key b/arbnode/dataposter/testdata/client.key new file mode 100644 index 0000000000..b14941dd9f --- /dev/null +++ b/arbnode/dataposter/testdata/client.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQC11asfUzv07QTV +wlM4o3g51ilIFEApPkpdQej/GIItLEVRQW+GI9jYuEM07wdwMhSHJPFNbZB3dmBu 
+qDLx13hY03ufyeY+nab0/sO6x13kXChvIqgPRyJtkEAoYkMM3W0DS6HeL/6DFoTQ +2xAlZb/7i/9deuUwDL3MNVSjPCm9PjFzSOFgAQQud2uUT7aENGuGWhw3oXz9gU/8 +gv3keLzcIa2PHyEW5M7jeGSYMjfW3wr0d+Z5mSNRc/U6kncKi06cQrMKrgFfF7a5 +kHgxUL7bRCGgCMemXe7VfrW6oKT11JcLWDKhe+uo6bNXUptek55HHfQi6x8cbM46 +/h3riZA3AgMBAAECggEADUboCYMCpm+LqIhzNCtqswQD6QsiSwCmqs8nuKZGk9ue ++hmZj5IpgMJZLrgvWY4s+PGfgiRR/28QCBrVXkETiZ5zirQFN4tvLlKcSK4xZf29 +FBRUCiPxck36NhiqrBNOi1Mn8BKedl4cESkvSu1cvcmeOh100HPcHfLDVqHx3qsl +D/5yMkT2+zdhtLa+X3nkAa+3aibOvgtyfkV679e20CG6h89N9GBKkTXO8ioLZZVm +84ksnd4FcpTo7ebJJxElEB+ZA4akPHbF6ArUmcpqtGso5GtwqqO2ZlguSn2XQT0d +jqvOG4DwfSXk6SpE/dpWvU92fmxWAxZvGrZNgDyJ2QKBgQDyQ8NN4b80Yza/YXar +LWx8A6B0eMc1dXgt9m3UUI+titt45jEcaXhCX01FRFTznWGmWFtJmcWBoaQVPVel +IcDYQSxEuBUrCeI75ocv/IQtENaiX3TK7Nlz5RHfpQpfDVJq45lpiD38CGkYkAif +9pSzC8aup4W3WR0JJZ1AOHUZaQKBgQDAJNJnaSNzB+eDWTKCIN5V9X3QMkmjsuir +Nf2lBXHYARnlYWAbtYFG12wLJQMTNX5ewVQQrWtsdPkGPpCnPLelUTxMssrsXjej +JlLzYUfzRBqEXMI3AA9bVdiauxId2RTcp2F81SM1keCMcuHYxrzVkBSOC9u3wCnb +Whb6+feInwKBgQCbzgC5AcoaQwReqKvNAvWV/C8hONvFAbs8tBOGTBlbHsZvRnun +Lh1tciUbuwp3cmvuszxiZUakS/RexIitZrvDWIbD2y+h8kVRCL1Am0HWSdH/syxF +pXVkF5obHuVApCyxGZb8S+axRCdy6I7jcY3IaHZqtMpGVEVcMJilSKnmoQKBgQCC +tEmgaMfhhx34nqOaG4vDA4T7LEolnh1h4g9RwztnCZC5FZ1QHA79xqrLhfjqhzgY +cwChe6aYl5WSptq1uLrgLTuMnQ8m7QyB4h8JSkKse8ZiBctjqJnJssLutpSjUzk6 +xG2vgjk6RqpuP/PcB40K5cDlw7FJ9OFEQqthPMsi1wKBgQC0/vv5bY3DQ+wV6gUy +nFoSa/XNHaa8y7jmmlCnWJqs6DAAQQ3VW0tPX03GYL/NDcI+PwzYDHDkSB6Qa/o8 +VzVGK1/kr/+bveNvqmi0vNb54fMFLveGgsY4Cu1cffiw8m6nYJ/V4eCsHfpF1B5L +5HDnt5rFKt1Mi9WsUSRtxipxBA== +-----END PRIVATE KEY----- From f99cfe673e5799404186a60ce254a37d48935397 Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Mon, 16 Oct 2023 10:31:24 -0500 Subject: [PATCH 23/64] refactor --- .github/workflows/waitForNitro.sh | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/.github/workflows/waitForNitro.sh b/.github/workflows/waitForNitro.sh index a1b7f2ad0f..add83e24bc 100755 --- a/.github/workflows/waitForNitro.sh +++ 
b/.github/workflows/waitForNitro.sh @@ -1,8 +1,7 @@ # poll the nitro endpoint until we get a 0 return code or 30mins have passed, in that case exit 1 -start_time=$(date +%s) -timeout=1800 +timeout_time=$(($(date +%s) + 1800)) -while (( $(date +%s) - start_time <= timeout )); do +while (( $(date +%s) <= timeout_time )); do curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":45678,"method":"eth_chainId","params":[]}' 'http://localhost:8547' if [ "$?" -eq "0" ]; then exit 0 From f5593692885c7401195a6907339ea1d03df95c35 Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Mon, 16 Oct 2023 10:46:04 -0500 Subject: [PATCH 24/64] refactor --- .github/workflows/waitForNitro.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/waitForNitro.sh b/.github/workflows/waitForNitro.sh index add83e24bc..cf3f6484fc 100755 --- a/.github/workflows/waitForNitro.sh +++ b/.github/workflows/waitForNitro.sh @@ -1,9 +1,9 @@ +#!/bin/bash # poll the nitro endpoint until we get a 0 return code or 30mins have passed, in that case exit 1 timeout_time=$(($(date +%s) + 1800)) while (( $(date +%s) <= timeout_time )); do - curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":45678,"method":"eth_chainId","params":[]}' 'http://localhost:8547' - if [ "$?" 
-eq "0" ]; then + if curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":45678,"method":"eth_chainId","params":[]}' 'http://localhost:8547'; then exit 0 else sleep 20 From 754d9be19aacb8cca479701099930e45b8e1d8e5 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Mon, 16 Oct 2023 17:58:32 +0200 Subject: [PATCH 25/64] add simple storage key caching tests --- arbos/storage/storage_test.go | 75 +++++++++++++++++++++++++++++++++++ 1 file changed, 75 insertions(+) diff --git a/arbos/storage/storage_test.go b/arbos/storage/storage_test.go index 35e6b7c4be..a8d424d14e 100644 --- a/arbos/storage/storage_test.go +++ b/arbos/storage/storage_test.go @@ -1,11 +1,16 @@ package storage import ( + "bytes" + "fmt" "math/big" + "math/rand" + "sync" "testing" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" + "github.com/ethereum/go-ethereum/crypto" "github.com/offchainlabs/nitro/arbos/burn" "github.com/offchainlabs/nitro/util/arbmath" ) @@ -91,3 +96,73 @@ func TestStorageBackedBigInt(t *testing.T) { }) } } + +func TestOpenCachedSubStorage(t *testing.T) { + s := NewMemoryBacked(burn.NewSystemBurner(nil, false)) + var subSpaceIDs [][]byte + for i := 0; i < 20; i++ { + subSpaceIDs = append(subSpaceIDs, []byte{byte(rand.Intn(0xff))}) + } + var expectedKeys [][]byte + for _, subSpaceID := range subSpaceIDs { + expectedKeys = append(expectedKeys, crypto.Keccak256(s.storageKey, subSpaceID)) + } + n := len(subSpaceIDs) * 50 + start := make(chan struct{}) + errs := make(chan error, n) + var wg sync.WaitGroup + for i := 0; i < n; i++ { + j := i % len(subSpaceIDs) + subSpaceID, expectedKey := subSpaceIDs[j], expectedKeys[j] + wg.Add(1) + go func() { + defer wg.Done() + <-start + ss := s.OpenCachedSubStorage(subSpaceID) + if !bytes.Equal(ss.storageKey, expectedKey) { + errs <- fmt.Errorf("unexpected storage key, want: %v, have: %v", expectedKey, ss.storageKey) + } + }() + } + close(start) + wg.Wait() + select { + case err := <-errs: 
+ t.Fatal(err) + default: + } +} + +func TestMapAddressCache(t *testing.T) { + s := NewMemoryBacked(burn.NewSystemBurner(nil, false)) + var keys []common.Hash + for i := 0; i < 20; i++ { + keys = append(keys, common.BytesToHash([]byte{byte(rand.Intn(0xff))})) + } + var expectedMapped []common.Hash + for _, key := range keys { + expectedMapped = append(expectedMapped, s.mapAddress(key)) + } + n := len(keys) * 50 + start := make(chan struct{}) + errs := make(chan error, n) + var wg sync.WaitGroup + for i := 0; i < n; i++ { + j := i % len(keys) + key, expected := keys[j], expectedMapped[j] + wg.Add(1) + go func() { + defer wg.Done() + <-start + mapped := s.mapAddress(key) + if !bytes.Equal(mapped.Bytes(), expected.Bytes()) { + errs <- fmt.Errorf("unexpected storage key, want: %v, have: %v", expected, mapped) + } + }() + } + close(start) + wg.Wait() + if len(errs) > 0 { + t.Fatal(<-errs) + } +} From 927a760942846eed49658147adbe62a9427b449a Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Mon, 16 Oct 2023 15:43:49 -0500 Subject: [PATCH 26/64] =?UTF-8?q?Add=20metrics=20to=20time=20each=20part?= =?UTF-8?q?=20of=20WSBroadcastServer=E2=80=99s=20StartWithHeader?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- wsbroadcastserver/wsbroadcastserver.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/wsbroadcastserver/wsbroadcastserver.go b/wsbroadcastserver/wsbroadcastserver.go index cd277387a0..f3e693d5d8 100644 --- a/wsbroadcastserver/wsbroadcastserver.go +++ b/wsbroadcastserver/wsbroadcastserver.go @@ -23,6 +23,7 @@ import ( flag "github.com/spf13/pflag" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" "github.com/offchainlabs/nitro/arbutil" ) @@ -32,6 +33,8 @@ var ( HTTPHeaderFeedClientVersion = textproto.CanonicalMIMEHeaderKey("Arbitrum-Feed-Client-Version") HTTPHeaderRequestedSequenceNumber = textproto.CanonicalMIMEHeaderKey("Arbitrum-Requested-Sequence-Number") 
HTTPHeaderChainId = textproto.CanonicalMIMEHeaderKey("Arbitrum-Chain-Id") + UpgradeToWSTimer = metrics.NewRegisteredTimer("arb/wsbroadcastserver/wsupgrade/duration", nil) + StartWithHeaderTimer = metrics.NewRegisteredTimer("arb/wsbroadcastserver/startwithheader/duration", nil) ) const ( @@ -205,6 +208,7 @@ func (s *WSBroadcastServer) Start(ctx context.Context) error { } func (s *WSBroadcastServer) StartWithHeader(ctx context.Context, header ws.HandshakeHeader) error { + startTimeMain := time.Now() s.startMutex.Lock() defer s.startMutex.Unlock() if s.started { @@ -316,7 +320,10 @@ func (s *WSBroadcastServer) StartWithHeader(ctx context.Context, header ws.Hands } // Zero-copy upgrade to WebSocket connection. + startTime := time.Now() _, err = upgrader.Upgrade(conn) + elapsed := time.Since(startTime) + UpgradeToWSTimer.Update(elapsed) if err != nil { if err.Error() != "" { @@ -483,6 +490,9 @@ func (s *WSBroadcastServer) StartWithHeader(ctx context.Context, header ws.Hands s.started = true + elapsedMain := time.Since(startTimeMain) + StartWithHeaderTimer.Update(elapsedMain) + return nil } From b49e10369bae00f3b3ef8946355edf2e03fb1209 Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Mon, 16 Oct 2023 15:52:27 -0500 Subject: [PATCH 27/64] refactor --- wsbroadcastserver/wsbroadcastserver.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/wsbroadcastserver/wsbroadcastserver.go b/wsbroadcastserver/wsbroadcastserver.go index f3e693d5d8..43ac27593a 100644 --- a/wsbroadcastserver/wsbroadcastserver.go +++ b/wsbroadcastserver/wsbroadcastserver.go @@ -33,8 +33,8 @@ var ( HTTPHeaderFeedClientVersion = textproto.CanonicalMIMEHeaderKey("Arbitrum-Feed-Client-Version") HTTPHeaderRequestedSequenceNumber = textproto.CanonicalMIMEHeaderKey("Arbitrum-Requested-Sequence-Number") HTTPHeaderChainId = textproto.CanonicalMIMEHeaderKey("Arbitrum-Chain-Id") - UpgradeToWSTimer = metrics.NewRegisteredTimer("arb/wsbroadcastserver/wsupgrade/duration", nil) - 
StartWithHeaderTimer = metrics.NewRegisteredTimer("arb/wsbroadcastserver/startwithheader/duration", nil) + upgradeToWSTimer = metrics.NewRegisteredTimer("arb/wsbroadcastserver/wsupgrade/duration", nil) + startWithHeaderTimer = metrics.NewRegisteredTimer("arb/wsbroadcastserver/startwithheader/duration", nil) ) const ( @@ -323,7 +323,7 @@ func (s *WSBroadcastServer) StartWithHeader(ctx context.Context, header ws.Hands startTime := time.Now() _, err = upgrader.Upgrade(conn) elapsed := time.Since(startTime) - UpgradeToWSTimer.Update(elapsed) + upgradeToWSTimer.Update(elapsed) if err != nil { if err.Error() != "" { @@ -491,7 +491,7 @@ func (s *WSBroadcastServer) StartWithHeader(ctx context.Context, header ws.Hands s.started = true elapsedMain := time.Since(startTimeMain) - StartWithHeaderTimer.Update(elapsedMain) + startWithHeaderTimer.Update(elapsedMain) return nil } From 487f6eeba172805c8be6ce2a0bb8914e55c30e5d Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Mon, 16 Oct 2023 23:07:06 -0600 Subject: [PATCH 28/64] Fix retryable gas estimation when overriding gas price to zero --- arbos/tx_processor.go | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/arbos/tx_processor.go b/arbos/tx_processor.go index 4eeffc679e..436998dfb5 100644 --- a/arbos/tx_processor.go +++ b/arbos/tx_processor.go @@ -245,16 +245,17 @@ func (p *TxProcessor) StartTxHook() (endTxNow bool, gasUsed uint64, err error, r } balance := statedb.GetBalance(tx.From) - basefee := evm.Context.BaseFee + effectiveBaseFee := evm.Context.BaseFee usergas := p.msg.GasLimit - maxGasCost := arbmath.BigMulByUint(tx.GasFeeCap, usergas) - maxFeePerGasTooLow := arbmath.BigLessThan(tx.GasFeeCap, basefee) - if p.msg.TxRunMode == core.MessageGasEstimationMode && tx.GasFeeCap.BitLen() == 0 { - // In gas estimation mode, we permit a zero gas fee cap. - // This matches behavior with normal tx gas estimation. 
- maxFeePerGasTooLow = false + if p.msg.TxRunMode != core.MessageCommitMode && tx.GasFeeCap.BitLen() == 0 { + // In gas estimation or eth_call mode, we permit a zero gas fee cap. + // This matches behavior with normal tx gas estimation and eth_call. + effectiveBaseFee = common.Big0 } + + maxGasCost := arbmath.BigMulByUint(tx.GasFeeCap, usergas) + maxFeePerGasTooLow := arbmath.BigLessThan(tx.GasFeeCap, effectiveBaseFee) if arbmath.BigLessThan(balance, maxGasCost) || usergas < params.TxGas || maxFeePerGasTooLow { // User either specified too low of a gas fee cap, didn't have enough balance to pay for gas, // or the specified gas limit is below the minimum transaction gas cost. @@ -268,7 +269,7 @@ func (p *TxProcessor) StartTxHook() (endTxNow bool, gasUsed uint64, err error, r } // pay for the retryable's gas and update the pools - gascost := arbmath.BigMulByUint(basefee, usergas) + gascost := arbmath.BigMulByUint(effectiveBaseFee, usergas) networkCost := gascost if p.state.ArbOSVersion() >= 11 { infraFeeAccount, err := p.state.InfraFeeAccount() @@ -276,7 +277,7 @@ func (p *TxProcessor) StartTxHook() (endTxNow bool, gasUsed uint64, err error, r if infraFeeAccount != (common.Address{}) { minBaseFee, err := p.state.L2PricingState().MinBaseFeeWei() p.state.Restrict(err) - infraFee := arbmath.BigMin(minBaseFee, basefee) + infraFee := arbmath.BigMin(minBaseFee, effectiveBaseFee) infraCost := arbmath.BigMulByUint(infraFee, usergas) infraCost = takeFunds(networkCost, infraCost) if err := transfer(&tx.From, &infraFeeAccount, infraCost); err != nil { @@ -294,7 +295,7 @@ func (p *TxProcessor) StartTxHook() (endTxNow bool, gasUsed uint64, err error, r } withheldGasFunds := takeFunds(availableRefund, gascost) // gascost is conceptually charged before the gas price refund - gasPriceRefund := arbmath.BigMulByUint(arbmath.BigSub(tx.GasFeeCap, basefee), tx.Gas) + gasPriceRefund := arbmath.BigMulByUint(arbmath.BigSub(tx.GasFeeCap, effectiveBaseFee), tx.Gas) if gasPriceRefund.Sign() < 
0 { // This should only be possible during gas estimation mode gasPriceRefund.SetInt64(0) @@ -310,7 +311,7 @@ func (p *TxProcessor) StartTxHook() (endTxNow bool, gasUsed uint64, err error, r retryTxInner, err := retryable.MakeTx( underlyingTx.ChainId(), 0, - basefee, + effectiveBaseFee, usergas, ticketId, tx.FeeRefundAddr, From 059a45e804657901d62a3ab908d1886a76ad0f22 Mon Sep 17 00:00:00 2001 From: Nodar Ambroladze Date: Thu, 19 Oct 2023 10:40:51 +0200 Subject: [PATCH 29/64] revert testnode pin --- nitro-testnode | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nitro-testnode b/nitro-testnode index 7ad12c0f1b..aee6ceff9c 160000 --- a/nitro-testnode +++ b/nitro-testnode @@ -1 +1 @@ -Subproject commit 7ad12c0f1be75a72c7360d5258e0090f8225594e +Subproject commit aee6ceff9c9d3fb2749da55a7d7842f23d1bfc8e From b015ab8f06f8fc72c827af4388428949cec8a5c6 Mon Sep 17 00:00:00 2001 From: Nodar Ambroladze Date: Thu, 19 Oct 2023 10:46:31 +0200 Subject: [PATCH 30/64] Make client certificate optional --- arbnode/dataposter/data_poster.go | 19 +++++++++++-------- arbnode/dataposter/dataposter_test.go | 2 +- 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index 39fda3f2ac..687b26ba26 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -175,15 +175,18 @@ func NewDataPoster(ctx context.Context, opts *DataPosterOpts) (*DataPoster, erro } func rpcClient(ctx context.Context, opts *ExternalSignerCfg) (*rpc.Client, error) { - clientCert, err := tls.LoadX509KeyPair(opts.ClientCert, opts.ClientPrivateKey) - if err != nil { - return nil, fmt.Errorf("error loading client certificate and private key: %w", err) + tlsCfg := &tls.Config{ + MinVersion: tls.VersionTLS12, } - tlsCfg := &tls.Config{ - MinVersion: tls.VersionTLS12, - Certificates: []tls.Certificate{clientCert}, + if opts.ClientCert == "" || opts.ClientPrivateKey == "" { + clientCert, err := 
tls.LoadX509KeyPair(opts.ClientCert, opts.ClientPrivateKey) + if err != nil { + return nil, fmt.Errorf("error loading client certificate and private key: %w", err) + } + tlsCfg.Certificates = []tls.Certificate{clientCert} } + if opts.RootCA != "" { rootCrt, err := os.ReadFile(opts.RootCA) if err != nil { @@ -756,9 +759,9 @@ type ExternalSignerCfg struct { // (Optional) Path to the external signer root CA certificate. // This allows us to use self-signed certificats on the external signer. RootCA string `koanf:"root-ca"` - // Client certificate for mtls. + // (Optional) Client certificate for mtls. ClientCert string `koanf:"client-cert"` - // Client certificate key for mtls. + // (Optional) Client certificate key for mtls. ClientPrivateKey string `koanf:"client-private-key"` } diff --git a/arbnode/dataposter/dataposter_test.go b/arbnode/dataposter/dataposter_test.go index 4734295ae8..d4d72bbbf4 100644 --- a/arbnode/dataposter/dataposter_test.go +++ b/arbnode/dataposter/dataposter_test.go @@ -136,7 +136,7 @@ func newServer(ctx context.Context, t *testing.T) (*http.Server, *server) { clientCert, err := os.ReadFile("./testdata/client.crt") if err != nil { - panic(err) + t.Fatalf("Error reading client certificate: %v", err) } pool := x509.NewCertPool() pool.AppendCertsFromPEM(clientCert) From 1d524078dc2c41fed4f2bc3efd401e18f911047f Mon Sep 17 00:00:00 2001 From: Nodar Ambroladze Date: Thu, 19 Oct 2023 10:55:46 +0200 Subject: [PATCH 31/64] Log when certificate is enabled, fix enabling --- arbnode/dataposter/data_poster.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index 687b26ba26..1a202171ec 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -179,7 +179,8 @@ func rpcClient(ctx context.Context, opts *ExternalSignerCfg) (*rpc.Client, error MinVersion: tls.VersionTLS12, } - if opts.ClientCert == "" || opts.ClientPrivateKey == "" { + if 
opts.ClientCert != "" && opts.ClientPrivateKey != "" { + log.Info("Client certificate for external signer is enabled") clientCert, err := tls.LoadX509KeyPair(opts.ClientCert, opts.ClientPrivateKey) if err != nil { return nil, fmt.Errorf("error loading client certificate and private key: %w", err) @@ -762,6 +763,7 @@ type ExternalSignerCfg struct { // (Optional) Client certificate for mtls. ClientCert string `koanf:"client-cert"` // (Optional) Client certificate key for mtls. + // This is required when client-cert is set. ClientPrivateKey string `koanf:"client-private-key"` } From 89f837284486917a911aabb7ef426cf87f76b9bc Mon Sep 17 00:00:00 2001 From: Goran Vladika Date: Thu, 19 Oct 2023 11:19:19 +0200 Subject: [PATCH 32/64] Use latest version of nitro-contracts and testnode --- arbnode/node.go | 20 +++++++++++--------- contracts | 2 +- nitro-testnode | 2 +- 3 files changed, 13 insertions(+), 11 deletions(-) diff --git a/arbnode/node.go b/arbnode/node.go index 675aca3361..132a9f28d5 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -303,17 +303,19 @@ func DeployOnL1(ctx context.Context, parentChainReader *headerreader.HeaderReade validatorAddrs = append(validatorAddrs, crypto.CreateAddress(validatorWalletCreator, i)) } - deployUtilityFactories := false - maxFeePerGas := big.NewInt(0) // needed when utility factories are deployed + deployParams := rollupgen.RollupCreatorRollupDeploymentParams{ + Config:config, + BatchPoster: batchPoster, + Validators: validatorAddrs, + MaxDataSize: maxDataSize, + NativeToken: nativeToken, + DeployFactoriesToL2: false, + MaxFeePerGasForRetryables: big.NewInt(0), // needed when utility factories are deployed + } + tx, err := rollupCreator.CreateRollup( deployAuth, - config, - batchPoster, - validatorAddrs, - maxDataSize, - nativeToken, - deployUtilityFactories, - maxFeePerGas, + deployParams, ) if err != nil { return nil, fmt.Errorf("error submitting create rollup tx: %w", err) diff --git a/contracts b/contracts index 
46d1767ce2..b22f93c7a1 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit 46d1767ce2181c501f6b1f8651d6f1ace3da1c41 +Subproject commit b22f93c7a1322fb8063ad71d58acb37416d71146 diff --git a/nitro-testnode b/nitro-testnode index 9f2bd4b074..0bd1ef2c6b 160000 --- a/nitro-testnode +++ b/nitro-testnode @@ -1 +1 @@ -Subproject commit 9f2bd4b0743ad111a050371b4c5233744f0e4622 +Subproject commit 0bd1ef2c6b33500f9144192edadd6491ec1e6bc7 From 1df214fa7fbae995204c0568fc1d1d43c14dad4e Mon Sep 17 00:00:00 2001 From: Goran Vladika Date: Thu, 19 Oct 2023 11:54:32 +0200 Subject: [PATCH 33/64] Lint --- arbnode/node.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/arbnode/node.go b/arbnode/node.go index 132a9f28d5..4b3381e780 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -304,13 +304,13 @@ func DeployOnL1(ctx context.Context, parentChainReader *headerreader.HeaderReade } deployParams := rollupgen.RollupCreatorRollupDeploymentParams{ - Config:config, - BatchPoster: batchPoster, - Validators: validatorAddrs, - MaxDataSize: maxDataSize, - NativeToken: nativeToken, - DeployFactoriesToL2: false, - MaxFeePerGasForRetryables: big.NewInt(0), // needed when utility factories are deployed + Config: config, + BatchPoster: batchPoster, + Validators: validatorAddrs, + MaxDataSize: maxDataSize, + NativeToken: nativeToken, + DeployFactoriesToL2: false, + MaxFeePerGasForRetryables: big.NewInt(0), // needed when utility factories are deployed } tx, err := rollupCreator.CreateRollup( From 0a3e0d8bbcedad1ccae5a987928875c588d38835 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Thu, 19 Oct 2023 12:10:34 -0600 Subject: [PATCH 34/64] Fix basefee in EndTxHook --- arbos/tx_processor.go | 31 ++++++++++++++++++++++++------- go-ethereum | 2 +- 2 files changed, 25 insertions(+), 8 deletions(-) diff --git a/arbos/tx_processor.go b/arbos/tx_processor.go index 436998dfb5..569edb7c63 100644 --- a/arbos/tx_processor.go +++ b/arbos/tx_processor.go @@ 
-248,7 +248,7 @@ func (p *TxProcessor) StartTxHook() (endTxNow bool, gasUsed uint64, err error, r effectiveBaseFee := evm.Context.BaseFee usergas := p.msg.GasLimit - if p.msg.TxRunMode != core.MessageCommitMode && tx.GasFeeCap.BitLen() == 0 { + if p.msg.TxRunMode != core.MessageCommitMode && p.msg.GasFeeCap.BitLen() == 0 { // In gas estimation or eth_call mode, we permit a zero gas fee cap. // This matches behavior with normal tx gas estimation and eth_call. effectiveBaseFee = common.Big0 @@ -457,7 +457,6 @@ func (p *TxProcessor) EndTxHook(gasLeft uint64, success bool) { underlyingTx := p.msg.Tx networkFeeAccount, _ := p.state.NetworkFeeAccount() - basefee := p.evm.Context.BaseFee scenario := util.TracingAfterEVM if gasLeft > p.msg.GasLimit { @@ -467,9 +466,20 @@ func (p *TxProcessor) EndTxHook(gasLeft uint64, success bool) { if underlyingTx != nil && underlyingTx.Type() == types.ArbitrumRetryTxType { inner, _ := underlyingTx.GetInner().(*types.ArbitrumRetryTx) + effectiveBaseFee := inner.GasFeeCap + if p.msg.TxRunMode == core.MessageCommitMode && !arbmath.BigEquals(effectiveBaseFee, p.evm.Context.BaseFee) { + log.Error( + "ArbitrumRetryTx GasFeeCap doesn't match basefee in commit mode", + "txHash", underlyingTx.Hash(), + "gasFeeCap", inner.GasFeeCap, + "baseFee", p.evm.Context.BaseFee, + ) + // revert to the old behavior to avoid diverging from older nodes + effectiveBaseFee = p.evm.Context.BaseFee + } // undo Geth's refund to the From address - gasRefund := arbmath.BigMulByUint(basefee, gasLeft) + gasRefund := arbmath.BigMulByUint(effectiveBaseFee, gasLeft) err := util.BurnBalance(&inner.From, gasRefund, p.evm, scenario, "undoRefund") if err != nil { log.Error("Uh oh, Geth didn't refund the user", inner.From, gasRefund) @@ -504,7 +514,7 @@ func (p *TxProcessor) EndTxHook(gasLeft uint64, success bool) { takeFunds(maxRefund, inner.SubmissionFeeRefund) } // Conceptually, the gas charge is taken from the L1 deposit pool if possible. 
- takeFunds(maxRefund, arbmath.BigMulByUint(basefee, gasUsed)) + takeFunds(maxRefund, arbmath.BigMulByUint(effectiveBaseFee, gasUsed)) // Refund any unused gas, without overdrafting the L1 deposit. networkRefund := gasRefund if p.state.ArbOSVersion() >= 11 { @@ -514,7 +524,7 @@ func (p *TxProcessor) EndTxHook(gasLeft uint64, success bool) { minBaseFee, err := p.state.L2PricingState().MinBaseFeeWei() p.state.Restrict(err) // TODO MinBaseFeeWei change during RetryTx execution may cause incorrect calculation of the part of the refund that should be taken from infraFeeAccount. Unless the balances of network and infra fee accounts are too low, the amount transferred to refund address should remain correct. - infraFee := arbmath.BigMin(minBaseFee, basefee) + infraFee := arbmath.BigMin(minBaseFee, effectiveBaseFee) infraRefund := arbmath.BigMulByUint(infraFee, gasLeft) infraRefund = takeFunds(networkRefund, infraRefund) refund(infraFeeAccount, infraRefund) @@ -542,6 +552,7 @@ func (p *TxProcessor) EndTxHook(gasLeft uint64, success bool) { return } + basefee := p.evm.Context.BaseFee totalCost := arbmath.BigMul(basefee, arbmath.UintToBig(gasUsed)) // total cost = price of gas * gas burnt computeCost := arbmath.BigSub(totalCost, p.PosterFee) // total cost = network's compute + poster's L1 costs if computeCost.Sign() < 0 { @@ -603,9 +614,15 @@ func (p *TxProcessor) EndTxHook(gasLeft uint64, success bool) { func (p *TxProcessor) ScheduledTxes() types.Transactions { scheduled := types.Transactions{} time := p.evm.Context.Time - basefee := p.evm.Context.BaseFee + effectiveBaseFee := p.evm.Context.BaseFee chainID := p.evm.ChainConfig().ChainID + if p.msg.TxRunMode != core.MessageCommitMode && p.msg.GasFeeCap.BitLen() == 0 { + // In gas estimation or eth_call mode, we permit a zero gas fee cap. + // This matches behavior with normal tx gas estimation and eth_call. 
+ effectiveBaseFee = common.Big0 + } + logs := p.evm.StateDB.GetCurrentTxLogs() for _, log := range logs { if log.Address != ArbRetryableTxAddress || log.Topics[0] != RedeemScheduledEventID { @@ -624,7 +641,7 @@ func (p *TxProcessor) ScheduledTxes() types.Transactions { redeem, _ := retryable.MakeTx( chainID, event.SequenceNum, - basefee, + effectiveBaseFee, event.DonatedGas, event.TicketId, event.GasDonor, diff --git a/go-ethereum b/go-ethereum index b4221631e1..e8c8827c0b 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit b4221631e1e5eac86f01582bd74234e3c0f7f5c7 +Subproject commit e8c8827c0b9e22e60829da1945cba9c451cda85a From 295b465b0d9652e5943c042175c6d3555516a14e Mon Sep 17 00:00:00 2001 From: Joshua Colvin Date: Thu, 19 Oct 2023 14:00:47 -0700 Subject: [PATCH 35/64] Stop using actions-rs --- .github/workflows/arbitrator-ci.yml | 36 ++++++++------------------- .github/workflows/ci.yml | 23 +++-------------- .github/workflows/codeql-analysis.yml | 9 ++----- .github/workflows/docker.yml | 2 +- 4 files changed, 17 insertions(+), 53 deletions(-) diff --git a/.github/workflows/arbitrator-ci.yml b/.github/workflows/arbitrator-ci.yml index f2b141fb46..8c491a421c 100644 --- a/.github/workflows/arbitrator-ci.yml +++ b/.github/workflows/arbitrator-ci.yml @@ -25,7 +25,7 @@ jobs: runs-on: ubuntu-8 steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: submodules: recursive @@ -49,23 +49,18 @@ jobs: cache-dependency-path: '**/yarn.lock' - name: Install rust stable - uses: actions-rs/toolchain@v1 - id: install-rust + uses: dtolnay/rust-toolchain@stable with: - profile: minimal - toolchain: "stable" - override: true components: 'llvm-tools-preview, rustfmt, clippy' + targets: 'wasm32-wasi, wasm32-unknown-unknown' - name: Install grcov - uses: actions-rs/install@v0.1 + uses: jaxxstorm/action-install-gh-release@v1.10.0 with: - crate: grcov - version: latest - use-tool-cache: true - - - name: Install rust wasm targets - run: 
rustup target add wasm32-wasi wasm32-unknown-unknown + repo: mozilla/grcov + tag: v0.8.18 + extension: "\\.bz2" + cache: enable - name: Cache Rust intermediate build products uses: actions/cache@v3 @@ -148,22 +143,13 @@ jobs: echo RUSTDOCFLAGS="-Cpanic=abort" >> $GITHUB_ENV - name: Clippy check - uses: actions-rs/cargo@v1 - with: - command: clippy - args: --all --manifest-path arbitrator/Cargo.toml -- -D warnings + run: cargo clippy --all --manifest-path arbitrator/Cargo.toml -- -D warnings - name: Run rust tests - uses: actions-rs/cargo@v1 - with: - command: test - args: --all --manifest-path arbitrator/Cargo.toml + run: cargo test --all --manifest-path arbitrator/Cargo.toml - name: Rustfmt - uses: actions-rs/cargo@v1 - with: - command: fmt - args: --all --manifest-path arbitrator/Cargo.toml -- --check + run: cargo fmt --all --manifest-path arbitrator/Cargo.toml -- --check - name: Make proofs from test cases run: make -j test-gen-proofs diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 0734aecfd0..022c26d2f3 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -29,7 +29,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: submodules: true @@ -53,27 +53,10 @@ jobs: sudo apt-get update && sudo apt-get install -y lld-14 sudo ln -s /usr/bin/wasm-ld-14 /usr/local/bin/wasm-ld - - name: Install rust wasm32-unknown-unknown - uses: actions-rs/toolchain@v1 - with: - profile: minimal - toolchain: "stable" - target: wasm32-unknown-unknown - - - name: Install rust wasm32-wasi - uses: actions-rs/toolchain@v1 - with: - profile: minimal - toolchain: "stable" - target: wasm32-wasi - - name: Install rust stable - uses: actions-rs/toolchain@v1 - id: install-rust + uses: dtolnay/rust-toolchain@stable with: - profile: minimal - toolchain: "stable" - override: true + targets: 'wasm32-unknown-unknown, wasm32-wasi' - name: Cache Build Products uses: actions/cache@v3 diff --git 
a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index cfb5b6eda6..8b2a765f6e 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -44,7 +44,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: submodules: true @@ -74,12 +74,7 @@ jobs: go-version: 1.20.x - name: Install rust stable - uses: actions-rs/toolchain@v1 - id: install-rust - with: - profile: minimal - toolchain: "stable" - override: true + uses: dtolnay/rust-toolchain@stable - name: Cache Rust Build Products uses: actions/cache@v3 diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 33049d4396..dcc76465a7 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -23,7 +23,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: submodules: recursive From cca0c53c64616044b9e2e5e52bfa457cc0cfc75a Mon Sep 17 00:00:00 2001 From: Joshua Colvin Date: Thu, 19 Oct 2023 15:00:12 -0700 Subject: [PATCH 36/64] Use latest docker and golangci actions Old versions of docker and golangci actions used deprecated github actions methods `The CODEQL_EXTRACTOR_GO_BUILD_TRACING environment variable has no effect on workflows with manual build steps, so we recommend that you remove it from your workflow.` --- .github/workflows/ci.yml | 2 +- .github/workflows/codeql-analysis.yml | 1 - .github/workflows/docker.yml | 6 +++--- 3 files changed, 4 insertions(+), 5 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 022c26d2f3..de2a580c93 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -108,7 +108,7 @@ jobs: run: make -j build-node-deps - name: Lint - uses: golangci/golangci-lint-action@v2 + uses: golangci/golangci-lint-action@v3 with: version: latest skip-go-installation: true diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 
8b2a765f6e..8fb9d80c21 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -32,7 +32,6 @@ jobs: contents: read security-events: write env: - CODEQL_EXTRACTOR_GO_BUILD_TRACING: 'on' WABT_VERSION: 1.0.32 strategy: diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index dcc76465a7..30ad88d91a 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -28,7 +28,7 @@ jobs: submodules: recursive - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v1 + uses: docker/setup-buildx-action@v3 with: driver-opts: network=host @@ -40,7 +40,7 @@ jobs: restore-keys: ${{ runner.os }}-buildx- - name: Build nitro-node docker - uses: docker/build-push-action@v2 + uses: docker/build-push-action@v5 with: target: nitro-node push: true @@ -50,7 +50,7 @@ jobs: cache-to: type=local,dest=/tmp/.buildx-cache-new,mode=max - name: Build nitro-node-dev docker - uses: docker/build-push-action@v2 + uses: docker/build-push-action@v5 with: target: nitro-node-dev push: true From 1e1a378dc52a36cc26ad8ef6a528e7c1ef749890 Mon Sep 17 00:00:00 2001 From: Joshua Colvin Date: Thu, 19 Oct 2023 15:51:03 -0700 Subject: [PATCH 37/64] Don't use removed lint option `skip-go-installation` is no longer an option for `golangci/golangci-lint-action` --- .github/workflows/ci.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index de2a580c93..f2c4fac84c 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -111,7 +111,6 @@ jobs: uses: golangci/golangci-lint-action@v3 with: version: latest - skip-go-installation: true skip-pkg-cache: true - name: Custom Lint run: | From 0fe1550abf69c32c09869961c8812f0949965af4 Mon Sep 17 00:00:00 2001 From: Goran Vladika Date: Fri, 20 Oct 2023 16:46:52 +0200 Subject: [PATCH 38/64] Remove testnode custom branch --- .gitmodules | 1 - nitro-testnode | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git 
a/.gitmodules b/.gitmodules index 977f7a4772..7c78791c78 100644 --- a/.gitmodules +++ b/.gitmodules @@ -20,4 +20,3 @@ [submodule "nitro-testnode"] path = nitro-testnode url = https://github.com/OffchainLabs/nitro-testnode.git - branch = fee-token-support diff --git a/nitro-testnode b/nitro-testnode index 0bd1ef2c6b..11170fe363 160000 --- a/nitro-testnode +++ b/nitro-testnode @@ -1 +1 @@ -Subproject commit 0bd1ef2c6b33500f9144192edadd6491ec1e6bc7 +Subproject commit 11170fe36318991973bea632d9f348816a64a974 From 878ec4ae5df30be16422d20ce30c937c4a03488c Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Fri, 20 Oct 2023 11:26:38 -0500 Subject: [PATCH 39/64] address PR comments, refactor code to use two router design --- broadcastclients/broadcastclients.go | 106 +++++++++++++++++++-------- 1 file changed, 76 insertions(+), 30 deletions(-) diff --git a/broadcastclients/broadcastclients.go b/broadcastclients/broadcastclients.go index 48f644b7f0..f508404799 100644 --- a/broadcastclients/broadcastclients.go +++ b/broadcastclients/broadcastclients.go @@ -21,6 +21,7 @@ const ROUTER_QUEUE_SIZE = 1024 const RECENT_FEED_INITIAL_MAP_SIZE = 1024 const RECENT_FEED_ITEM_TTL = time.Second * 10 const MAX_FEED_INACTIVE_TIME = time.Second * 5 +const PRIMARY_FEED_UPTIME = time.Minute * 10 type Router struct { stopwaiter.StopWaiter @@ -43,7 +44,8 @@ type BroadcastClients struct { secondaryClients []*broadcastclient.BroadcastClient numOfStartedSecondary int - router *Router + primaryRouter *Router + secondaryRouter *Router // Use atomic access connected int32 @@ -62,17 +64,20 @@ func NewBroadcastClients( if len(config.URL) == 0 && len(config.SecondaryURL) == 0 { return nil, nil } - - clients := BroadcastClients{ - router: &Router{ + newStandardRouter := func() *Router { + return &Router{ messageChan: make(chan broadcaster.BroadcastFeedMessage, ROUTER_QUEUE_SIZE), confirmedSequenceNumberChan: make(chan arbutil.MessageIndex, ROUTER_QUEUE_SIZE), forwardTxStreamer: txStreamer, 
forwardConfirmationChan: confirmedSequenceNumberListener, - }, + } + } + clients := BroadcastClients{ + primaryRouter: newStandardRouter(), + secondaryRouter: newStandardRouter(), } var lastClientErr error - makeFeeds := func(url []string) []*broadcastclient.BroadcastClient { + makeFeeds := func(url []string, router *Router) []*broadcastclient.BroadcastClient { feeds := make([]*broadcastclient.BroadcastClient, 0, len(url)) for _, address := range url { client, err := broadcastclient.NewBroadcastClient( @@ -80,8 +85,8 @@ func NewBroadcastClients( address, l2ChainId, currentMessageCount, - clients.router, - clients.router.confirmedSequenceNumberChan, + router, + router.confirmedSequenceNumberChan, fatalErrChan, addrVerifier, func(delta int32) { clients.adjustCount(delta) }, @@ -96,8 +101,8 @@ func NewBroadcastClients( return feeds } - clients.primaryClients = makeFeeds(config.URL) - clients.secondaryClients = makeFeeds(config.SecondaryURL) + clients.primaryClients = makeFeeds(config.URL, clients.primaryRouter) + clients.secondaryClients = makeFeeds(config.SecondaryURL, clients.secondaryRouter) if len(clients.primaryClients) == 0 && len(clients.secondaryClients) == 0 { log.Error("no connected feed on startup, last error: %w", lastClientErr) @@ -121,7 +126,8 @@ func (bcs *BroadcastClients) adjustCount(delta int32) { } func (bcs *BroadcastClients) Start(ctx context.Context) { - bcs.router.StopWaiter.Start(ctx, bcs.router) + bcs.primaryRouter.StopWaiter.Start(ctx, bcs.primaryRouter) + bcs.secondaryRouter.StopWaiter.Start(ctx, bcs.secondaryRouter) for _, client := range bcs.primaryClients { client.Start(ctx) @@ -130,17 +136,24 @@ func (bcs *BroadcastClients) Start(ctx context.Context) { var lastConfirmed arbutil.MessageIndex recentFeedItemsNew := make(map[arbutil.MessageIndex]time.Time, RECENT_FEED_INITIAL_MAP_SIZE) recentFeedItemsOld := make(map[arbutil.MessageIndex]time.Time, RECENT_FEED_INITIAL_MAP_SIZE) - bcs.router.LaunchThread(func(ctx context.Context) { + 
bcs.primaryRouter.LaunchThread(func(ctx context.Context) { recentFeedItemsCleanup := time.NewTicker(RECENT_FEED_ITEM_TTL) - startNewFeedTimer := time.NewTicker(MAX_FEED_INACTIVE_TIME) + startSecondaryFeedTimer := time.NewTicker(MAX_FEED_INACTIVE_TIME) + stopSecondaryFeedTimer := time.NewTicker(PRIMARY_FEED_UPTIME) + primaryFeedIsDownTimer := time.NewTicker(MAX_FEED_INACTIVE_TIME) defer recentFeedItemsCleanup.Stop() - defer startNewFeedTimer.Stop() + defer startSecondaryFeedTimer.Stop() + defer stopSecondaryFeedTimer.Stop() + defer primaryFeedIsDownTimer.Stop() for { select { case <-ctx.Done(): return - case msg := <-bcs.router.messageChan: - startNewFeedTimer.Reset(MAX_FEED_INACTIVE_TIME) + + // Primary feeds + case msg := <-bcs.primaryRouter.messageChan: + startSecondaryFeedTimer.Reset(MAX_FEED_INACTIVE_TIME) + primaryFeedIsDownTimer.Reset(MAX_FEED_INACTIVE_TIME) if _, ok := recentFeedItemsNew[msg.SequenceNumber]; ok { continue } @@ -148,29 +161,55 @@ func (bcs *BroadcastClients) Start(ctx context.Context) { continue } recentFeedItemsNew[msg.SequenceNumber] = time.Now() - // need to stop the timer because forwardTxStreamer might be blocked when traffic is high - // and that shouldn't create race condition between channels timer.C and messageChan - startNewFeedTimer.Stop() - if err := bcs.router.forwardTxStreamer.AddBroadcastMessages([]*broadcaster.BroadcastFeedMessage{&msg}); err != nil { - log.Error("Error routing message from Sequencer Feed", "err", err) + if err := bcs.primaryRouter.forwardTxStreamer.AddBroadcastMessages([]*broadcaster.BroadcastFeedMessage{&msg}); err != nil { + log.Error("Error routing message from Primary Sequencer Feeds", "err", err) } - startNewFeedTimer.Reset(MAX_FEED_INACTIVE_TIME) - case cs := <-bcs.router.confirmedSequenceNumberChan: - startNewFeedTimer.Reset(MAX_FEED_INACTIVE_TIME) + case cs := <-bcs.primaryRouter.confirmedSequenceNumberChan: + startSecondaryFeedTimer.Reset(MAX_FEED_INACTIVE_TIME) + 
primaryFeedIsDownTimer.Reset(MAX_FEED_INACTIVE_TIME) if cs == lastConfirmed { continue } lastConfirmed = cs - startNewFeedTimer.Stop() - bcs.router.forwardConfirmationChan <- cs - startNewFeedTimer.Reset(MAX_FEED_INACTIVE_TIME) + bcs.primaryRouter.forwardConfirmationChan <- cs + + // Secondary Feeds + case msg := <-bcs.secondaryRouter.messageChan: + startSecondaryFeedTimer.Reset(MAX_FEED_INACTIVE_TIME) + if _, ok := recentFeedItemsNew[msg.SequenceNumber]; ok { + continue + } + if _, ok := recentFeedItemsOld[msg.SequenceNumber]; ok { + continue + } + recentFeedItemsNew[msg.SequenceNumber] = time.Now() + if err := bcs.secondaryRouter.forwardTxStreamer.AddBroadcastMessages([]*broadcaster.BroadcastFeedMessage{&msg}); err != nil { + log.Error("Error routing message from Secondary Sequencer Feeds", "err", err) + } + case cs := <-bcs.secondaryRouter.confirmedSequenceNumberChan: + startSecondaryFeedTimer.Reset(MAX_FEED_INACTIVE_TIME) + if cs == lastConfirmed { + continue + } + lastConfirmed = cs + bcs.secondaryRouter.forwardConfirmationChan <- cs + + // Cycle buckets to get rid of old entries case <-recentFeedItemsCleanup.C: - // Cycle buckets to get rid of old entries recentFeedItemsOld = recentFeedItemsNew recentFeedItemsNew = make(map[arbutil.MessageIndex]time.Time, RECENT_FEED_INITIAL_MAP_SIZE) - case <-startNewFeedTimer.C: - // failed to get messages from primary feed for ~5 seconds, start a new feed + + // failed to get messages from both primary and secondary feeds for ~5 seconds, start a new secondary feed + case <-startSecondaryFeedTimer.C: bcs.StartSecondaryFeed(ctx) + + // failed to get messages from primary feed for ~5 seconds, reset the timer responsible for stopping a secondary + case <-primaryFeedIsDownTimer.C: + stopSecondaryFeedTimer.Reset(PRIMARY_FEED_UPTIME) + + // primary feeds have been up and running for PRIMARY_FEED_UPTIME=10 mins without a failure, stop the recently started secondary feed + case <-stopSecondaryFeedTimer.C: + 
bcs.StopSecondaryFeed(ctx) } } }) @@ -185,6 +224,13 @@ func (bcs *BroadcastClients) StartSecondaryFeed(ctx context.Context) { log.Warn("failed to start a new secondary feed all available secondary feeds were started") } } +func (bcs *BroadcastClients) StopSecondaryFeed(ctx context.Context) { + if bcs.numOfStartedSecondary > 0 { + bcs.numOfStartedSecondary -= 1 + client := bcs.secondaryClients[bcs.numOfStartedSecondary] + client.StopAndWait() + } +} func (bcs *BroadcastClients) StopAndWait() { for _, client := range bcs.primaryClients { From 058db5408a5cfda478332d5b8eb4061cb1279a8b Mon Sep 17 00:00:00 2001 From: Nodar Ambroladze Date: Mon, 23 Oct 2023 13:23:31 +0200 Subject: [PATCH 40/64] Suppress golangci-lint error --- cmd/genericconf/pprof.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/cmd/genericconf/pprof.go b/cmd/genericconf/pprof.go index e55bfddd32..9fd3a6f2a4 100644 --- a/cmd/genericconf/pprof.go +++ b/cmd/genericconf/pprof.go @@ -17,8 +17,7 @@ func StartPprof(address string) { log.Info("Starting metrics server with pprof", "addr", fmt.Sprintf("http://%s/debug/metrics", address)) log.Info("Pprof endpoint", "addr", fmt.Sprintf("http://%s/debug/pprof", address)) go func() { - // #nosec G114 - if err := http.ListenAndServe(address, http.DefaultServeMux); err != nil { + if err := http.ListenAndServe(address, http.DefaultServeMux); /* #nosec G114 */ err != nil { log.Error("Failure in running pprof server", "err", err) } }() From 2ec0a4412828c6adcc9345a139d3ba9e558648df Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Mon, 23 Oct 2023 22:23:54 -0500 Subject: [PATCH 41/64] Refactor System Tests --- system_tests/arbtrace_test.go | 33 +++--- system_tests/common_test.go | 5 + system_tests/conditionaltx_test.go | 143 ++++++++++++------------- system_tests/debugapi_test.go | 19 ++-- system_tests/fees_test.go | 80 +++++++------- system_tests/forwarder_test.go | 141 ++++++++++-------------- system_tests/ipc_test.go | 14 +-- 
system_tests/meaningless_reorg_test.go | 40 ++++--- system_tests/seq_coordinator_test.go | 68 ++++++------ system_tests/seqfeed_test.go | 114 +++++++++++--------- system_tests/seqinbox_test.go | 81 +++++++------- system_tests/staker_test.go | 80 +++++++------- 12 files changed, 398 insertions(+), 420 deletions(-) diff --git a/system_tests/arbtrace_test.go b/system_tests/arbtrace_test.go index a4995e3979..36e4cc9402 100644 --- a/system_tests/arbtrace_test.go +++ b/system_tests/arbtrace_test.go @@ -10,8 +10,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/rpc" - "github.com/offchainlabs/nitro/execution/gethexec" - "github.com/offchainlabs/nitro/util/testhelpers" ) type callTxArgs struct { @@ -140,18 +138,17 @@ func TestArbTraceForwarding(t *testing.T) { Public: false, }) listener, srv, err := rpc.StartIPCEndpoint(ipcPath, apis) - testhelpers.RequireImpl(t, err) + Require(t, err) defer srv.Stop() defer listener.Close() - execConfig := gethexec.ConfigDefaultTest() - execConfig.RPC.ClassicRedirect = ipcPath - execConfig.RPC.ClassicRedirectTimeout = time.Second - _, _, _, l2stack, _, _, _, l1stack := createTestNodeOnL1WithConfigImpl(t, ctx, true, nil, execConfig, nil, nil, nil) - defer requireClose(t, l1stack) - defer requireClose(t, l2stack) + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + builder.execConfig.RPC.ClassicRedirect = ipcPath + builder.execConfig.RPC.ClassicRedirectTimeout = time.Second + cleanup := builder.Build(t) + defer cleanup() - l2rpc, _ := l2stack.Attach() + l2rpc, _ := builder.L2.Stack.Attach() txArgs := callTxArgs{} traceTypes := []string{"trace"} blockNum := rpc.BlockNumberOrHash{} @@ -162,22 +159,22 @@ func TestArbTraceForwarding(t *testing.T) { filter := filterRequest{} var result traceResult err = l2rpc.CallContext(ctx, &result, "arbtrace_call", txArgs, traceTypes, blockNum) - testhelpers.RequireImpl(t, err) + Require(t, err) var results 
[]*traceResult err = l2rpc.CallContext(ctx, &results, "arbtrace_callMany", traceRequests, blockNum) - testhelpers.RequireImpl(t, err) + Require(t, err) err = l2rpc.CallContext(ctx, &results, "arbtrace_replayBlockTransactions", blockNum, traceTypes) - testhelpers.RequireImpl(t, err) + Require(t, err) err = l2rpc.CallContext(ctx, &result, "arbtrace_replayTransaction", txHash, traceTypes) - testhelpers.RequireImpl(t, err) + Require(t, err) var frames []traceFrame err = l2rpc.CallContext(ctx, &frames, "arbtrace_transaction", txHash) - testhelpers.RequireImpl(t, err) + Require(t, err) var frame traceFrame err = l2rpc.CallContext(ctx, &frame, "arbtrace_get", txHash, path) - testhelpers.RequireImpl(t, err) + Require(t, err) err = l2rpc.CallContext(ctx, &frames, "arbtrace_block", blockNum) - testhelpers.RequireImpl(t, err) + Require(t, err) err = l2rpc.CallContext(ctx, &frames, "arbtrace_filter", filter) - testhelpers.RequireImpl(t, err) + Require(t, err) } diff --git a/system_tests/common_test.go b/system_tests/common_test.go index d233631d4c..41a3fd1417 100644 --- a/system_tests/common_test.go +++ b/system_tests/common_test.go @@ -115,6 +115,10 @@ func (tc *TestClient) DeploySimple(t *testing.T, auth bind.TransactOpts) (common return deploySimple(t, tc.ctx, auth, tc.Client) } +func (tc *TestClient) EnsureTxSucceeded(transaction *types.Transaction) (*types.Receipt, error) { + return EnsureTxSucceeded(tc.ctx, tc.Client, transaction) +} + type NodeBuilder struct { // NodeBuilder configuration ctx context.Context @@ -212,6 +216,7 @@ func (b *NodeBuilder) Build2ndNode(t *testing.T, params *SecondNodeParams) (*Tes l2 := NewTestClient(b.ctx) l2.Client, l2.ConsensusNode = Create2ndNodeWithConfig(t, b.ctx, b.L2.ConsensusNode, b.L1.Stack, b.L1Info, params.initData, params.nodeConfig, params.execConfig, params.stackConfig) + l2.ExecNode = getExecNode(t, l2.ConsensusNode) l2.cleanup = func() { l2.ConsensusNode.StopAndWait() } return l2, func() { l2.cleanup() } } diff --git 
a/system_tests/conditionaltx_test.go b/system_tests/conditionaltx_test.go index 211908a883..d75dd27255 100644 --- a/system_tests/conditionaltx_test.go +++ b/system_tests/conditionaltx_test.go @@ -202,43 +202,42 @@ func TestSendRawTransactionConditionalBasic(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - l2info, node, l2client, _, _, l1client, l1stack := createTestNodeOnL1(t, ctx, true) - defer requireClose(t, l1stack) - defer node.StopAndWait() + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + cleanup := builder.Build(t) + defer cleanup() - execNode := getExecNode(t, node) - auth := l2info.GetDefaultTransactOpts("Owner", ctx) - contractAddress1, simple1 := deploySimple(t, ctx, auth, l2client) + auth := builder.L2Info.GetDefaultTransactOpts("Owner", ctx) + contractAddress1, simple1 := builder.L2.DeploySimple(t, auth) tx, err := simple1.Increment(&auth) Require(t, err, "failed to call Increment()") - _, err = EnsureTxSucceeded(ctx, l2client, tx) + _, err = builder.L2.EnsureTxSucceeded(tx) Require(t, err) - contractAddress2, simple2 := deploySimple(t, ctx, auth, l2client) + contractAddress2, simple2 := builder.L2.DeploySimple(t, auth) tx, err = simple2.Increment(&auth) Require(t, err, "failed to call Increment()") - _, err = EnsureTxSucceeded(ctx, l2client, tx) + _, err = builder.L2.EnsureTxSucceeded(tx) Require(t, err) tx, err = simple2.Increment(&auth) Require(t, err, "failed to call Increment()") - _, err = EnsureTxSucceeded(ctx, l2client, tx) + _, err = builder.L2.EnsureTxSucceeded(tx) Require(t, err) - currentRootHash1 := getStorageRootHash(t, execNode, contractAddress1) - currentSlotValueMap1 := getStorageSlotValue(t, execNode, contractAddress1) - currentRootHash2 := getStorageRootHash(t, execNode, contractAddress2) - currentSlotValueMap2 := getStorageSlotValue(t, execNode, contractAddress2) + currentRootHash1 := getStorageRootHash(t, builder.L2.ExecNode, contractAddress1) + currentSlotValueMap1 := 
getStorageSlotValue(t, builder.L2.ExecNode, contractAddress1) + currentRootHash2 := getStorageRootHash(t, builder.L2.ExecNode, contractAddress2) + currentSlotValueMap2 := getStorageSlotValue(t, builder.L2.ExecNode, contractAddress2) - rpcClient, err := node.Stack.Attach() + rpcClient, err := builder.L2.ConsensusNode.Stack.Attach() Require(t, err) - l2info.GenerateAccount("User2") + builder.L2Info.GenerateAccount("User2") - testConditionalTxThatShouldSucceed(t, ctx, -1, l2info, rpcClient, nil) + testConditionalTxThatShouldSucceed(t, ctx, -1, builder.L2Info, rpcClient, nil) for i, options := range getEmptyOptions(contractAddress1) { - testConditionalTxThatShouldSucceed(t, ctx, i, l2info, rpcClient, options) + testConditionalTxThatShouldSucceed(t, ctx, i, builder.L2Info, rpcClient, options) } - block, err := l1client.BlockByNumber(ctx, nil) + block, err := builder.L1.Client.BlockByNumber(ctx, nil) Require(t, err) blockNumber := block.NumberU64() blockTime := block.Time() @@ -249,33 +248,33 @@ func TestSendRawTransactionConditionalBasic(t *testing.T) { options1 := dedupOptions(t, append(append(optionsAB, optionsA...), optionsB...)) options1 = optionsDedupProduct(t, options1, getFulfillableBlockTimeLimits(t, blockNumber, blockTime)) for i, options := range options1 { - testConditionalTxThatShouldSucceed(t, ctx, i, l2info, rpcClient, options) + testConditionalTxThatShouldSucceed(t, ctx, i, builder.L2Info, rpcClient, options) } tx, err = simple1.Increment(&auth) Require(t, err, "failed to call Increment()") - _, err = EnsureTxSucceeded(ctx, l2client, tx) + _, err = builder.L2.EnsureTxSucceeded(tx) Require(t, err) tx, err = simple2.Increment(&auth) Require(t, err, "failed to call Increment()") - _, err = EnsureTxSucceeded(ctx, l2client, tx) + _, err = builder.L2.EnsureTxSucceeded(tx) Require(t, err) previousStorageRootHash1 := currentRootHash1 - currentRootHash1 = getStorageRootHash(t, execNode, contractAddress1) + currentRootHash1 = getStorageRootHash(t, 
builder.L2.ExecNode, contractAddress1) if bytes.Equal(previousStorageRootHash1.Bytes(), currentRootHash1.Bytes()) { Fatal(t, "storage root hash didn't change as expected") } - currentSlotValueMap1 = getStorageSlotValue(t, execNode, contractAddress1) + currentSlotValueMap1 = getStorageSlotValue(t, builder.L2.ExecNode, contractAddress1) previousStorageRootHash2 := currentRootHash2 - currentRootHash2 = getStorageRootHash(t, execNode, contractAddress2) + currentRootHash2 = getStorageRootHash(t, builder.L2.ExecNode, contractAddress2) if bytes.Equal(previousStorageRootHash2.Bytes(), currentRootHash2.Bytes()) { Fatal(t, "storage root hash didn't change as expected") } - currentSlotValueMap2 = getStorageSlotValue(t, execNode, contractAddress2) + currentSlotValueMap2 = getStorageSlotValue(t, builder.L2.ExecNode, contractAddress2) - block, err = l1client.BlockByNumber(ctx, nil) + block, err = builder.L1.Client.BlockByNumber(ctx, nil) Require(t, err) blockNumber = block.NumberU64() blockTime = block.Time() @@ -286,35 +285,38 @@ func TestSendRawTransactionConditionalBasic(t *testing.T) { options2 := dedupOptions(t, append(append(optionsCD, optionsC...), optionsD...)) options2 = optionsDedupProduct(t, options2, getFulfillableBlockTimeLimits(t, blockNumber, blockTime)) for i, options := range options2 { - testConditionalTxThatShouldSucceed(t, ctx, i, l2info, rpcClient, options) + testConditionalTxThatShouldSucceed(t, ctx, i, builder.L2Info, rpcClient, options) } for i, options := range options1 { - testConditionalTxThatShouldFail(t, ctx, i, l2info, rpcClient, options, -32003) + testConditionalTxThatShouldFail(t, ctx, i, builder.L2Info, rpcClient, options, -32003) } - block, err = l1client.BlockByNumber(ctx, nil) + block, err = builder.L1.Client.BlockByNumber(ctx, nil) Require(t, err) blockNumber = block.NumberU64() blockTime = block.Time() options3 := optionsDedupProduct(t, options2, getUnfulfillableBlockTimeLimits(t, blockNumber, blockTime)) for i, options := range options3 { - 
testConditionalTxThatShouldFail(t, ctx, i, l2info, rpcClient, options, -32003) + testConditionalTxThatShouldFail(t, ctx, i, builder.L2Info, rpcClient, options, -32003) } options4 := optionsDedupProduct(t, options2, options1) for i, options := range options4 { - testConditionalTxThatShouldFail(t, ctx, i, l2info, rpcClient, options, -32003) + testConditionalTxThatShouldFail(t, ctx, i, builder.L2Info, rpcClient, options, -32003) } } func TestSendRawTransactionConditionalMultiRoutine(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - l2info, node, client := CreateTestL2(t, ctx) - defer node.StopAndWait() - rpcClient, err := node.Stack.Attach() + + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + cleanup := builder.Build(t) + defer cleanup() + + rpcClient, err := builder.L2.ConsensusNode.Stack.Attach() Require(t, err) - auth := l2info.GetDefaultTransactOpts("Owner", ctx) - contractAddress, simple := deploySimple(t, ctx, auth, client) + auth := builder.L2Info.GetDefaultTransactOpts("Owner", ctx) + contractAddress, simple := builder.L2.DeploySimple(t, auth) simpleContract, err := abi.JSON(strings.NewReader(mocksgen.SimpleABI)) Require(t, err) @@ -325,11 +327,11 @@ func TestSendRawTransactionConditionalMultiRoutine(t *testing.T) { var options []*arbitrum_types.ConditionalOptions for i := 0; i < numTxes; i++ { account := fmt.Sprintf("User%v", i) - l2info.GenerateAccount(account) - tx := l2info.PrepareTx("Owner", account, l2info.TransferGas, big.NewInt(1e16), nil) - err := client.SendTransaction(ctx, tx) + builder.L2Info.GenerateAccount(account) + tx := builder.L2Info.PrepareTx("Owner", account, builder.L2Info.TransferGas, big.NewInt(1e16), nil) + err := builder.L2.Client.SendTransaction(ctx, tx) Require(t, err) - _, err = EnsureTxSucceeded(ctx, client, tx) + _, err = builder.L2.EnsureTxSucceeded(tx) Require(t, err) } for i := numTxes - 1; i >= 0; i-- { @@ -337,7 +339,7 @@ func TestSendRawTransactionConditionalMultiRoutine(t 
*testing.T) { data, err := simpleContract.Pack("logAndIncrement", big.NewInt(int64(expected))) Require(t, err) account := fmt.Sprintf("User%v", i) - txes = append(txes, l2info.PrepareTxTo(account, &contractAddress, l2info.TransferGas, big.NewInt(0), data)) + txes = append(txes, builder.L2Info.PrepareTxTo(account, &contractAddress, builder.L2Info.TransferGas, big.NewInt(0), data)) options = append(options, &arbitrum_types.ConditionalOptions{KnownAccounts: map[common.Address]arbitrum_types.RootHashOrSlots{contractAddress: {SlotValue: map[common.Hash]common.Hash{{0}: common.BigToHash(big.NewInt(int64(expected)))}}}}) } ctxWithTimeout, cancelCtxWithTimeout := context.WithTimeout(ctx, 5*time.Second) @@ -367,8 +369,7 @@ func TestSendRawTransactionConditionalMultiRoutine(t *testing.T) { } cancelCtxWithTimeout() wg.Wait() - execNode := getExecNode(t, node) - bc := execNode.Backend.ArbInterface().BlockChain() + bc := builder.L2.ExecNode.Backend.ArbInterface().BlockChain() genesis := bc.Config().ArbitrumChainParams.GenesisBlockNum var receipts types.Receipts @@ -404,41 +405,39 @@ func TestSendRawTransactionConditionalPreCheck(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - execConfig := gethexec.ConfigDefaultTest() - execConfig.Sequencer.MaxBlockSpeed = 0 - execConfig.TxPreChecker.Strictness = gethexec.TxPreCheckerStrictnessLikelyCompatible - execConfig.TxPreChecker.RequiredStateAge = 1 - execConfig.TxPreChecker.RequiredStateMaxBlocks = 2 + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + builder.execConfig.Sequencer.MaxBlockSpeed = 0 + builder.execConfig.TxPreChecker.Strictness = gethexec.TxPreCheckerStrictnessLikelyCompatible + builder.execConfig.TxPreChecker.RequiredStateAge = 1 + builder.execConfig.TxPreChecker.RequiredStateMaxBlocks = 2 + cleanup := builder.Build(t) + defer cleanup() - l2info, node, l2client, _, _, _, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, nil, execConfig, nil, nil) - defer requireClose(t, 
l1stack) - defer node.StopAndWait() - rpcClient, err := node.Stack.Attach() + rpcClient, err := builder.L2.ConsensusNode.Stack.Attach() Require(t, err) - execNode := getExecNode(t, node) - l2info.GenerateAccount("User2") + builder.L2Info.GenerateAccount("User2") - auth := l2info.GetDefaultTransactOpts("Owner", ctx) + auth := builder.L2Info.GetDefaultTransactOpts("Owner", ctx) start := time.Now().Unix() - contractAddress, simple := deploySimple(t, ctx, auth, l2client) + contractAddress, simple := builder.L2.DeploySimple(t, auth) if time.Since(time.Unix(start, 0)) > 200*time.Millisecond { start++ time.Sleep(time.Until(time.Unix(start, 0))) } tx, err := simple.Increment(&auth) Require(t, err, "failed to call Increment()") - _, err = EnsureTxSucceeded(ctx, l2client, tx) + _, err = builder.L2.EnsureTxSucceeded(tx) Require(t, err) - currentRootHash := getStorageRootHash(t, execNode, contractAddress) + currentRootHash := getStorageRootHash(t, builder.L2.ExecNode, contractAddress) options := &arbitrum_types.ConditionalOptions{ KnownAccounts: map[common.Address]arbitrum_types.RootHashOrSlots{ contractAddress: {RootHash: &currentRootHash}, }, } - testConditionalTxThatShouldFail(t, ctx, 0, l2info, rpcClient, options, -32003) + testConditionalTxThatShouldFail(t, ctx, 0, builder.L2Info, rpcClient, options, -32003) time.Sleep(time.Until(time.Unix(start+1, 0))) - testConditionalTxThatShouldSucceed(t, ctx, 1, l2info, rpcClient, options) + testConditionalTxThatShouldSucceed(t, ctx, 1, builder.L2Info, rpcClient, options) start = time.Now().Unix() if time.Since(time.Unix(start, 0)) > 200*time.Millisecond { @@ -447,23 +446,23 @@ func TestSendRawTransactionConditionalPreCheck(t *testing.T) { } tx, err = simple.Increment(&auth) Require(t, err, "failed to call Increment()") - _, err = EnsureTxSucceeded(ctx, l2client, tx) + _, err = builder.L2.EnsureTxSucceeded(tx) Require(t, err) - currentRootHash = getStorageRootHash(t, execNode, contractAddress) + currentRootHash = getStorageRootHash(t, 
builder.L2.ExecNode, contractAddress) options = &arbitrum_types.ConditionalOptions{ KnownAccounts: map[common.Address]arbitrum_types.RootHashOrSlots{ contractAddress: {RootHash: &currentRootHash}, }, } - testConditionalTxThatShouldFail(t, ctx, 2, l2info, rpcClient, options, -32003) - tx = l2info.PrepareTx("Owner", "User2", l2info.TransferGas, big.NewInt(1e12), nil) - Require(t, l2client.SendTransaction(ctx, tx)) - _, err = EnsureTxSucceeded(ctx, l2client, tx) + testConditionalTxThatShouldFail(t, ctx, 2, builder.L2Info, rpcClient, options, -32003) + tx = builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, big.NewInt(1e12), nil) + Require(t, builder.L2.Client.SendTransaction(ctx, tx)) + _, err = builder.L2.EnsureTxSucceeded(tx) Require(t, err) - testConditionalTxThatShouldFail(t, ctx, 3, l2info, rpcClient, options, -32003) - tx = l2info.PrepareTx("Owner", "User2", l2info.TransferGas, big.NewInt(1e12), nil) - Require(t, l2client.SendTransaction(ctx, tx)) - _, err = EnsureTxSucceeded(ctx, l2client, tx) + testConditionalTxThatShouldFail(t, ctx, 3, builder.L2Info, rpcClient, options, -32003) + tx = builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, big.NewInt(1e12), nil) + Require(t, builder.L2.Client.SendTransaction(ctx, tx)) + _, err = builder.L2.EnsureTxSucceeded(tx) Require(t, err) - testConditionalTxThatShouldSucceed(t, ctx, 4, l2info, rpcClient, options) + testConditionalTxThatShouldSucceed(t, ctx, 4, builder.L2Info, rpcClient, options) } diff --git a/system_tests/debugapi_test.go b/system_tests/debugapi_test.go index ff28e2350c..b8fbffcfee 100644 --- a/system_tests/debugapi_test.go +++ b/system_tests/debugapi_test.go @@ -8,32 +8,31 @@ import ( "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/eth" "github.com/ethereum/go-ethereum/rpc" - "github.com/offchainlabs/nitro/util/testhelpers" ) func TestDebugAPI(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - _, _, _, 
l2stack, _, _, _, l1stack := createTestNodeOnL1WithConfigImpl(t, ctx, true, nil, nil, nil, nil, nil) - defer requireClose(t, l1stack) - defer requireClose(t, l2stack) + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + cleanup := builder.Build(t) + defer cleanup() - l2rpc, _ := l2stack.Attach() + l2rpc, _ := builder.L2.Stack.Attach() var dump state.Dump err := l2rpc.CallContext(ctx, &dump, "debug_dumpBlock", rpc.LatestBlockNumber) - testhelpers.RequireImpl(t, err) + Require(t, err) err = l2rpc.CallContext(ctx, &dump, "debug_dumpBlock", rpc.PendingBlockNumber) - testhelpers.RequireImpl(t, err) + Require(t, err) var badBlocks []eth.BadBlockArgs err = l2rpc.CallContext(ctx, &badBlocks, "debug_getBadBlocks") - testhelpers.RequireImpl(t, err) + Require(t, err) var dumpIt state.IteratorDump err = l2rpc.CallContext(ctx, &dumpIt, "debug_accountRange", rpc.LatestBlockNumber, hexutil.Bytes{}, 10, true, true, false) - testhelpers.RequireImpl(t, err) + Require(t, err) err = l2rpc.CallContext(ctx, &dumpIt, "debug_accountRange", rpc.PendingBlockNumber, hexutil.Bytes{}, 10, true, true, false) - testhelpers.RequireImpl(t, err) + Require(t, err) } diff --git a/system_tests/fees_test.go b/system_tests/fees_test.go index 750293622d..17ab7b69c4 100644 --- a/system_tests/fees_test.go +++ b/system_tests/fees_test.go @@ -20,7 +20,6 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbcompress" - "github.com/offchainlabs/nitro/arbnode" "github.com/offchainlabs/nitro/arbos/l1pricing" "github.com/ethereum/go-ethereum/common" @@ -33,20 +32,20 @@ func TestSequencerFeePaid(t *testing.T) { t.Parallel() ctx, cancel := context.WithCancel(context.Background()) defer cancel() - l2info, l2node, l2client, _, _, _, l1stack := createTestNodeOnL1(t, ctx, true) - defer requireClose(t, l1stack) - defer l2node.StopAndWait() - execNode := getExecNode(t, l2node) - version := 
execNode.ArbInterface.BlockChain().Config().ArbitrumChainParams.InitialArbOSVersion - callOpts := l2info.GetDefaultCallOpts("Owner", ctx) + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + cleanup := builder.Build(t) + defer cleanup() + + version := builder.L2.ExecNode.ArbInterface.BlockChain().Config().ArbitrumChainParams.InitialArbOSVersion + callOpts := builder.L2Info.GetDefaultCallOpts("Owner", ctx) // get the network fee account - arbOwnerPublic, err := precompilesgen.NewArbOwnerPublic(common.HexToAddress("0x6b"), l2client) + arbOwnerPublic, err := precompilesgen.NewArbOwnerPublic(common.HexToAddress("0x6b"), builder.L2.Client) Require(t, err, "failed to deploy contract") - arbGasInfo, err := precompilesgen.NewArbGasInfo(common.HexToAddress("0x6c"), l2client) + arbGasInfo, err := precompilesgen.NewArbGasInfo(common.HexToAddress("0x6c"), builder.L2.Client) Require(t, err, "failed to deploy contract") - arbDebug, err := precompilesgen.NewArbDebug(common.HexToAddress("0xff"), l2client) + arbDebug, err := precompilesgen.NewArbDebug(common.HexToAddress("0xff"), builder.L2.Client) Require(t, err, "failed to deploy contract") networkFeeAccount, err := arbOwnerPublic.GetNetworkFeeAccount(callOpts) Require(t, err, "could not get the network fee account") @@ -54,24 +53,24 @@ func TestSequencerFeePaid(t *testing.T) { l1Estimate, err := arbGasInfo.GetL1BaseFeeEstimate(callOpts) Require(t, err) - baseFee := GetBaseFee(t, l2client, ctx) - l2info.GasPrice = baseFee + baseFee := GetBaseFee(t, builder.L2.Client, ctx) + builder.L2Info.GasPrice = baseFee testFees := func(tip uint64) (*big.Int, *big.Int) { tipCap := arbmath.BigMulByUint(baseFee, tip) - txOpts := l2info.GetDefaultTransactOpts("Faucet", ctx) + txOpts := builder.L2Info.GetDefaultTransactOpts("Faucet", ctx) txOpts.GasTipCap = tipCap gasPrice := arbmath.BigAdd(baseFee, tipCap) - networkBefore := GetBalance(t, ctx, l2client, networkFeeAccount) + networkBefore := builder.L2.GetBalance(t, networkFeeAccount) tx, 
err := arbDebug.Events(&txOpts, true, [32]byte{}) Require(t, err) - receipt, err := EnsureTxSucceeded(ctx, l2client, tx) + receipt, err := builder.L2.EnsureTxSucceeded(tx) Require(t, err) - networkAfter := GetBalance(t, ctx, l2client, networkFeeAccount) - l1Charge := arbmath.BigMulByUint(l2info.GasPrice, receipt.GasUsedForL1) + networkAfter := builder.L2.GetBalance(t, networkFeeAccount) + l1Charge := arbmath.BigMulByUint(builder.L2Info.GasPrice, receipt.GasUsedForL1) // the network should receive // 1. compute costs @@ -93,7 +92,7 @@ func TestSequencerFeePaid(t *testing.T) { l1GasBought := arbmath.BigDiv(l1Charge, l1Estimate).Uint64() l1ChargeExpected := arbmath.BigMulByUint(l1Estimate, txSize*params.TxDataNonZeroGasEIP2028) // L1 gas can only be charged in terms of L2 gas, so subtract off any rounding error from the expected value - l1ChargeExpected.Sub(l1ChargeExpected, new(big.Int).Mod(l1ChargeExpected, l2info.GasPrice)) + l1ChargeExpected.Sub(l1ChargeExpected, new(big.Int).Mod(l1ChargeExpected, builder.L2Info.GasPrice)) colors.PrintBlue("bytes ", l1GasBought/params.TxDataNonZeroGasEIP2028, txSize) @@ -132,42 +131,39 @@ func testSequencerPriceAdjustsFrom(t *testing.T, initialEstimate uint64) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - chainConfig := params.ArbitrumDevTestChainConfig() - conf := arbnode.ConfigDefaultL1Test() - conf.DelayedSequencer.FinalizeDistance = 1 - - l2info, node, l2client, l1info, _, l1client, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, conf, nil, chainConfig, nil) - defer requireClose(t, l1stack) - defer node.StopAndWait() + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + builder.nodeConfig.DelayedSequencer.FinalizeDistance = 1 + cleanup := builder.Build(t) + defer cleanup() - ownerAuth := l2info.GetDefaultTransactOpts("Owner", ctx) + ownerAuth := builder.L2Info.GetDefaultTransactOpts("Owner", ctx) // make ownerAuth a chain owner - arbdebug, err := 
precompilesgen.NewArbDebug(common.HexToAddress("0xff"), l2client) + arbdebug, err := precompilesgen.NewArbDebug(common.HexToAddress("0xff"), builder.L2.Client) Require(t, err) tx, err := arbdebug.BecomeChainOwner(&ownerAuth) Require(t, err) - _, err = EnsureTxSucceeded(ctx, l2client, tx) + _, err = builder.L2.EnsureTxSucceeded(tx) // use ownerAuth to set the L1 price per unit Require(t, err) - arbOwner, err := precompilesgen.NewArbOwner(common.HexToAddress("0x70"), l2client) + arbOwner, err := precompilesgen.NewArbOwner(common.HexToAddress("0x70"), builder.L2.Client) Require(t, err) tx, err = arbOwner.SetL1PricePerUnit(&ownerAuth, arbmath.UintToBig(initialEstimate)) Require(t, err) - _, err = WaitForTx(ctx, l2client, tx.Hash(), time.Second*5) + _, err = WaitForTx(ctx, builder.L2.Client, tx.Hash(), time.Second*5) Require(t, err) - arbGasInfo, err := precompilesgen.NewArbGasInfo(common.HexToAddress("0x6c"), l2client) + arbGasInfo, err := precompilesgen.NewArbGasInfo(common.HexToAddress("0x6c"), builder.L2.Client) Require(t, err) lastEstimate, err := arbGasInfo.GetL1BaseFeeEstimate(&bind.CallOpts{Context: ctx}) Require(t, err) - lastBatchCount, err := node.InboxTracker.GetBatchCount() + lastBatchCount, err := builder.L2.ConsensusNode.InboxTracker.GetBatchCount() Require(t, err) - l1Header, err := l1client.HeaderByNumber(ctx, nil) + l1Header, err := builder.L1.Client.HeaderByNumber(ctx, nil) Require(t, err) - rewardRecipientBalanceBefore := GetBalance(t, ctx, l2client, l1pricing.BatchPosterAddress) + rewardRecipientBalanceBefore := builder.L2.GetBalance(t, l1pricing.BatchPosterAddress) timesPriceAdjusted := 0 colors.PrintBlue("Initial values") @@ -176,17 +172,17 @@ func testSequencerPriceAdjustsFrom(t *testing.T, initialEstimate uint64) { numRetrogradeMoves := 0 for i := 0; i < 256; i++ { - tx, receipt := TransferBalance(t, "Owner", "Owner", common.Big1, l2info, l2client, ctx) - header, err := l2client.HeaderByHash(ctx, receipt.BlockHash) + tx, receipt := 
builder.L2.TransferBalance(t, "Owner", "Owner", common.Big1, builder.L2Info) + header, err := builder.L2.Client.HeaderByHash(ctx, receipt.BlockHash) Require(t, err) - TransferBalance(t, "Faucet", "Faucet", common.Big1, l1info, l1client, ctx) // generate l1 traffic + builder.L1.TransferBalance(t, "Faucet", "Faucet", common.Big1, builder.L1Info) // generate l1 traffic units := compressedTxSize(t, tx) * params.TxDataNonZeroGasEIP2028 estimatedL1FeePerUnit := arbmath.BigDivByUint(arbmath.BigMulByUint(header.BaseFee, receipt.GasUsedForL1), units) if !arbmath.BigEquals(lastEstimate, estimatedL1FeePerUnit) { - l1Header, err = l1client.HeaderByNumber(ctx, nil) + l1Header, err = builder.L1.Client.HeaderByNumber(ctx, nil) Require(t, err) callOpts := &bind.CallOpts{Context: ctx, BlockNumber: receipt.BlockNumber} @@ -235,7 +231,7 @@ func testSequencerPriceAdjustsFrom(t *testing.T, initialEstimate uint64) { // see that the inbox advances for j := 16; j > 0; j-- { - newBatchCount, err := node.InboxTracker.GetBatchCount() + newBatchCount, err := builder.L2.ConsensusNode.InboxTracker.GetBatchCount() Require(t, err) if newBatchCount > lastBatchCount { colors.PrintGrey("posted new batch ", newBatchCount) @@ -250,7 +246,7 @@ func testSequencerPriceAdjustsFrom(t *testing.T, initialEstimate uint64) { } } - rewardRecipientBalanceAfter := GetBalance(t, ctx, l2client, chainConfig.ArbitrumChainParams.InitialChainOwner) + rewardRecipientBalanceAfter := builder.L2.GetBalance(t, builder.chainConfig.ArbitrumChainParams.InitialChainOwner) colors.PrintMint("reward recipient balance ", rewardRecipientBalanceBefore, " ➤ ", rewardRecipientBalanceAfter) colors.PrintMint("price changes ", timesPriceAdjusted) @@ -261,7 +257,7 @@ func testSequencerPriceAdjustsFrom(t *testing.T, initialEstimate uint64) { Fatal(t, "reward recipient didn't get paid") } - arbAggregator, err := precompilesgen.NewArbAggregator(common.HexToAddress("0x6d"), l2client) + arbAggregator, err := 
precompilesgen.NewArbAggregator(common.HexToAddress("0x6d"), builder.L2.Client) Require(t, err) batchPosterAddresses, err := arbAggregator.GetBatchPosters(&bind.CallOpts{Context: ctx}) Require(t, err) @@ -269,7 +265,7 @@ func testSequencerPriceAdjustsFrom(t *testing.T, initialEstimate uint64) { for _, bpAddr := range batchPosterAddresses { if bpAddr != l1pricing.BatchPosterAddress && bpAddr != l1pricing.L1PricerFundsPoolAddress { numReimbursed++ - bal, err := l1client.BalanceAt(ctx, bpAddr, nil) + bal, err := builder.L1.Client.BalanceAt(ctx, bpAddr, nil) Require(t, err) if bal.Sign() == 0 { Fatal(t, "Batch poster balance is zero for", bpAddr) diff --git a/system_tests/forwarder_test.go b/system_tests/forwarder_test.go index fc7eb4cc2d..d29e82c12c 100644 --- a/system_tests/forwarder_test.go +++ b/system_tests/forwarder_test.go @@ -15,13 +15,9 @@ import ( "time" "github.com/alicebob/miniredis/v2" - "github.com/ethereum/go-ethereum/eth" "github.com/ethereum/go-ethereum/ethclient" - "github.com/ethereum/go-ethereum/node" "github.com/offchainlabs/nitro/arbnode" - "github.com/offchainlabs/nitro/cmd/genericconf" "github.com/offchainlabs/nitro/execution/gethexec" - "github.com/offchainlabs/nitro/statetransfer" "github.com/offchainlabs/nitro/util/redisutil" ) @@ -33,16 +29,14 @@ func TestStaticForwarder(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() ipcPath := tmpPath(t, "test.ipc") - ipcConfig := genericconf.IPCConfigDefault - ipcConfig.Path = ipcPath - stackConfig := createStackConfigForTest(t.TempDir()) - ipcConfig.Apply(stackConfig) - nodeConfigA := arbnode.ConfigDefaultL1Test() - nodeConfigA.BatchPoster.Enable = false - l2info, nodeA, clientA, l1info, _, _, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, nodeConfigA, nil, nil, stackConfig) - defer requireClose(t, l1stack) - defer nodeA.StopAndWait() + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + builder.nodeConfig.BatchPoster.Enable = false + 
builder.l2StackConfig.IPCPath = ipcPath + cleanupA := builder.Build(t) + defer cleanupA() + + clientA := builder.L2.Client nodeConfigB := arbnode.ConfigDefaultL1Test() execConfigB := gethexec.ConfigDefaultTest() @@ -53,18 +47,22 @@ func TestStaticForwarder(t *testing.T) { execConfigB.ForwardingTarget = ipcPath nodeConfigB.BatchPoster.Enable = false - clientB, nodeB := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, l1info, &l2info.ArbInitData, nodeConfigB, execConfigB, nil) - defer nodeB.StopAndWait() + testClientB, cleanupB := builder.Build2ndNode(t, &SecondNodeParams{ + nodeConfig: nodeConfigB, + execConfig: execConfigB, + }) + defer cleanupB() + clientB := testClientB.Client - l2info.GenerateAccount("User2") - tx := l2info.PrepareTx("Owner", "User2", l2info.TransferGas, transferAmount, nil) + builder.L2Info.GenerateAccount("User2") + tx := builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, transferAmount, nil) err := clientB.SendTransaction(ctx, tx) Require(t, err) - _, err = EnsureTxSucceeded(ctx, clientA, tx) + _, err = builder.L2.EnsureTxSucceeded(tx) Require(t, err) - l2balance, err := clientA.BalanceAt(ctx, l2info.GetAddress("User2"), nil) + l2balance, err := clientA.BalanceAt(ctx, builder.L2Info.GetAddress("User2"), nil) Require(t, err) if l2balance.Cmp(transferAmount) != 0 { @@ -95,36 +93,18 @@ type fallbackSequencerOpts struct { enableSecCoordinator bool } -func fallbackSequencer( - ctx context.Context, t *testing.T, opts *fallbackSequencerOpts, -) (l2info info, currentNode *arbnode.Node, l2client *ethclient.Client, - l1info info, l1backend *eth.Ethereum, l1client *ethclient.Client, l1stack *node.Node) { - stackConfig := createStackConfigForTest(t.TempDir()) - ipcConfig := genericconf.IPCConfigDefault - ipcConfig.Path = opts.ipcPath - ipcConfig.Apply(stackConfig) - nodeConfig := arbnode.ConfigDefaultL1Test() - nodeConfig.SeqCoordinator.Enable = opts.enableSecCoordinator - nodeConfig.SeqCoordinator.RedisUrl = opts.redisUrl - 
nodeConfig.SeqCoordinator.MyUrl = opts.ipcPath - return createTestNodeOnL1WithConfig(t, ctx, true, nodeConfig, nil, nil, stackConfig) +func fallbackSequencer(ctx context.Context, t *testing.T, opts *fallbackSequencerOpts) *NodeBuilder { + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + builder.l2StackConfig.IPCPath = opts.ipcPath + builder.nodeConfig.SeqCoordinator.Enable = opts.enableSecCoordinator + builder.nodeConfig.SeqCoordinator.RedisUrl = opts.redisUrl + builder.nodeConfig.SeqCoordinator.MyUrl = opts.ipcPath + return builder } -func createForwardingNode( - ctx context.Context, t *testing.T, - first *arbnode.Node, - l1stack *node.Node, - l1info *BlockchainTestInfo, - l2InitData *statetransfer.ArbosInitializationInfo, - ipcPath string, - redisUrl string, - fallbackPath string, -) (*ethclient.Client, *arbnode.Node) { - stackConfig := createStackConfigForTest(t.TempDir()) +func createForwardingNode(t *testing.T, builder *NodeBuilder, ipcPath string, redisUrl string, fallbackPath string) (*TestClient, func()) { if ipcPath != "" { - ipcConfig := genericconf.IPCConfigDefault - ipcConfig.Path = ipcPath - ipcConfig.Apply(stackConfig) + builder.l2StackConfig.IPCPath = ipcPath } nodeConfig := arbnode.ConfigDefaultL1Test() nodeConfig.Sequencer = false @@ -136,29 +116,18 @@ func createForwardingNode( execConfig.ForwardingTarget = fallbackPath // nodeConfig.Feed.Output.Enable = false - return Create2ndNodeWithConfig(t, ctx, first, l1stack, l1info, l2InitData, nodeConfig, execConfig, stackConfig) + return builder.Build2ndNode(t, &SecondNodeParams{nodeConfig: nodeConfig, execConfig: execConfig}) } -func createSequencer( - ctx context.Context, t *testing.T, - first *arbnode.Node, - l1stack *node.Node, - l1info *BlockchainTestInfo, - l2InitData *statetransfer.ArbosInitializationInfo, - ipcPath string, - redisUrl string, -) (*ethclient.Client, *arbnode.Node) { - stackConfig := createStackConfigForTest(t.TempDir()) - ipcConfig := genericconf.IPCConfigDefault - 
ipcConfig.Path = ipcPath - ipcConfig.Apply(stackConfig) +func createSequencer(t *testing.T, builder *NodeBuilder, ipcPath string, redisUrl string) (*TestClient, func()) { + builder.l2StackConfig.IPCPath = ipcPath nodeConfig := arbnode.ConfigDefaultL1Test() nodeConfig.BatchPoster.Enable = false nodeConfig.SeqCoordinator.Enable = true nodeConfig.SeqCoordinator.RedisUrl = redisUrl nodeConfig.SeqCoordinator.MyUrl = ipcPath - return Create2ndNodeWithConfig(t, ctx, first, l1stack, l1info, l2InitData, nodeConfig, gethexec.ConfigDefaultTest(), stackConfig) + return builder.Build2ndNode(t, &SecondNodeParams{nodeConfig: nodeConfig}) } // tmpPath returns file path with specified filename from temporary directory of the test. @@ -252,34 +221,36 @@ func TestRedisForwarder(t *testing.T) { redisServer, redisUrl := initRedis(ctx, t, append(nodePaths, fbNodePath)) defer redisServer.Close() - l2info, fallbackNode, fallbackClient, l1info, _, _, l1stack := fallbackSequencer(ctx, t, + builder := fallbackSequencer(ctx, t, &fallbackSequencerOpts{ ipcPath: fbNodePath, redisUrl: redisUrl, enableSecCoordinator: true, }) - defer requireClose(t, l1stack) - defer fallbackNode.StopAndWait() + cleanup := builder.Build(t) + defer cleanup() + fallbackNode, fallbackClient := builder.L2.ConsensusNode, builder.L2.Client - forwardingClient, forwardingNode := createForwardingNode(ctx, t, fallbackNode, l1stack, l1info, &l2info.ArbInitData, "", redisUrl, fbNodePath) - defer forwardingNode.StopAndWait() + TestClientForwarding, cleanupForwarding := createForwardingNode(t, builder, "", redisUrl, fbNodePath) + defer cleanupForwarding() + forwardingClient := TestClientForwarding.Client var seqNodes []*arbnode.Node var seqClients []*ethclient.Client for _, path := range nodePaths { - client, node := createSequencer(ctx, t, fallbackNode, l1stack, l1info, &l2info.ArbInitData, path, redisUrl) - seqNodes = append(seqNodes, node) - seqClients = append(seqClients, client) + testClientSeq, _ := createSequencer(t, 
builder, path, redisUrl) + seqNodes = append(seqNodes, testClientSeq.ConsensusNode) + seqClients = append(seqClients, testClientSeq.Client) } defer stopNodes(seqNodes) for i := range seqClients { userA := user("A", i) - l2info.GenerateAccount(userA) - tx := l2info.PrepareTx("Owner", userA, l2info.TransferGas, big.NewInt(1e12+int64(l2info.TransferGas)*l2info.GasPrice.Int64()), nil) + builder.L2Info.GenerateAccount(userA) + tx := builder.L2Info.PrepareTx("Owner", userA, builder.L2Info.TransferGas, big.NewInt(1e12+int64(builder.L2Info.TransferGas)*builder.L2Info.GasPrice.Int64()), nil) err := fallbackClient.SendTransaction(ctx, tx) Require(t, err) - _, err = EnsureTxSucceeded(ctx, fallbackClient, tx) + _, err = builder.L2.EnsureTxSucceeded(tx) Require(t, err) } @@ -289,8 +260,8 @@ func TestRedisForwarder(t *testing.T) { } userA := user("A", i) userB := user("B", i) - l2info.GenerateAccount(userB) - tx := l2info.PrepareTx(userA, userB, l2info.TransferGas, transferAmount, nil) + builder.L2Info.GenerateAccount(userB) + tx := builder.L2Info.PrepareTx(userA, userB, builder.L2Info.TransferGas, transferAmount, nil) sendFunc := func() error { return forwardingClient.SendTransaction(ctx, tx) } if err := tryWithTimeout(ctx, sendFunc, gethexec.DefaultTestForwarderConfig.UpdateInterval*10); err != nil { @@ -299,7 +270,7 @@ func TestRedisForwarder(t *testing.T) { _, err := EnsureTxSucceeded(ctx, seqClients[i], tx) Require(t, err) - l2balance, err := seqClients[i].BalanceAt(ctx, l2info.GetAddress(userB), nil) + l2balance, err := seqClients[i].BalanceAt(ctx, builder.L2Info.GetAddress(userB), nil) Require(t, err) if l2balance.Cmp(transferAmount) != 0 { @@ -320,29 +291,31 @@ func TestRedisForwarderFallbackNoRedis(t *testing.T) { redisServer, redisUrl := initRedis(ctx, t, nodePaths) redisServer.Close() - l2info, fallbackNode, fallbackClient, l1info, _, _, l1stack := fallbackSequencer(ctx, t, + builder := fallbackSequencer(ctx, t, &fallbackSequencerOpts{ ipcPath: fallbackIpcPath, 
redisUrl: redisUrl, enableSecCoordinator: false, }) - defer requireClose(t, l1stack) - defer fallbackNode.StopAndWait() + cleanup := builder.Build(t) + defer cleanup() + fallbackClient := builder.L2.Client - forwardingClient, forwardingNode := createForwardingNode(ctx, t, fallbackNode, l1stack, l1info, &l2info.ArbInitData, "", redisUrl, fallbackIpcPath) - defer forwardingNode.StopAndWait() + TestClientForwarding, cleanupForwarding := createForwardingNode(t, builder, "", redisUrl, fallbackIpcPath) + defer cleanupForwarding() + forwardingClient := TestClientForwarding.Client user := "User2" - l2info.GenerateAccount(user) - tx := l2info.PrepareTx("Owner", "User2", l2info.TransferGas, transferAmount, nil) + builder.L2Info.GenerateAccount(user) + tx := builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, transferAmount, nil) sendFunc := func() error { return forwardingClient.SendTransaction(ctx, tx) } err := tryWithTimeout(ctx, sendFunc, gethexec.DefaultTestForwarderConfig.UpdateInterval*10) Require(t, err) - _, err = EnsureTxSucceeded(ctx, fallbackClient, tx) + _, err = builder.L2.EnsureTxSucceeded(tx) Require(t, err) - l2balance, err := fallbackClient.BalanceAt(ctx, l2info.GetAddress(user), nil) + l2balance, err := fallbackClient.BalanceAt(ctx, builder.L2Info.GetAddress(user), nil) Require(t, err) if l2balance.Cmp(transferAmount) != 0 { diff --git a/system_tests/ipc_test.go b/system_tests/ipc_test.go index dc73825a13..511a608e67 100644 --- a/system_tests/ipc_test.go +++ b/system_tests/ipc_test.go @@ -9,24 +9,18 @@ import ( "testing" "github.com/ethereum/go-ethereum/ethclient" - "github.com/offchainlabs/nitro/cmd/genericconf" ) func TestIpcRpc(t *testing.T) { ipcPath := filepath.Join(t.TempDir(), "test.ipc") - ipcConfig := genericconf.IPCConfigDefault - ipcConfig.Path = ipcPath - - stackConf := createStackConfigForTest(t.TempDir()) - ipcConfig.Apply(stackConf) - ctx, cancel := context.WithCancel(context.Background()) defer cancel() - _, l2node, _, _, 
_, _, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, nil, nil, nil, stackConf) - defer requireClose(t, l1stack) - defer l2node.StopAndWait() + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + builder.l2StackConfig.IPCPath = ipcPath + cleanup := builder.Build(t) + defer cleanup() _, err := ethclient.Dial(ipcPath) Require(t, err) diff --git a/system_tests/meaningless_reorg_test.go b/system_tests/meaningless_reorg_test.go index 7fb6934e84..f09f68041a 100644 --- a/system_tests/meaningless_reorg_test.go +++ b/system_tests/meaningless_reorg_test.go @@ -10,7 +10,6 @@ import ( "time" "github.com/ethereum/go-ethereum/common" - "github.com/offchainlabs/nitro/arbnode" "github.com/offchainlabs/nitro/solgen/go/bridgegen" ) @@ -18,27 +17,26 @@ func TestMeaninglessBatchReorg(t *testing.T) { t.Parallel() ctx, cancel := context.WithCancel(context.Background()) defer cancel() - conf := arbnode.ConfigDefaultL1Test() - conf.BatchPoster.Enable = false - l2Info, arbNode, l2Client, l1Info, l1Backend, l1Client, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, conf, nil, nil, nil) - defer requireClose(t, l1stack) - defer arbNode.StopAndWait() - seqInbox, err := bridgegen.NewSequencerInbox(l1Info.GetAddress("SequencerInbox"), l1Client) + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + builder.nodeConfig.BatchPoster.Enable = false + cleanup := builder.Build(t) + defer cleanup() + + seqInbox, err := bridgegen.NewSequencerInbox(builder.L1Info.GetAddress("SequencerInbox"), builder.L1.Client) Require(t, err) - seqOpts := l1Info.GetDefaultTransactOpts("Sequencer", ctx) + seqOpts := builder.L1Info.GetDefaultTransactOpts("Sequencer", ctx) tx, err := seqInbox.AddSequencerL2BatchFromOrigin(&seqOpts, big.NewInt(1), nil, big.NewInt(1), common.Address{}) Require(t, err) - batchReceipt, err := EnsureTxSucceeded(ctx, l1Client, tx) + batchReceipt, err := builder.L1.EnsureTxSucceeded(tx) Require(t, err) - execNode := getExecNode(t, arbNode) for i := 0; ; i++ { if i >= 500 { 
Fatal(t, "Failed to read batch from L1") } - msgNum, err := execNode.ExecEngine.HeadMessageNumber() + msgNum, err := builder.L2.ExecNode.ExecEngine.HeadMessageNumber() Require(t, err) if msgNum == 1 { break @@ -47,33 +45,33 @@ func TestMeaninglessBatchReorg(t *testing.T) { } time.Sleep(10 * time.Millisecond) } - metadata, err := arbNode.InboxTracker.GetBatchMetadata(1) + metadata, err := builder.L2.ConsensusNode.InboxTracker.GetBatchMetadata(1) Require(t, err) originalBatchBlock := batchReceipt.BlockNumber.Uint64() if metadata.ParentChainBlock != originalBatchBlock { Fatal(t, "Posted batch in block", originalBatchBlock, "but metadata says L1 block was", metadata.ParentChainBlock) } - _, l2Receipt := TransferBalance(t, "Owner", "Owner", common.Big1, l2Info, l2Client, ctx) + _, l2Receipt := builder.L2.TransferBalance(t, "Owner", "Owner", common.Big1, builder.L2Info) // Make the reorg larger to force the miner to discard transactions. // The miner usually collects transactions from deleted blocks and puts them in the mempool. // However, this code doesn't run on reorgs larger than 64 blocks for performance reasons. // Therefore, we make a bunch of small blocks to prevent the code from running. 
for j := uint64(0); j < 70; j++ { - TransferBalance(t, "Faucet", "Faucet", common.Big1, l1Info, l1Client, ctx) + builder.L1.TransferBalance(t, "Faucet", "Faucet", common.Big1, builder.L1Info) } - parentBlock := l1Backend.BlockChain().GetBlockByNumber(batchReceipt.BlockNumber.Uint64() - 1) - err = l1Backend.BlockChain().ReorgToOldBlock(parentBlock) + parentBlock := builder.L1.L1Backend.BlockChain().GetBlockByNumber(batchReceipt.BlockNumber.Uint64() - 1) + err = builder.L1.L1Backend.BlockChain().ReorgToOldBlock(parentBlock) Require(t, err) // Produce a new l1Block so that the batch ends up in a different l1Block than before - TransferBalance(t, "User", "User", common.Big1, l1Info, l1Client, ctx) + builder.L1.TransferBalance(t, "User", "User", common.Big1, builder.L1Info) tx, err = seqInbox.AddSequencerL2BatchFromOrigin(&seqOpts, big.NewInt(1), nil, big.NewInt(1), common.Address{}) Require(t, err) - newBatchReceipt, err := EnsureTxSucceeded(ctx, l1Client, tx) + newBatchReceipt, err := builder.L1.EnsureTxSucceeded(tx) Require(t, err) newBatchBlock := newBatchReceipt.BlockNumber.Uint64() @@ -87,7 +85,7 @@ func TestMeaninglessBatchReorg(t *testing.T) { if i >= 500 { Fatal(t, "Failed to read batch reorg from L1") } - metadata, err = arbNode.InboxTracker.GetBatchMetadata(1) + metadata, err = builder.L2.ConsensusNode.InboxTracker.GetBatchMetadata(1) Require(t, err) if metadata.ParentChainBlock == newBatchBlock { break @@ -97,10 +95,10 @@ func TestMeaninglessBatchReorg(t *testing.T) { time.Sleep(10 * time.Millisecond) } - _, err = arbNode.InboxReader.GetSequencerMessageBytes(ctx, 1) + _, err = builder.L2.ConsensusNode.InboxReader.GetSequencerMessageBytes(ctx, 1) Require(t, err) - l2Header, err := l2Client.HeaderByNumber(ctx, l2Receipt.BlockNumber) + l2Header, err := builder.L2.Client.HeaderByNumber(ctx, l2Receipt.BlockNumber) Require(t, err) if l2Header.Hash() != l2Receipt.BlockHash { diff --git a/system_tests/seq_coordinator_test.go b/system_tests/seq_coordinator_test.go 
index a213c366cf..ac3167a604 100644 --- a/system_tests/seq_coordinator_test.go +++ b/system_tests/seq_coordinator_test.go @@ -14,7 +14,6 @@ import ( "github.com/go-redis/redis/v8" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbnode" "github.com/offchainlabs/nitro/arbos/arbostypes" @@ -47,11 +46,12 @@ func TestRedisSeqCoordinatorPriorities(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - nodeConfig := arbnode.ConfigDefaultL2Test() - nodeConfig.SeqCoordinator.Enable = true - nodeConfig.SeqCoordinator.RedisUrl = redisutil.CreateTestRedis(ctx, t) + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + builder.takeOwnership = false + builder.nodeConfig.SeqCoordinator.Enable = true + builder.nodeConfig.SeqCoordinator.RedisUrl = redisutil.CreateTestRedis(ctx, t) - l2Info := NewArbTestInfo(t, params.ArbitrumDevTestChainConfig().ChainID) + l2Info := builder.L2Info // stdio protocol makes sure forwarder initialization doesn't fail nodeNames := []string{"stdio://A", "stdio://B", "stdio://C", "stdio://D", "stdio://E"} @@ -59,12 +59,13 @@ func TestRedisSeqCoordinatorPriorities(t *testing.T) { nodes := make([]*arbnode.Node, len(nodeNames)) // init DB to known state - initRedisForTest(t, ctx, nodeConfig.SeqCoordinator.RedisUrl, nodeNames) + initRedisForTest(t, ctx, builder.nodeConfig.SeqCoordinator.RedisUrl, nodeNames) createStartNode := func(nodeNum int) { - nodeConfig.SeqCoordinator.MyUrl = nodeNames[nodeNum] - _, node, _ := CreateTestL2WithConfig(t, ctx, l2Info, nodeConfig, nil, false) - nodes[nodeNum] = node + builder.nodeConfig.SeqCoordinator.MyUrl = nodeNames[nodeNum] + builder.L2Info = l2Info + builder.Build(t) + nodes[nodeNum] = builder.L2.ConsensusNode } trySequencing := func(nodeNum int) bool { @@ -128,7 +129,7 @@ func TestRedisSeqCoordinatorPriorities(t *testing.T) { if attempts > 10 { Fatal(t, "timeout waiting for msg ", msgNum, " debug: ", 
currentNode.SeqCoordinator.DebugPrint()) } - <-time.After(nodeConfig.SeqCoordinator.UpdateInterval / 3) + <-time.After(builder.nodeConfig.SeqCoordinator.UpdateInterval / 3) } } } @@ -232,7 +233,7 @@ func TestRedisSeqCoordinatorPriorities(t *testing.T) { } if sequencer == -1 || (addNodes && (sequencer == currentSequencer+1)) { - time.Sleep(nodeConfig.SeqCoordinator.LockoutDuration / 5) + time.Sleep(builder.nodeConfig.SeqCoordinator.LockoutDuration / 5) continue } if sequencer == currentSequencer { @@ -270,21 +271,20 @@ func testCoordinatorMessageSync(t *testing.T, successCase bool) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - nodeConfig := arbnode.ConfigDefaultL1Test() - nodeConfig.SeqCoordinator.Enable = true - nodeConfig.SeqCoordinator.RedisUrl = redisutil.CreateTestRedis(ctx, t) - nodeConfig.BatchPoster.Enable = false + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + builder.nodeConfig.SeqCoordinator.Enable = true + builder.nodeConfig.SeqCoordinator.RedisUrl = redisutil.CreateTestRedis(ctx, t) + builder.nodeConfig.BatchPoster.Enable = false nodeNames := []string{"stdio://A", "stdio://B"} + initRedisForTest(t, ctx, builder.nodeConfig.SeqCoordinator.RedisUrl, nodeNames) + builder.nodeConfig.SeqCoordinator.MyUrl = nodeNames[0] - initRedisForTest(t, ctx, nodeConfig.SeqCoordinator.RedisUrl, nodeNames) + cleanup := builder.Build(t) + defer cleanup() + clientA := builder.L2.Client - nodeConfig.SeqCoordinator.MyUrl = nodeNames[0] - l2Info, nodeA, clientA, l1info, _, _, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, nodeConfig, nil, params.ArbitrumDevTestChainConfig(), nil) - defer requireClose(t, l1stack) - defer nodeA.StopAndWait() - - redisClient, err := redisutil.RedisClientFromURL(nodeConfig.SeqCoordinator.RedisUrl) + redisClient, err := redisutil.RedisClientFromURL(builder.nodeConfig.SeqCoordinator.RedisUrl) Require(t, err) defer redisClient.Close() @@ -292,27 +292,29 @@ func testCoordinatorMessageSync(t *testing.T, 
successCase bool) { for { err := redisClient.Get(ctx, redisutil.CHOSENSEQ_KEY).Err() if errors.Is(err, redis.Nil) { - time.Sleep(nodeConfig.SeqCoordinator.UpdateInterval) + time.Sleep(builder.nodeConfig.SeqCoordinator.UpdateInterval) continue } Require(t, err) break } - l2Info.GenerateAccount("User2") + builder.L2Info.GenerateAccount("User2") - nodeConfigDup := *nodeConfig - nodeConfig = &nodeConfigDup + nodeConfigDup := *builder.nodeConfig + builder.nodeConfig = &nodeConfigDup - nodeConfig.SeqCoordinator.MyUrl = nodeNames[1] + builder.nodeConfig.SeqCoordinator.MyUrl = nodeNames[1] if !successCase { - nodeConfig.SeqCoordinator.Signer.ECDSA.AcceptSequencer = false - nodeConfig.SeqCoordinator.Signer.ECDSA.AllowedAddresses = []string{l2Info.GetAddress("User2").Hex()} + builder.nodeConfig.SeqCoordinator.Signer.ECDSA.AcceptSequencer = false + builder.nodeConfig.SeqCoordinator.Signer.ECDSA.AllowedAddresses = []string{builder.L2Info.GetAddress("User2").Hex()} } - clientB, nodeB := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, l1info, &l2Info.ArbInitData, nodeConfig, nil, nil) - defer nodeB.StopAndWait() - tx := l2Info.PrepareTx("Owner", "User2", l2Info.TransferGas, big.NewInt(1e12), nil) + testClientB, cleanupB := builder.Build2ndNode(t, &SecondNodeParams{nodeConfig: builder.nodeConfig}) + defer cleanupB() + clientB := testClientB.Client + + tx := builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, big.NewInt(1e12), nil) err = clientA.SendTransaction(ctx, tx) Require(t, err) @@ -323,7 +325,7 @@ func testCoordinatorMessageSync(t *testing.T, successCase bool) { if successCase { _, err = WaitForTx(ctx, clientB, tx.Hash(), time.Second*5) Require(t, err) - l2balance, err := clientB.BalanceAt(ctx, l2Info.GetAddress("User2"), nil) + l2balance, err := clientB.BalanceAt(ctx, builder.L2Info.GetAddress("User2"), nil) Require(t, err) if l2balance.Cmp(big.NewInt(1e12)) != 0 { t.Fatal("Unexpected balance:", l2balance) diff --git a/system_tests/seqfeed_test.go 
b/system_tests/seqfeed_test.go index d0d05d569e..749a91e3b1 100644 --- a/system_tests/seqfeed_test.go +++ b/system_tests/seqfeed_test.go @@ -13,7 +13,6 @@ import ( "github.com/offchainlabs/nitro/arbnode" "github.com/offchainlabs/nitro/broadcastclient" - "github.com/offchainlabs/nitro/execution/gethexec" "github.com/offchainlabs/nitro/relay" "github.com/offchainlabs/nitro/util/signature" "github.com/offchainlabs/nitro/wsbroadcastserver" @@ -43,30 +42,33 @@ func TestSequencerFeed(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - seqNodeConfig := arbnode.ConfigDefaultL2Test() - seqNodeConfig.Feed.Output = *newBroadcasterConfigTest() - l2info1, nodeA, client1 := CreateTestL2WithConfig(t, ctx, nil, seqNodeConfig, nil, true) - defer nodeA.StopAndWait() - clientNodeConfig := arbnode.ConfigDefaultL2Test() - port := nodeA.BroadcastServer.ListenerAddr().(*net.TCPAddr).Port - clientNodeConfig.Feed.Input = *newBroadcastClientConfigTest(port) + builderSeq := NewNodeBuilder(ctx).DefaultConfig(t, false) + builderSeq.nodeConfig.Feed.Output = *newBroadcasterConfigTest() + cleanupSeq := builderSeq.Build(t) + defer cleanupSeq() + seqInfo, seqNode, seqClient := builderSeq.L2Info, builderSeq.L2.ConsensusNode, builderSeq.L2.Client - _, nodeB, client2 := CreateTestL2WithConfig(t, ctx, nil, clientNodeConfig, nil, false) - defer nodeB.StopAndWait() + port := seqNode.BroadcastServer.ListenerAddr().(*net.TCPAddr).Port + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + builder.nodeConfig.Feed.Input = *newBroadcastClientConfigTest(port) + builder.takeOwnership = false + cleanup := builder.Build(t) + defer cleanup() + client := builder.L2.Client - l2info1.GenerateAccount("User2") + seqInfo.GenerateAccount("User2") - tx := l2info1.PrepareTx("Owner", "User2", l2info1.TransferGas, big.NewInt(1e12), nil) + tx := seqInfo.PrepareTx("Owner", "User2", seqInfo.TransferGas, big.NewInt(1e12), nil) - err := client1.SendTransaction(ctx, tx) + err := 
seqClient.SendTransaction(ctx, tx) Require(t, err) - _, err = EnsureTxSucceeded(ctx, client1, tx) + _, err = builderSeq.L2.EnsureTxSucceeded(tx) Require(t, err) - _, err = WaitForTx(ctx, client2, tx.Hash(), time.Second*5) + _, err = WaitForTx(ctx, client, tx.Hash(), time.Second*5) Require(t, err) - l2balance, err := client2.BalanceAt(ctx, l2info1.GetAddress("User2"), nil) + l2balance, err := client.BalanceAt(ctx, seqInfo.GetAddress("User2"), nil) Require(t, err) if l2balance.Cmp(big.NewInt(1e12)) != 0 { t.Fatal("Unexpected balance:", l2balance) @@ -78,16 +80,17 @@ func TestRelayedSequencerFeed(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - seqNodeConfig := arbnode.ConfigDefaultL2Test() - seqNodeConfig.Feed.Output = *newBroadcasterConfigTest() - l2info1, nodeA, client1 := CreateTestL2WithConfig(t, ctx, nil, seqNodeConfig, nil, true) - defer nodeA.StopAndWait() + builderSeq := NewNodeBuilder(ctx).DefaultConfig(t, false) + builderSeq.nodeConfig.Feed.Output = *newBroadcasterConfigTest() + cleanupSeq := builderSeq.Build(t) + defer cleanupSeq() + seqInfo, seqNode, seqClient := builderSeq.L2Info, builderSeq.L2.ConsensusNode, builderSeq.L2.Client - bigChainId, err := client1.ChainID(ctx) + bigChainId, err := seqClient.ChainID(ctx) Require(t, err) config := relay.ConfigDefault - port := nodeA.BroadcastServer.ListenerAddr().(*net.TCPAddr).Port + port := seqNode.BroadcastServer.ListenerAddr().(*net.TCPAddr).Port config.Node.Feed.Input = *newBroadcastClientConfigTest(port) config.Node.Feed.Output = *newBroadcasterConfigTest() config.Chain.ID = bigChainId.Uint64() @@ -99,26 +102,28 @@ func TestRelayedSequencerFeed(t *testing.T) { Require(t, err) defer currentRelay.StopAndWait() - clientNodeConfig := arbnode.ConfigDefaultL2Test() port = currentRelay.GetListenerAddr().(*net.TCPAddr).Port - clientNodeConfig.Feed.Input = *newBroadcastClientConfigTest(port) - _, nodeC, client3 := CreateTestL2WithConfig(t, ctx, nil, clientNodeConfig, nil, 
false) - defer nodeC.StopAndWait() - StartWatchChanErr(t, ctx, feedErrChan, nodeC) + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + builder.nodeConfig.Feed.Input = *newBroadcastClientConfigTest(port) + builder.takeOwnership = false + cleanup := builder.Build(t) + defer cleanup() + node, client := builder.L2.ConsensusNode, builder.L2.Client + StartWatchChanErr(t, ctx, feedErrChan, node) - l2info1.GenerateAccount("User2") + seqInfo.GenerateAccount("User2") - tx := l2info1.PrepareTx("Owner", "User2", l2info1.TransferGas, big.NewInt(1e12), nil) + tx := seqInfo.PrepareTx("Owner", "User2", seqInfo.TransferGas, big.NewInt(1e12), nil) - err = client1.SendTransaction(ctx, tx) + err = seqClient.SendTransaction(ctx, tx) Require(t, err) - _, err = EnsureTxSucceeded(ctx, client1, tx) + _, err = builderSeq.L2.EnsureTxSucceeded(tx) Require(t, err) - _, err = WaitForTx(ctx, client3, tx.Hash(), time.Second*5) + _, err = WaitForTx(ctx, client, tx.Hash(), time.Second*5) Require(t, err) - l2balance, err := client3.BalanceAt(ctx, l2info1.GetAddress("User2"), nil) + l2balance, err := client.BalanceAt(ctx, seqInfo.GetAddress("User2"), nil) Require(t, err) if l2balance.Cmp(big.NewInt(1e12)) != 0 { t.Fatal("Unexpected balance:", l2balance) @@ -136,11 +141,16 @@ func testLyingSequencer(t *testing.T, dasModeStr string) { nodeConfigA.BatchPoster.Enable = true nodeConfigA.Feed.Output.Enable = false - l2infoA, nodeA, l2clientA, l1info, _, l1client, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, nodeConfigA, nil, chainConfig, nil) - defer requireClose(t, l1stack, "unable to close l1stack") - defer nodeA.StopAndWait() + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + builder.nodeConfig = nodeConfigA + builder.chainConfig = chainConfig + builder.L2Info = nil + cleanup := builder.Build(t) + defer cleanup() - authorizeDASKeyset(t, ctx, dasSignerKey, l1info, l1client) + l2clientA := builder.L2.Client + + authorizeDASKeyset(t, ctx, dasSignerKey, builder.L1Info, 
builder.L1.Client) // The lying sequencer nodeConfigC := arbnode.ConfigDefaultL1Test() @@ -148,8 +158,9 @@ func testLyingSequencer(t *testing.T, dasModeStr string) { nodeConfigC.DataAvailability = nodeConfigA.DataAvailability nodeConfigC.DataAvailability.RPCAggregator.Enable = false nodeConfigC.Feed.Output = *newBroadcasterConfigTest() - l2clientC, nodeC := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, l1info, &l2infoA.ArbInitData, nodeConfigC, gethexec.ConfigDefaultTest(), nil) - defer nodeC.StopAndWait() + testClientC, cleanupC := builder.Build2ndNode(t, &SecondNodeParams{nodeConfig: nodeConfigC}) + defer cleanupC() + l2clientC, nodeC := testClientC.Client, testClientC.ConsensusNode port := nodeC.BroadcastServer.ListenerAddr().(*net.TCPAddr).Port @@ -159,15 +170,16 @@ func testLyingSequencer(t *testing.T, dasModeStr string) { nodeConfigB.Feed.Input = *newBroadcastClientConfigTest(port) nodeConfigB.DataAvailability = nodeConfigA.DataAvailability nodeConfigB.DataAvailability.RPCAggregator.Enable = false - l2clientB, nodeB := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, l1info, &l2infoA.ArbInitData, nodeConfigB, nil, nil) - defer nodeB.StopAndWait() + testClientB, cleanupB := builder.Build2ndNode(t, &SecondNodeParams{nodeConfig: nodeConfigB}) + defer cleanupB() + l2clientB := testClientB.Client - l2infoA.GenerateAccount("FraudUser") - l2infoA.GenerateAccount("RealUser") + builder.L2Info.GenerateAccount("FraudUser") + builder.L2Info.GenerateAccount("RealUser") - fraudTx := l2infoA.PrepareTx("Owner", "FraudUser", l2infoA.TransferGas, big.NewInt(1e12), nil) - l2infoA.GetInfoWithPrivKey("Owner").Nonce -= 1 // Use same l2info object for different l2s - realTx := l2infoA.PrepareTx("Owner", "RealUser", l2infoA.TransferGas, big.NewInt(1e12), nil) + fraudTx := builder.L2Info.PrepareTx("Owner", "FraudUser", builder.L2Info.TransferGas, big.NewInt(1e12), nil) + builder.L2Info.GetInfoWithPrivKey("Owner").Nonce -= 1 // Use same l2info object for different l2s + realTx := 
builder.L2Info.PrepareTx("Owner", "RealUser", builder.L2Info.TransferGas, big.NewInt(1e12), nil) for i := 0; i < 10; i++ { err := l2clientC.SendTransaction(ctx, fraudTx) @@ -180,7 +192,7 @@ func testLyingSequencer(t *testing.T, dasModeStr string) { } } - _, err := EnsureTxSucceeded(ctx, l2clientC, fraudTx) + _, err := testClientC.EnsureTxSucceeded(fraudTx) if err != nil { t.Fatal("error ensuring fraud transaction succeeded:", err) } @@ -190,7 +202,7 @@ func testLyingSequencer(t *testing.T, dasModeStr string) { if err != nil { t.Fatal("error waiting for tx:", err) } - l2balance, err := l2clientB.BalanceAt(ctx, l2infoA.GetAddress("FraudUser"), nil) + l2balance, err := l2clientB.BalanceAt(ctx, builder.L2Info.GetAddress("FraudUser"), nil) if err != nil { t.Fatal("error getting balance:", err) } @@ -204,7 +216,7 @@ func testLyingSequencer(t *testing.T, dasModeStr string) { t.Fatal("error sending real transaction:", err) } - _, err = EnsureTxSucceeded(ctx, l2clientA, realTx) + _, err = builder.L2.EnsureTxSucceeded(realTx) if err != nil { t.Fatal("error ensuring real transaction succeeded:", err) } @@ -214,7 +226,7 @@ func testLyingSequencer(t *testing.T, dasModeStr string) { if err != nil { t.Fatal("error waiting for transaction to get to node b:", err) } - l2balanceFraudAcct, err := l2clientB.BalanceAt(ctx, l2infoA.GetAddress("FraudUser"), nil) + l2balanceFraudAcct, err := l2clientB.BalanceAt(ctx, builder.L2Info.GetAddress("FraudUser"), nil) if err != nil { t.Fatal("error getting fraud balance:", err) } @@ -222,7 +234,7 @@ func testLyingSequencer(t *testing.T, dasModeStr string) { t.Fatal("Unexpected balance (fraud acct should be empty) was:", l2balanceFraudAcct) } - l2balanceRealAcct, err := l2clientB.BalanceAt(ctx, l2infoA.GetAddress("RealUser"), nil) + l2balanceRealAcct, err := l2clientB.BalanceAt(ctx, builder.L2Info.GetAddress("RealUser"), nil) if err != nil { t.Fatal("error getting real balance:", err) } diff --git a/system_tests/seqinbox_test.go 
b/system_tests/seqinbox_test.go index a456dc5fe9..c90617455a 100644 --- a/system_tests/seqinbox_test.go +++ b/system_tests/seqinbox_test.go @@ -138,32 +138,33 @@ func testSequencerInboxReaderImpl(t *testing.T, validator bool) { t.Parallel() ctx, cancel := context.WithCancel(context.Background()) defer cancel() - conf := arbnode.ConfigDefaultL1Test() - conf.InboxReader.HardReorg = true + + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + builder.nodeConfig.InboxReader.HardReorg = true if validator { - conf.BlockValidator.Enable = true + builder.nodeConfig.BlockValidator.Enable = true } - l2Info, arbNode, _, l1Info, l1backend, l1Client, l1stack := createTestNodeOnL1WithConfig(t, ctx, false, conf, nil, nil, nil) - execNode := getExecNode(t, arbNode) - l2Backend := execNode.Backend - defer requireClose(t, l1stack) - defer arbNode.StopAndWait() + builder.isSequencer = false + cleanup := builder.Build(t) + defer cleanup() + + l2Backend := builder.L2.ExecNode.Backend - l1BlockChain := l1backend.BlockChain() + l1BlockChain := builder.L1.L1Backend.BlockChain() - rpcC, err := l1stack.Attach() + rpcC, err := builder.L1.Stack.Attach() if err != nil { t.Fatalf("Error connecting to l1 node: %v", err) } gethClient := gethclient.New(rpcC) - seqInbox, err := bridgegen.NewSequencerInbox(l1Info.GetAddress("SequencerInbox"), l1Client) + seqInbox, err := bridgegen.NewSequencerInbox(builder.L1Info.GetAddress("SequencerInbox"), builder.L1.Client) Require(t, err) - seqOpts := l1Info.GetDefaultTransactOpts("Sequencer", ctx) + seqOpts := builder.L1Info.GetDefaultTransactOpts("Sequencer", ctx) - gasRefunderAddr := deployGasRefunder(ctx, t, l1Info, l1Client) + gasRefunderAddr := deployGasRefunder(ctx, t, builder.L1Info, builder.L1.Client) - ownerAddress := l2Info.GetAddress("Owner") + ownerAddress := builder.L2Info.GetAddress("Owner") var startL2BlockNumber uint64 = 0 startState, _, err := l2Backend.APIBackend().StateAndHeaderByNumber(ctx, rpc.LatestBlockNumber) @@ -196,10 +197,10 @@ 
func testSequencerInboxReaderImpl(t *testing.T, validator bool) { } var faucetTxs []*types.Transaction for _, acct := range accounts { - l1Info.GenerateAccount(acct) - faucetTxs = append(faucetTxs, l1Info.PrepareTx("Faucet", acct, 30000, big.NewInt(1e16), nil)) + builder.L1Info.GenerateAccount(acct) + faucetTxs = append(faucetTxs, builder.L1Info.PrepareTx("Faucet", acct, 30000, big.NewInt(1e16), nil)) } - SendWaitTestTransactions(t, ctx, l1Client, faucetTxs) + SendWaitTestTransactions(t, ctx, builder.L1.Client, faucetTxs) seqABI, err := bridgegen.SequencerInboxMetaData.GetAbi() if err != nil { @@ -216,7 +217,7 @@ func testSequencerInboxReaderImpl(t *testing.T, validator bool) { // The miner usually collects transactions from deleted blocks and puts them in the mempool. // However, this code doesn't run on reorgs larger than 64 blocks for performance reasons. // Therefore, we make a bunch of small blocks to prevent the code from running. - padAddr := l1Info.GetAddress("ReorgPadding") + padAddr := builder.L1Info.GetAddress("ReorgPadding") for j := uint64(0); j < 70; j++ { rawTx := &types.DynamicFeeTx{ To: &padAddr, @@ -225,12 +226,12 @@ func testSequencerInboxReaderImpl(t *testing.T, validator bool) { Value: new(big.Int), Nonce: j, } - tx := l1Info.SignTxAs("ReorgPadding", rawTx) - Require(t, l1Client.SendTransaction(ctx, tx)) - _, _ = EnsureTxSucceeded(ctx, l1Client, tx) + tx := builder.L1Info.SignTxAs("ReorgPadding", rawTx) + Require(t, builder.L1.Client.SendTransaction(ctx, tx)) + _, _ = builder.L1.EnsureTxSucceeded(tx) } reorgTargetNumber := blockStates[reorgTo].l1BlockNumber - currentHeader, err := l1Client.HeaderByNumber(ctx, nil) + currentHeader, err := builder.L1.Client.HeaderByNumber(ctx, nil) Require(t, err) if currentHeader.Number.Int64()-int64(reorgTargetNumber) < 65 { Fatal(t, "Less than 65 blocks of difference between current block", currentHeader.Number, "and target", reorgTargetNumber) @@ -245,10 +246,10 @@ func testSequencerInboxReaderImpl(t 
*testing.T, validator bool) { // Sometimes, this causes it to drop the next tx. // To work around this, we create a sacrificial tx, which may or may not succeed. // Whichever happens, by the end of this block, the miner will have processed the reorg. - tx := l1Info.PrepareTx(fmt.Sprintf("ReorgSacrifice%v", i/10), "Faucet", 30000, big.NewInt(0), nil) - err = l1Client.SendTransaction(ctx, tx) + tx := builder.L1Info.PrepareTx(fmt.Sprintf("ReorgSacrifice%v", i/10), "Faucet", 30000, big.NewInt(0), nil) + err = builder.L1.Client.SendTransaction(ctx, tx) Require(t, err) - _, _ = WaitForTx(ctx, l1Client, tx.Hash(), time.Second) + _, _ = WaitForTx(ctx, builder.L1.Client, tx.Hash(), time.Second) } else { state := blockStates[len(blockStates)-1] newBalances := make(map[common.Address]*big.Int) @@ -276,10 +277,10 @@ func testSequencerInboxReaderImpl(t *testing.T, validator bool) { var dest common.Address if j == 0 && amount.Cmp(reserveAmount) >= 0 { name := accountName(len(state.accounts)) - if !l2Info.HasAccount(name) { - l2Info.GenerateAccount(name) + if !builder.L2Info.HasAccount(name) { + builder.L2Info.GenerateAccount(name) } - dest = l2Info.GetAddress(name) + dest = builder.L2Info.GetAddress(name) state.accounts = append(state.accounts, dest) state.balances[dest] = big.NewInt(0) } else { @@ -294,7 +295,7 @@ func testSequencerInboxReaderImpl(t *testing.T, validator bool) { Nonce: state.nonces[source], } state.nonces[source]++ - tx := l2Info.SignTxAs(accountName(sourceNum), rawTx) + tx := builder.L2Info.SignTxAs(accountName(sourceNum), rawTx) txData, err := tx.MarshalBinary() Require(t, err) var segment []byte @@ -314,7 +315,7 @@ func testSequencerInboxReaderImpl(t *testing.T, validator bool) { seqNonce := len(blockStates) - 1 for j := 0; ; j++ { - haveNonce, err := l1Client.PendingNonceAt(ctx, seqOpts.From) + haveNonce, err := builder.L1.Client.PendingNonceAt(ctx, seqOpts.From) Require(t, err) if haveNonce == uint64(seqNonce) { break @@ -326,7 +327,7 @@ func 
testSequencerInboxReaderImpl(t *testing.T, validator bool) { } seqOpts.Nonce = big.NewInt(int64(seqNonce)) var tx *types.Transaction - before, err := l1Client.BalanceAt(ctx, seqOpts.From, nil) + before, err := builder.L1.Client.BalanceAt(ctx, seqOpts.From, nil) if err != nil { t.Fatalf("BalanceAt(%v) unexpected error: %v", seqOpts.From, err) } @@ -335,7 +336,7 @@ func testSequencerInboxReaderImpl(t *testing.T, validator bool) { if err != nil { t.Fatalf("Error encoding batch data: %v", err) } - si := l1Info.GetAddress("SequencerInbox") + si := builder.L1Info.GetAddress("SequencerInbox") wantAL, _, _, err := gethClient.CreateAccessList(ctx, ethereum.CallMsg{ From: seqOpts.From, To: &si, @@ -345,8 +346,8 @@ func testSequencerInboxReaderImpl(t *testing.T, validator bool) { t.Fatalf("Error creating access list: %v", err) } accessed := arbnode.AccessList(&arbnode.AccessListOpts{ - SequencerInboxAddr: l1Info.GetAddress("SequencerInbox"), - BridgeAddr: l1Info.GetAddress("Bridge"), + SequencerInboxAddr: builder.L1Info.GetAddress("SequencerInbox"), + BridgeAddr: builder.L1Info.GetAddress("Bridge"), DataPosterAddr: seqOpts.From, GasRefunderAddr: gasRefunderAddr, SequencerInboxAccs: len(blockStates), @@ -361,18 +362,18 @@ func testSequencerInboxReaderImpl(t *testing.T, validator bool) { tx, err = seqInbox.AddSequencerL2BatchFromOrigin(&seqOpts, big.NewInt(int64(len(blockStates))), batchData, big.NewInt(1), gasRefunderAddr) } Require(t, err) - txRes, err := EnsureTxSucceeded(ctx, l1Client, tx) + txRes, err := builder.L1.EnsureTxSucceeded(tx) if err != nil { // Geth's clique miner is finicky. // Unfortunately this is so rare that I haven't had an opportunity to test this workaround. // Specifically, I suspect there's a race where it thinks there's no txs to put in the new block, // if a new tx arrives at the same time as it tries to create a block. // Resubmit the transaction in an attempt to get the miner going again. 
- _ = l1Client.SendTransaction(ctx, tx) - txRes, err = EnsureTxSucceeded(ctx, l1Client, tx) + _ = builder.L1.Client.SendTransaction(ctx, tx) + txRes, err = builder.L1.EnsureTxSucceeded(tx) Require(t, err) } - after, err := l1Client.BalanceAt(ctx, seqOpts.From, nil) + after, err := builder.L1.Client.BalanceAt(ctx, seqOpts.From, nil) if err != nil { t.Fatalf("BalanceAt(%v) unexpected error: %v", seqOpts.From, err) } @@ -414,9 +415,9 @@ func testSequencerInboxReaderImpl(t *testing.T, validator bool) { if validator && i%15 == 0 { for i := 0; ; i++ { - expectedPos, err := execNode.ExecEngine.BlockNumberToMessageIndex(expectedBlockNumber) + expectedPos, err := builder.L2.ExecNode.ExecEngine.BlockNumberToMessageIndex(expectedBlockNumber) Require(t, err) - lastValidated := arbNode.BlockValidator.Validated(t) + lastValidated := builder.L2.ConsensusNode.BlockValidator.Validated(t) if lastValidated == expectedPos+1 { break } else if i >= 1000 { diff --git a/system_tests/staker_test.go b/system_tests/staker_test.go index 6267abe0c5..0239491422 100644 --- a/system_tests/staker_test.go +++ b/system_tests/staker_test.go @@ -28,7 +28,6 @@ import ( "github.com/offchainlabs/nitro/arbnode/dataposter/storage" "github.com/offchainlabs/nitro/arbos/l2pricing" "github.com/offchainlabs/nitro/arbutil" - "github.com/offchainlabs/nitro/execution/gethexec" "github.com/offchainlabs/nitro/solgen/go/mocksgen" "github.com/offchainlabs/nitro/solgen/go/rollupgen" "github.com/offchainlabs/nitro/staker" @@ -60,29 +59,32 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) ctx, cancelCtx := context.WithCancel(context.Background()) defer cancelCtx() var transferGas = util.NormalizeL2GasForL1GasInitial(800_000, params.GWei) // include room for aggregator L1 costs - l2chainConfig := params.ArbitrumDevTestChainConfig() - l2info := NewBlockChainTestInfo( + + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + builder.L2Info = NewBlockChainTestInfo( t, - 
types.NewArbitrumSigner(types.NewLondonSigner(l2chainConfig.ChainID)), big.NewInt(l2pricing.InitialBaseFeeWei*2), + types.NewArbitrumSigner(types.NewLondonSigner(builder.chainConfig.ChainID)), big.NewInt(l2pricing.InitialBaseFeeWei*2), transferGas, ) - _, l2nodeA, l2clientA, _, l1info, _, l1client, l1stack := createTestNodeOnL1WithConfigImpl(t, ctx, true, nil, nil, l2chainConfig, nil, l2info) - defer requireClose(t, l1stack) - defer l2nodeA.StopAndWait() - execNodeA := getExecNode(t, l2nodeA) + cleanupA := builder.Build(t) + defer cleanupA() + + l2nodeA := builder.L2.ConsensusNode + execNodeA := builder.L2.ExecNode if faultyStaker { - l2info.GenerateGenesisAccount("FaultyAddr", common.Big1) + builder.L2Info.GenerateGenesisAccount("FaultyAddr", common.Big1) } config := arbnode.ConfigDefaultL1Test() - execConfig := gethexec.ConfigDefaultTest() - execConfig.Sequencer.Enable = false config.Sequencer = false config.DelayedSequencer.Enable = false config.BatchPoster.Enable = false - _, l2nodeB := Create2ndNodeWithConfig(t, ctx, l2nodeA, l1stack, l1info, &l2info.ArbInitData, config, execConfig, nil) - defer l2nodeB.StopAndWait() - execNodeB := getExecNode(t, l2nodeB) + builder.execConfig.Sequencer.Enable = false + testClientB, cleanupB := builder.Build2ndNode(t, &SecondNodeParams{nodeConfig: config}) + defer cleanupB() + + l2nodeB := testClientB.ConsensusNode + execNodeB := testClientB.ExecNode nodeAGenesis := execNodeA.Backend.APIBackend().CurrentHeader().Hash() nodeBGenesis := execNodeB.Backend.APIBackend().CurrentHeader().Hash() @@ -96,19 +98,19 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) } } - BridgeBalance(t, "Faucet", big.NewInt(1).Mul(big.NewInt(params.Ether), big.NewInt(10000)), l1info, l2info, l1client, l2clientA, ctx) + builder.BridgeBalance(t, "Faucet", big.NewInt(1).Mul(big.NewInt(params.Ether), big.NewInt(10000))) - deployAuth := l1info.GetDefaultTransactOpts("RollupOwner", ctx) + deployAuth := 
builder.L1Info.GetDefaultTransactOpts("RollupOwner", ctx) balance := big.NewInt(params.Ether) balance.Mul(balance, big.NewInt(100)) - l1info.GenerateAccount("ValidatorA") - TransferBalance(t, "Faucet", "ValidatorA", balance, l1info, l1client, ctx) - l1authA := l1info.GetDefaultTransactOpts("ValidatorA", ctx) + builder.L1Info.GenerateAccount("ValidatorA") + builder.L1.TransferBalance(t, "Faucet", "ValidatorA", balance, builder.L1Info) + l1authA := builder.L1Info.GetDefaultTransactOpts("ValidatorA", ctx) - l1info.GenerateAccount("ValidatorB") - TransferBalance(t, "Faucet", "ValidatorB", balance, l1info, l1client, ctx) - l1authB := l1info.GetDefaultTransactOpts("ValidatorB", ctx) + builder.L1Info.GenerateAccount("ValidatorB") + builder.L1.TransferBalance(t, "Faucet", "ValidatorB", balance, builder.L1Info) + l1authB := builder.L1Info.GetDefaultTransactOpts("ValidatorB", ctx) valWalletAddrAPtr, err := validatorwallet.GetValidatorWalletContract(ctx, l2nodeA.DeployInfo.ValidatorWalletCreator, 0, &l1authA, l2nodeA.L1Reader, true) Require(t, err) @@ -119,19 +121,19 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) Require(t, err, "didn't cache validator wallet address", valWalletAddrA.String(), "vs", valWalletAddrCheck.String()) } - rollup, err := rollupgen.NewRollupAdminLogic(l2nodeA.DeployInfo.Rollup, l1client) + rollup, err := rollupgen.NewRollupAdminLogic(l2nodeA.DeployInfo.Rollup, builder.L1.Client) Require(t, err) tx, err := rollup.SetValidator(&deployAuth, []common.Address{valWalletAddrA, l1authB.From}, []bool{true, true}) Require(t, err) - _, err = EnsureTxSucceeded(ctx, l1client, tx) + _, err = builder.L1.EnsureTxSucceeded(tx) Require(t, err) tx, err = rollup.SetMinimumAssertionPeriod(&deployAuth, big.NewInt(1)) Require(t, err) - _, err = EnsureTxSucceeded(ctx, l1client, tx) + _, err = builder.L1.EnsureTxSucceeded(tx) Require(t, err) - validatorUtils, err := rollupgen.NewValidatorUtils(l2nodeA.DeployInfo.ValidatorUtils, l1client) + 
validatorUtils, err := rollupgen.NewValidatorUtils(l2nodeA.DeployInfo.ValidatorUtils, builder.L1.Client) Require(t, err) valConfig := staker.TestL1ValidatorConfig @@ -223,7 +225,7 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) err = valWalletB.Initialize(ctx) Require(t, err) } - valWalletC := validatorwallet.NewNoOp(l1client, l2nodeA.DeployInfo.Rollup) + valWalletC := validatorwallet.NewNoOp(builder.L1.Client, l2nodeA.DeployInfo.Rollup) valConfig.Strategy = "Watchtower" stakerC, err := staker.NewStaker( l2nodeA.L1Reader, @@ -245,11 +247,11 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) err = stakerC.Initialize(ctx) Require(t, err) - l2info.GenerateAccount("BackgroundUser") - tx = l2info.PrepareTx("Faucet", "BackgroundUser", l2info.TransferGas, balance, nil) - err = l2clientA.SendTransaction(ctx, tx) + builder.L2Info.GenerateAccount("BackgroundUser") + tx = builder.L2Info.PrepareTx("Faucet", "BackgroundUser", builder.L2Info.TransferGas, balance, nil) + err = builder.L2.Client.SendTransaction(ctx, tx) Require(t, err) - _, err = EnsureTxSucceeded(ctx, l2clientA, tx) + _, err = builder.L2.EnsureTxSucceeded(tx) Require(t, err) // Continually make L2 transactions in a background thread @@ -261,7 +263,7 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) })() go (func() { defer close(backgroundTxsShutdownChan) - err := makeBackgroundTxs(backgroundTxsCtx, l2info, l2clientA) + err := makeBackgroundTxs(backgroundTxsCtx, builder.L2Info, builder.L2.Client) if !errors.Is(err, context.Canceled) { log.Warn("error making background txs", "err", err) } @@ -304,26 +306,26 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) if !challengeMangerTimedOut { // Upgrade the ChallengeManager contract to an implementation which says challenges are always timed out - mockImpl, tx, _, err := mocksgen.DeployTimedOutChallengeManager(&deployAuth, l1client) + 
mockImpl, tx, _, err := mocksgen.DeployTimedOutChallengeManager(&deployAuth, builder.L1.Client) Require(t, err) - _, err = EnsureTxSucceeded(ctx, l1client, tx) + _, err = builder.L1.EnsureTxSucceeded(tx) Require(t, err) managerAddr := valWalletA.ChallengeManagerAddress() // 0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103 proxyAdminSlot := common.BigToHash(arbmath.BigSub(crypto.Keccak256Hash([]byte("eip1967.proxy.admin")).Big(), common.Big1)) - proxyAdminBytes, err := l1client.StorageAt(ctx, managerAddr, proxyAdminSlot, nil) + proxyAdminBytes, err := builder.L1.Client.StorageAt(ctx, managerAddr, proxyAdminSlot, nil) Require(t, err) proxyAdminAddr := common.BytesToAddress(proxyAdminBytes) if proxyAdminAddr == (common.Address{}) { Fatal(t, "failed to get challenge manager proxy admin") } - proxyAdmin, err := mocksgen.NewProxyAdminForBinding(proxyAdminAddr, l1client) + proxyAdmin, err := mocksgen.NewProxyAdminForBinding(proxyAdminAddr, builder.L1.Client) Require(t, err) tx, err = proxyAdmin.Upgrade(&deployAuth, managerAddr, mockImpl) Require(t, err) - _, err = EnsureTxSucceeded(ctx, l1client, tx) + _, err = builder.L1.EnsureTxSucceeded(tx) Require(t, err) challengeMangerTimedOut = true @@ -343,7 +345,7 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) } Require(t, err, "Staker", stakerName, "failed to act") if tx != nil { - _, err = EnsureTxSucceeded(ctx, l1client, tx) + _, err = builder.L1.EnsureTxSucceeded(tx) Require(t, err, "EnsureTxSucceeded failed for staker", stakerName, "tx") } if faultyStaker { @@ -379,7 +381,7 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) Require(t, err) } for j := 0; j < 5; j++ { - TransferBalance(t, "Faucet", "Faucet", common.Big0, l1info, l1client, ctx) + builder.L1.TransferBalance(t, "Faucet", "Faucet", common.Big0, builder.L1Info) } } From a430014a20e58db025d8612b0e324fcab1fe936e Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Tue, 24 Oct 2023 
18:00:46 -0500 Subject: [PATCH 42/64] code refactor --- wsbroadcastserver/wsbroadcastserver.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/wsbroadcastserver/wsbroadcastserver.go b/wsbroadcastserver/wsbroadcastserver.go index 43ac27593a..9a7037a713 100644 --- a/wsbroadcastserver/wsbroadcastserver.go +++ b/wsbroadcastserver/wsbroadcastserver.go @@ -33,8 +33,8 @@ var ( HTTPHeaderFeedClientVersion = textproto.CanonicalMIMEHeaderKey("Arbitrum-Feed-Client-Version") HTTPHeaderRequestedSequenceNumber = textproto.CanonicalMIMEHeaderKey("Arbitrum-Requested-Sequence-Number") HTTPHeaderChainId = textproto.CanonicalMIMEHeaderKey("Arbitrum-Chain-Id") - upgradeToWSTimer = metrics.NewRegisteredTimer("arb/wsbroadcastserver/wsupgrade/duration", nil) - startWithHeaderTimer = metrics.NewRegisteredTimer("arb/wsbroadcastserver/startwithheader/duration", nil) + upgradeToWSTimer = metrics.NewRegisteredTimer("arb/feed/clients/wsupgrade/duration", nil) + startWithHeaderTimer = metrics.NewRegisteredTimer("arb/feed/clients/start/duration", nil) ) const ( From 25e42335f2ca90e61ed263ea9b1a2e31d1a8c759 Mon Sep 17 00:00:00 2001 From: Goran Vladika Date: Wed, 25 Oct 2023 12:42:29 +0200 Subject: [PATCH 43/64] Perform admin actions through upg executor in system tests --- arbnode/node.go | 1 - system_tests/common_test.go | 24 +++++++++++++++++++----- 2 files changed, 19 insertions(+), 6 deletions(-) diff --git a/arbnode/node.go b/arbnode/node.go index 4b3381e780..0782e8ecb7 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -72,7 +72,6 @@ func deployBridgeCreator(ctx context.Context, l1Reader *headerreader.HeaderReade return common.Address{}, fmt.Errorf("bridge deploy error: %w", err) } - // maxDataSize := big.NewInt(117964) seqInboxTemplate, tx, _, err := bridgegen.DeploySequencerInbox(auth, client, maxDataSize) err = andTxSucceeded(ctx, l1Reader, tx, err) if err != nil { diff --git a/system_tests/common_test.go b/system_tests/common_test.go index 31401945f0..b985d02fd7 
100644 --- a/system_tests/common_test.go +++ b/system_tests/common_test.go @@ -12,6 +12,7 @@ import ( "net" "os" "strconv" + "strings" "testing" "time" @@ -30,6 +31,7 @@ import ( "github.com/offchainlabs/nitro/validator/server_common" "github.com/offchainlabs/nitro/validator/valnode" + "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/keystore" "github.com/ethereum/go-ethereum/common" @@ -52,6 +54,7 @@ import ( "github.com/offchainlabs/nitro/solgen/go/bridgegen" "github.com/offchainlabs/nitro/solgen/go/mocksgen" "github.com/offchainlabs/nitro/solgen/go/precompilesgen" + "github.com/offchainlabs/nitro/solgen/go/upgrade_executorgen" "github.com/offchainlabs/nitro/statetransfer" "github.com/offchainlabs/nitro/util/testhelpers" ) @@ -628,6 +631,8 @@ func DeployOnTestL1( l1Reader.Start(ctx) defer l1Reader.StopAndWait() + nativeToken := common.Address{} + maxDataSize := big.NewInt(117964) addresses, err := arbnode.DeployOnL1( ctx, l1Reader, @@ -635,13 +640,14 @@ func DeployOnTestL1( l1info.GetAddress("Sequencer"), 0, arbnode.GenerateRollupConfig(false, locator.LatestWasmModuleRoot(), l1info.GetAddress("RollupOwner"), chainConfig, serializedChainConfig, common.Address{}), - common.Address{}, - big.NewInt(117964), + nativeToken, + maxDataSize, ) Require(t, err) l1info.SetContract("Bridge", addresses.Bridge) l1info.SetContract("SequencerInbox", addresses.SequencerInbox) l1info.SetContract("Inbox", addresses.Inbox) + l1info.SetContract("UpgradeExecutor", addresses.UpgradeExecutor) initMessage := getInitMessage(ctx, t, l1client, addresses) return addresses, initMessage } @@ -990,11 +996,19 @@ func authorizeDASKeyset( err := keyset.Serialize(wr) Require(t, err, "unable to serialize DAS keyset") keysetBytes := wr.Bytes() - sequencerInbox, err := bridgegen.NewSequencerInbox(l1info.Accounts["SequencerInbox"].Address, l1client) - Require(t, err, "unable to create sequencer inbox") + + 
sequencerInboxABI, err := abi.JSON(strings.NewReader(bridgegen.SequencerInboxABI)) + Require(t, err, "unable to parse sequencer inbox ABI") + setKeysetCalldata, err := sequencerInboxABI.Pack("setValidKeyset", keysetBytes) + Require(t, err, "unable to generate calldata") + + upgradeExecutor, err := upgrade_executorgen.NewUpgradeExecutor(l1info.Accounts["UpgradeExecutor"].Address, l1client) + Require(t, err, "unable to bind upgrade executor") + trOps := l1info.GetDefaultTransactOpts("RollupOwner", ctx) - tx, err := sequencerInbox.SetValidKeyset(&trOps, keysetBytes) + tx, err := upgradeExecutor.ExecuteCall(&trOps, l1info.Accounts["SequencerInbox"].Address, setKeysetCalldata) Require(t, err, "unable to set valid keyset") + _, err = EnsureTxSucceeded(ctx, l1client, tx) Require(t, err, "unable to ensure transaction success for setting valid keyset") } From 967d924e2f5e28fcc8647f84c533c46375754963 Mon Sep 17 00:00:00 2001 From: Goran Vladika Date: Wed, 25 Oct 2023 12:47:08 +0200 Subject: [PATCH 44/64] Perform admin action through upg executor in staker test --- system_tests/staker_test.go | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/system_tests/staker_test.go b/system_tests/staker_test.go index 6267abe0c5..7870c7b84c 100644 --- a/system_tests/staker_test.go +++ b/system_tests/staker_test.go @@ -16,6 +16,7 @@ import ( "testing" "time" + "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/rawdb" @@ -31,6 +32,7 @@ import ( "github.com/offchainlabs/nitro/execution/gethexec" "github.com/offchainlabs/nitro/solgen/go/mocksgen" "github.com/offchainlabs/nitro/solgen/go/rollupgen" + "github.com/offchainlabs/nitro/solgen/go/upgrade_executorgen" "github.com/offchainlabs/nitro/staker" "github.com/offchainlabs/nitro/staker/validatorwallet" "github.com/offchainlabs/nitro/util" @@ -120,16 +122,20 @@ func 
stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) } rollup, err := rollupgen.NewRollupAdminLogic(l2nodeA.DeployInfo.Rollup, l1client) - Require(t, err) - tx, err := rollup.SetValidator(&deployAuth, []common.Address{valWalletAddrA, l1authB.From}, []bool{true, true}) - Require(t, err) - _, err = EnsureTxSucceeded(ctx, l1client, tx) - Require(t, err) - - tx, err = rollup.SetMinimumAssertionPeriod(&deployAuth, big.NewInt(1)) - Require(t, err) - _, err = EnsureTxSucceeded(ctx, l1client, tx) - Require(t, err) + upgradeExecutor, err := upgrade_executorgen.NewUpgradeExecutor(l2nodeA.DeployInfo.UpgradeExecutor, l1client) + Require(t, err, "unable to bind upgrade executor") + rollupABI, err := abi.JSON(strings.NewReader(rollupgen.RollupAdminLogicABI)) + Require(t, err, "unable to parse rollup ABI") + + setValidatorCalldata, err := rollupABI.Pack("setValidator", []common.Address{valWalletAddrA, l1authB.From}, []bool{true, true}) + Require(t, err, "unable to generate setValidator calldata") + tx, err := upgradeExecutor.ExecuteCall(&deployAuth, l2nodeA.DeployInfo.Rollup, setValidatorCalldata) + Require(t, err, "unable to set validators") + + setMinAssertPeriodCalldata, err := rollupABI.Pack("setMinimumAssertionPeriod", big.NewInt(1)) + Require(t, err, "unable to generate setMinimumAssertionPeriod calldata") + tx, err = upgradeExecutor.ExecuteCall(&deployAuth, l2nodeA.DeployInfo.Rollup, setMinAssertPeriodCalldata) + Require(t, err, "unable to set minimum assertion period") validatorUtils, err := rollupgen.NewValidatorUtils(l2nodeA.DeployInfo.ValidatorUtils, l1client) Require(t, err) From d603cba1d0f1599132b4f311790aa4d1969eebd3 Mon Sep 17 00:00:00 2001 From: Goran Vladika Date: Wed, 25 Oct 2023 14:12:06 +0200 Subject: [PATCH 45/64] Add missing checks --- system_tests/staker_test.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/system_tests/staker_test.go b/system_tests/staker_test.go index 7870c7b84c..968d7c8e02 100644 --- 
a/system_tests/staker_test.go +++ b/system_tests/staker_test.go @@ -122,6 +122,8 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) } rollup, err := rollupgen.NewRollupAdminLogic(l2nodeA.DeployInfo.Rollup, l1client) + Require(t, err) + upgradeExecutor, err := upgrade_executorgen.NewUpgradeExecutor(l2nodeA.DeployInfo.UpgradeExecutor, l1client) Require(t, err, "unable to bind upgrade executor") rollupABI, err := abi.JSON(strings.NewReader(rollupgen.RollupAdminLogicABI)) @@ -131,11 +133,15 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) Require(t, err, "unable to generate setValidator calldata") tx, err := upgradeExecutor.ExecuteCall(&deployAuth, l2nodeA.DeployInfo.Rollup, setValidatorCalldata) Require(t, err, "unable to set validators") + _, err = EnsureTxSucceeded(ctx, l1client, tx) + Require(t, err) setMinAssertPeriodCalldata, err := rollupABI.Pack("setMinimumAssertionPeriod", big.NewInt(1)) Require(t, err, "unable to generate setMinimumAssertionPeriod calldata") tx, err = upgradeExecutor.ExecuteCall(&deployAuth, l2nodeA.DeployInfo.Rollup, setMinAssertPeriodCalldata) Require(t, err, "unable to set minimum assertion period") + _, err = EnsureTxSucceeded(ctx, l1client, tx) + Require(t, err) validatorUtils, err := rollupgen.NewValidatorUtils(l2nodeA.DeployInfo.ValidatorUtils, l1client) Require(t, err) From 1c8f496d4f5046ff254296131f4bff6acaca1585 Mon Sep 17 00:00:00 2001 From: Goran Vladika Date: Wed, 25 Oct 2023 14:38:31 +0200 Subject: [PATCH 46/64] Upgrade through upg executor --- system_tests/staker_test.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/system_tests/staker_test.go b/system_tests/staker_test.go index 968d7c8e02..01243608de 100644 --- a/system_tests/staker_test.go +++ b/system_tests/staker_test.go @@ -331,9 +331,11 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) Fatal(t, "failed to get challenge manager proxy admin") } - 
proxyAdmin, err := mocksgen.NewProxyAdminForBinding(proxyAdminAddr, l1client) + proxyAdminABI, err := abi.JSON(strings.NewReader(mocksgen.ProxyAdminForBindingABI)) Require(t, err) - tx, err = proxyAdmin.Upgrade(&deployAuth, managerAddr, mockImpl) + upgradeCalldata, err := proxyAdminABI.Pack("upgrade", managerAddr, mockImpl) + Require(t, err) + tx, err = upgradeExecutor.ExecuteCall(&deployAuth, proxyAdminAddr, upgradeCalldata) Require(t, err) _, err = EnsureTxSucceeded(ctx, l1client, tx) Require(t, err) From 0e9445dbf9ea1efe29408fef7f1d5be5b85362e6 Mon Sep 17 00:00:00 2001 From: Ganesh Vanahalli Date: Wed, 25 Oct 2023 09:35:58 -0500 Subject: [PATCH 47/64] Update wsbroadcastserver/wsbroadcastserver.go Co-authored-by: Joshua Colvin --- wsbroadcastserver/wsbroadcastserver.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wsbroadcastserver/wsbroadcastserver.go b/wsbroadcastserver/wsbroadcastserver.go index 9a7037a713..fed5489636 100644 --- a/wsbroadcastserver/wsbroadcastserver.go +++ b/wsbroadcastserver/wsbroadcastserver.go @@ -33,7 +33,7 @@ var ( HTTPHeaderFeedClientVersion = textproto.CanonicalMIMEHeaderKey("Arbitrum-Feed-Client-Version") HTTPHeaderRequestedSequenceNumber = textproto.CanonicalMIMEHeaderKey("Arbitrum-Requested-Sequence-Number") HTTPHeaderChainId = textproto.CanonicalMIMEHeaderKey("Arbitrum-Chain-Id") - upgradeToWSTimer = metrics.NewRegisteredTimer("arb/feed/clients/wsupgrade/duration", nil) + upgradeToWSTimer = metrics.NewRegisteredTimer("arb/feed/clients/upgrade/duration", nil) startWithHeaderTimer = metrics.NewRegisteredTimer("arb/feed/clients/start/duration", nil) ) From 70df6d6a225cf0b88d5178971c5c55cb56a4ef84 Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Wed, 25 Oct 2023 09:57:01 -0500 Subject: [PATCH 48/64] code refactor --- wsbroadcastserver/wsbroadcastserver.go | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/wsbroadcastserver/wsbroadcastserver.go b/wsbroadcastserver/wsbroadcastserver.go 
index fed5489636..c26a910144 100644 --- a/wsbroadcastserver/wsbroadcastserver.go +++ b/wsbroadcastserver/wsbroadcastserver.go @@ -204,11 +204,18 @@ func (s *WSBroadcastServer) Start(ctx context.Context) error { HTTPHeaderChainId: []string{strconv.FormatUint(s.chainId, 10)}, }) - return s.StartWithHeader(ctx, header) + startTime := time.Now() + err := s.StartWithHeader(ctx, header) + elapsed := time.Since(startTime) + startWithHeaderTimer.Update(elapsed) + + if err != nil { + return err + } + return nil } func (s *WSBroadcastServer) StartWithHeader(ctx context.Context, header ws.HandshakeHeader) error { - startTimeMain := time.Now() s.startMutex.Lock() defer s.startMutex.Unlock() if s.started { @@ -490,9 +497,6 @@ func (s *WSBroadcastServer) StartWithHeader(ctx context.Context, header ws.Hands s.started = true - elapsedMain := time.Since(startTimeMain) - startWithHeaderTimer.Update(elapsedMain) - return nil } From 7315e8ac0fe1d9a161a12f88a15a7b2c1a2c35d1 Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Wed, 25 Oct 2023 11:18:13 -0500 Subject: [PATCH 49/64] add more functions to TestClient, refactor all tests --- system_tests/aliasing_test.go | 26 +- system_tests/batch_poster_test.go | 54 ++--- system_tests/block_hash_test.go | 10 +- system_tests/block_validator_test.go | 73 +++--- system_tests/bloom_test.go | 6 +- system_tests/common_test.go | 67 +----- system_tests/contract_tx_test.go | 18 +- system_tests/delayedinbox_test.go | 15 +- system_tests/delayedinboxlong_test.go | 26 +- system_tests/estimation_test.go | 79 ++++--- system_tests/fees_test.go | 2 +- system_tests/infra_fee_test.go | 29 +-- system_tests/initialization_test.go | 8 +- system_tests/log_subscription_test.go | 15 +- system_tests/nodeinterface_test.go | 14 +- system_tests/outbox_test.go | 17 +- system_tests/precompile_test.go | 32 +-- system_tests/recreatestate_rpc_test.go | 19 +- system_tests/reorg_resequencing_test.go | 45 ++-- system_tests/retryable_test.go | 302 ++++++++++++------------ 
system_tests/seq_coordinator_test.go | 44 ++-- system_tests/seq_nonce_test.go | 54 +++-- system_tests/seq_pause_test.go | 22 +- system_tests/seq_reject_test.go | 57 +++-- system_tests/seq_whitelist_test.go | 23 +- system_tests/seqcompensation_test.go | 28 +-- system_tests/seqinbox_test.go | 29 ++- system_tests/staker_test.go | 13 +- system_tests/transfer_test.go | 17 +- system_tests/triedb_race_test.go | 38 ++- system_tests/twonodes_test.go | 33 +-- system_tests/twonodeslong_test.go | 82 ++++--- 32 files changed, 634 insertions(+), 663 deletions(-) diff --git a/system_tests/aliasing_test.go b/system_tests/aliasing_test.go index 5e4e65a2ca..60a89468a5 100644 --- a/system_tests/aliasing_test.go +++ b/system_tests/aliasing_test.go @@ -22,20 +22,20 @@ func TestAliasing(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - l2info, node, l2client, l1info, _, l1client, l1stack := createTestNodeOnL1(t, ctx, true) - defer requireClose(t, l1stack) - defer node.StopAndWait() + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + cleanup := builder.Build(t) + defer cleanup() - auth := l2info.GetDefaultTransactOpts("Owner", ctx) - user := l1info.GetDefaultTransactOpts("User", ctx) - TransferBalanceTo(t, "Owner", util.RemapL1Address(user.From), big.NewInt(1e18), l2info, l2client, ctx) + auth := builder.L2Info.GetDefaultTransactOpts("Owner", ctx) + user := builder.L1Info.GetDefaultTransactOpts("User", ctx) + builder.L2.TransferBalanceTo(t, "Owner", util.RemapL1Address(user.From), big.NewInt(1e18), builder.L2Info) - simpleAddr, simple := deploySimple(t, ctx, auth, l2client) + simpleAddr, simple := builder.L2.DeploySimple(t, auth) simpleContract, err := abi.JSON(strings.NewReader(mocksgen.SimpleABI)) Require(t, err) // Test direct calls - arbsys, err := precompilesgen.NewArbSys(types.ArbSysAddress, l2client) + arbsys, err := precompilesgen.NewArbSys(types.ArbSysAddress, builder.L2.Client) Require(t, err) top, err := arbsys.IsTopLevelCall(nil) 
Require(t, err) @@ -56,14 +56,14 @@ func TestAliasing(t *testing.T) { // check via L2 tx, err := simple.CheckCalls(&auth, top, direct, static, delegate, callcode, call) Require(t, err) - _, err = EnsureTxSucceeded(ctx, l2client, tx) + _, err = builder.L2.EnsureTxSucceeded(tx) Require(t, err) // check signed txes via L1 data, err := simpleContract.Pack("checkCalls", top, direct, static, delegate, callcode, call) Require(t, err) - tx = l2info.PrepareTxTo("Owner", &simpleAddr, 500000, big.NewInt(0), data) - SendSignedTxViaL1(t, ctx, l1info, l1client, l2client, tx) + tx = builder.L2Info.PrepareTxTo("Owner", &simpleAddr, 500000, big.NewInt(0), data) + builder.L1.SendSignedTx(t, builder.L2.Client, tx, builder.L1Info) } testUnsigned := func(top, direct, static, delegate, callcode, call bool) { @@ -72,8 +72,8 @@ func TestAliasing(t *testing.T) { // check unsigned txes via L1 data, err := simpleContract.Pack("checkCalls", top, direct, static, delegate, callcode, call) Require(t, err) - tx := l2info.PrepareTxTo("Owner", &simpleAddr, 500000, big.NewInt(0), data) - SendUnsignedTxViaL1(t, ctx, l1info, l1client, l2client, tx) + tx := builder.L2Info.PrepareTxTo("Owner", &simpleAddr, 500000, big.NewInt(0), data) + builder.L1.SendUnsignedTx(t, builder.L2.Client, tx, builder.L1Info) } testL2Signed(true, true, false, false, false, false) diff --git a/system_tests/batch_poster_test.go b/system_tests/batch_poster_test.go index 8c0de8c6db..8561e3ffc7 100644 --- a/system_tests/batch_poster_test.go +++ b/system_tests/batch_poster_test.go @@ -50,10 +50,9 @@ func testBatchPosterParallel(t *testing.T, useRedis bool) { builder.nodeConfig.BatchPoster.RedisUrl = redisUrl cleanup := builder.Build(t) defer cleanup() - l1A, l2A := builder.L1, builder.L2 - l2B, cleanup2nd := builder.Build2ndNode(t, &SecondNodeParams{}) - defer cleanup2nd() + testClientB, cleanupB := builder.Build2ndNode(t, &SecondNodeParams{}) + defer cleanupB() builder.L2Info.GenerateAccount("User2") @@ -63,12 +62,12 @@ func 
testBatchPosterParallel(t *testing.T, useRedis bool) { tx := builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, common.Big1, nil) txs = append(txs, tx) - err := l2A.Client.SendTransaction(ctx, tx) + err := builder.L2.Client.SendTransaction(ctx, tx) Require(t, err) } for _, tx := range txs { - _, err := EnsureTxSucceeded(ctx, l2A.Client, tx) + _, err := builder.L2.EnsureTxSucceeded(tx) Require(t, err) } @@ -77,7 +76,7 @@ func testBatchPosterParallel(t *testing.T, useRedis bool) { seqTxOpts := builder.L1Info.GetDefaultTransactOpts("Sequencer", ctx) builder.nodeConfig.BatchPoster.Enable = true builder.nodeConfig.BatchPoster.MaxSize = len(firstTxData) * 2 - startL1Block, err := l1A.Client.BlockNumber(ctx) + startL1Block, err := builder.L1.Client.BlockNumber(ctx) Require(t, err) for i := 0; i < parallelBatchPosters; i++ { // Make a copy of the batch poster config so NewBatchPoster calling Validate() on it doesn't race @@ -85,12 +84,12 @@ func testBatchPosterParallel(t *testing.T, useRedis bool) { batchPoster, err := arbnode.NewBatchPoster(ctx, &arbnode.BatchPosterOpts{ DataPosterDB: nil, - L1Reader: l2A.ConsensusNode.L1Reader, - Inbox: l2A.ConsensusNode.InboxTracker, - Streamer: l2A.ConsensusNode.TxStreamer, - SyncMonitor: l2A.ConsensusNode.SyncMonitor, + L1Reader: builder.L2.ConsensusNode.L1Reader, + Inbox: builder.L2.ConsensusNode.InboxTracker, + Streamer: builder.L2.ConsensusNode.TxStreamer, + SyncMonitor: builder.L2.ConsensusNode.SyncMonitor, Config: func() *arbnode.BatchPosterConfig { return &batchPosterConfig }, - DeployInfo: l2A.ConsensusNode.DeployInfo, + DeployInfo: builder.L2.ConsensusNode.DeployInfo, TransactOpts: &seqTxOpts, DAWriter: nil, }, @@ -102,11 +101,11 @@ func testBatchPosterParallel(t *testing.T, useRedis bool) { lastTxHash := txs[len(txs)-1].Hash() for i := 90; i > 0; i-- { - SendWaitTestTransactions(t, ctx, l1A.Client, []*types.Transaction{ + builder.L1.SendWaitTestTransactions(t, []*types.Transaction{ 
builder.L1Info.PrepareTx("Faucet", "User", 30000, big.NewInt(1e12), nil), }) time.Sleep(500 * time.Millisecond) - _, err := l2B.Client.TransactionReceipt(ctx, lastTxHash) + _, err := testClientB.Client.TransactionReceipt(ctx, lastTxHash) if err == nil { break } @@ -121,9 +120,9 @@ func testBatchPosterParallel(t *testing.T, useRedis bool) { // However, setting the clique period to 1 slows everything else (including the L1 deployment for this test) down to a crawl. if false { // Make sure the batch poster is able to post multiple batches in one block - endL1Block, err := l1A.Client.BlockNumber(ctx) + endL1Block, err := builder.L1.Client.BlockNumber(ctx) Require(t, err) - seqInbox, err := arbnode.NewSequencerInbox(l1A.Client, l2A.ConsensusNode.DeployInfo.SequencerInbox, 0) + seqInbox, err := arbnode.NewSequencerInbox(builder.L1.Client, builder.L2.ConsensusNode.DeployInfo.SequencerInbox, 0) Require(t, err) batches, err := seqInbox.LookupBatchesInRange(ctx, new(big.Int).SetUint64(startL1Block), new(big.Int).SetUint64(endL1Block)) Require(t, err) @@ -143,7 +142,7 @@ func testBatchPosterParallel(t *testing.T, useRedis bool) { } } - l2balance, err := l2B.Client.BalanceAt(ctx, builder.L2Info.GetAddress("User2"), nil) + l2balance, err := testClientB.Client.BalanceAt(ctx, builder.L2Info.GetAddress("User2"), nil) Require(t, err) if l2balance.Sign() == 0 { @@ -160,10 +159,9 @@ func TestBatchPosterLargeTx(t *testing.T) { builder.execConfig.Sequencer.MaxTxDataSize = 110000 cleanup := builder.Build(t) defer cleanup() - l2A := builder.L2 - l2B, cleanup2nd := builder.Build2ndNode(t, &SecondNodeParams{}) - defer cleanup2nd() + testClientB, cleanupB := builder.Build2ndNode(t, &SecondNodeParams{}) + defer cleanupB() data := make([]byte, 100000) _, err := rand.Read(data) @@ -171,11 +169,11 @@ func TestBatchPosterLargeTx(t *testing.T) { faucetAddr := builder.L2Info.GetAddress("Faucet") gas := builder.L2Info.TransferGas + 20000*uint64(len(data)) tx := builder.L2Info.PrepareTxTo("Faucet", 
&faucetAddr, gas, common.Big0, data) - err = l2A.Client.SendTransaction(ctx, tx) + err = builder.L2.Client.SendTransaction(ctx, tx) Require(t, err) - receiptA, err := EnsureTxSucceeded(ctx, l2A.Client, tx) + receiptA, err := builder.L2.EnsureTxSucceeded(tx) Require(t, err) - receiptB, err := EnsureTxSucceededWithTimeout(ctx, l2B.Client, tx, time.Second*30) + receiptB, err := testClientB.EnsureTxSucceededWithTimeout(tx, time.Second*30) Require(t, err) if receiptA.BlockHash != receiptB.BlockHash { Fatal(t, "receipt A block hash", receiptA.BlockHash, "does not equal receipt B block hash", receiptB.BlockHash) @@ -193,7 +191,7 @@ func TestBatchPosterKeepsUp(t *testing.T) { builder.execConfig.RPC.RPCTxFeeCap = 1000. cleanup := builder.Build(t) defer cleanup() - l2A := builder.L2 + builder.L2Info.GasPrice = big.NewInt(100e9) go func() { @@ -203,9 +201,9 @@ func TestBatchPosterKeepsUp(t *testing.T) { for { gas := builder.L2Info.TransferGas + 20000*uint64(len(data)) tx := builder.L2Info.PrepareTx("Faucet", "Faucet", gas, common.Big0, data) - err = l2A.Client.SendTransaction(ctx, tx) + err = builder.L2.Client.SendTransaction(ctx, tx) Require(t, err) - _, err := EnsureTxSucceeded(ctx, l2A.Client, tx) + _, err := builder.L2.EnsureTxSucceeded(tx) Require(t, err) } }() @@ -213,11 +211,11 @@ func TestBatchPosterKeepsUp(t *testing.T) { start := time.Now() for { time.Sleep(time.Second) - batches, err := l2A.ConsensusNode.InboxTracker.GetBatchCount() + batches, err := builder.L2.ConsensusNode.InboxTracker.GetBatchCount() Require(t, err) - postedMessages, err := l2A.ConsensusNode.InboxTracker.GetBatchMessageCount(batches - 1) + postedMessages, err := builder.L2.ConsensusNode.InboxTracker.GetBatchMessageCount(batches - 1) Require(t, err) - haveMessages, err := l2A.ConsensusNode.TxStreamer.GetMessageCount() + haveMessages, err := builder.L2.ConsensusNode.TxStreamer.GetMessageCount() Require(t, err) duration := time.Since(start) fmt.Printf("batches posted: %v over %v (%.2f 
batches/second)\n", batches, duration, float64(batches)/(float64(duration)/float64(time.Second))) diff --git a/system_tests/block_hash_test.go b/system_tests/block_hash_test.go index 2b8051242e..b437f3dad9 100644 --- a/system_tests/block_hash_test.go +++ b/system_tests/block_hash_test.go @@ -16,13 +16,13 @@ func TestBlockHash(t *testing.T) { defer cancel() // Even though we don't use the L1, we need to create this node on L1 to get accurate L1 block numbers - l2info, l2node, l2client, _, _, _, l1stack := createTestNodeOnL1(t, ctx, true) - defer requireClose(t, l1stack) - defer l2node.StopAndWait() + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + cleanup := builder.Build(t) + defer cleanup() - auth := l2info.GetDefaultTransactOpts("Faucet", ctx) + auth := builder.L2Info.GetDefaultTransactOpts("Faucet", ctx) - _, _, simple, err := mocksgen.DeploySimple(&auth, l2client) + _, _, simple, err := mocksgen.DeploySimple(&auth, builder.L2.Client) Require(t, err) _, err = simple.CheckBlockHashes(&bind.CallOpts{Context: ctx}) diff --git a/system_tests/block_validator_test.go b/system_tests/block_validator_test.go index 1699346b17..d0409f8679 100644 --- a/system_tests/block_validator_test.go +++ b/system_tests/block_validator_test.go @@ -51,20 +51,23 @@ func testBlockValidatorSimple(t *testing.T, dasModeString string, workloadLoops delayEvery = workloadLoops / 3 } - l2info, nodeA, l2client, l1info, _, l1client, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, l1NodeConfigA, nil, chainConfig, nil) - defer requireClose(t, l1stack) - defer nodeA.StopAndWait() + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + builder.nodeConfig = l1NodeConfigA + builder.chainConfig = chainConfig + builder.L2Info = nil + cleanup := builder.Build(t) + defer cleanup() - authorizeDASKeyset(t, ctx, dasSignerKey, l1info, l1client) + authorizeDASKeyset(t, ctx, dasSignerKey, builder.L1Info, builder.L1.Client) validatorConfig := arbnode.ConfigDefaultL1NonSequencerTest() 
validatorConfig.BlockValidator.Enable = true validatorConfig.DataAvailability = l1NodeConfigA.DataAvailability validatorConfig.DataAvailability.RPCAggregator.Enable = false AddDefaultValNode(t, ctx, validatorConfig, !arbitrator) - l2clientB, nodeB := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, l1info, &l2info.ArbInitData, validatorConfig, nil, nil) - defer nodeB.StopAndWait() - l2info.GenerateAccount("User2") + testClientB, cleanupB := builder.Build2ndNode(t, &SecondNodeParams{nodeConfig: validatorConfig}) + defer cleanupB() + builder.L2Info.GenerateAccount("User2") perTransfer := big.NewInt(1e12) @@ -73,7 +76,7 @@ func testBlockValidatorSimple(t *testing.T, dasModeString string, workloadLoops var tx *types.Transaction if workload == ethSend { - tx = l2info.PrepareTx("Owner", "User2", l2info.TransferGas, perTransfer, nil) + tx = builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, perTransfer, nil) } else { var contractCode []byte var gas uint64 @@ -87,10 +90,10 @@ func testBlockValidatorSimple(t *testing.T, dasModeString string, workloadLoops contractCode = append(contractCode, byte(vm.CODECOPY)) contractCode = append(contractCode, byte(vm.PUSH0)) contractCode = append(contractCode, byte(vm.RETURN)) - basefee := GetBaseFee(t, l2client, ctx) + basefee := builder.L2.GetBaseFee(t) var err error - gas, err = l2client.EstimateGas(ctx, ethereum.CallMsg{ - From: l2info.GetAddress("Owner"), + gas, err = builder.L2.Client.EstimateGas(ctx, ethereum.CallMsg{ + From: builder.L2Info.GetAddress("Owner"), GasPrice: basefee, Value: big.NewInt(0), Data: contractCode, @@ -102,14 +105,14 @@ func testBlockValidatorSimple(t *testing.T, dasModeString string, workloadLoops contractCode = append(contractCode, 0x60, 0x00, 0x60, 0x00, 0x52) // PUSH1 0 MSTORE } contractCode = append(contractCode, 0x60, 0x00, 0x56) // JUMP - gas = l2info.TransferGas*2 + l2pricing.InitialPerBlockGasLimitV6 + gas = builder.L2Info.TransferGas*2 + l2pricing.InitialPerBlockGasLimitV6 } - tx 
= l2info.PrepareTxTo("Owner", nil, gas, common.Big0, contractCode) + tx = builder.L2Info.PrepareTxTo("Owner", nil, gas, common.Big0, contractCode) } - err := l2client.SendTransaction(ctx, tx) + err := builder.L2.Client.SendTransaction(ctx, tx) Require(t, err) - _, err = EnsureTxSucceededWithTimeout(ctx, l2client, tx, time.Second*5) + _, err = builder.L2.EnsureTxSucceeded(tx) if workload != depleteGas { Require(t, err) } @@ -118,49 +121,49 @@ func testBlockValidatorSimple(t *testing.T, dasModeString string, workloadLoops } } } else { - auth := l2info.GetDefaultTransactOpts("Owner", ctx) + auth := builder.L2Info.GetDefaultTransactOpts("Owner", ctx) // make auth a chain owner - arbDebug, err := precompilesgen.NewArbDebug(common.HexToAddress("0xff"), l2client) + arbDebug, err := precompilesgen.NewArbDebug(common.HexToAddress("0xff"), builder.L2.Client) Require(t, err) tx, err := arbDebug.BecomeChainOwner(&auth) Require(t, err) - _, err = EnsureTxSucceeded(ctx, l2client, tx) + _, err = builder.L2.EnsureTxSucceeded(tx) Require(t, err) - arbOwner, err := precompilesgen.NewArbOwner(common.HexToAddress("0x70"), l2client) + arbOwner, err := precompilesgen.NewArbOwner(common.HexToAddress("0x70"), builder.L2.Client) Require(t, err) tx, err = arbOwner.ScheduleArbOSUpgrade(&auth, 11, 0) Require(t, err) - _, err = EnsureTxSucceeded(ctx, l2client, tx) + _, err = builder.L2.EnsureTxSucceeded(tx) Require(t, err) - tx = l2info.PrepareTxTo("Owner", nil, l2info.TransferGas, perTransfer, []byte{byte(vm.PUSH0)}) - err = l2client.SendTransaction(ctx, tx) + tx = builder.L2Info.PrepareTxTo("Owner", nil, builder.L2Info.TransferGas, perTransfer, []byte{byte(vm.PUSH0)}) + err = builder.L2.Client.SendTransaction(ctx, tx) Require(t, err) - _, err = EnsureTxSucceededWithTimeout(ctx, l2client, tx, time.Second*5) + _, err = builder.L2.EnsureTxSucceeded(tx) Require(t, err) } if workload != depleteGas { - delayedTx := l2info.PrepareTx("Owner", "User2", 30002, perTransfer, nil) - 
SendWaitTestTransactions(t, ctx, l1client, []*types.Transaction{ - WrapL2ForDelayed(t, delayedTx, l1info, "User", 100000), + delayedTx := builder.L2Info.PrepareTx("Owner", "User2", 30002, perTransfer, nil) + builder.L1.SendWaitTestTransactions(t, []*types.Transaction{ + WrapL2ForDelayed(t, delayedTx, builder.L1Info, "User", 100000), }) // give the inbox reader a bit of time to pick up the delayed message time.Sleep(time.Millisecond * 500) // sending l1 messages creates l1 blocks.. make enough to get that delayed inbox message in for i := 0; i < 30; i++ { - SendWaitTestTransactions(t, ctx, l1client, []*types.Transaction{ - l1info.PrepareTx("Faucet", "User", 30000, big.NewInt(1e12), nil), + builder.L1.SendWaitTestTransactions(t, []*types.Transaction{ + builder.L1Info.PrepareTx("Faucet", "User", 30000, big.NewInt(1e12), nil), }) } - _, err := WaitForTx(ctx, l2clientB, delayedTx.Hash(), time.Second*5) + _, err := WaitForTx(ctx, testClientB.Client, delayedTx.Hash(), time.Second*5) Require(t, err) } if workload == ethSend { - l2balance, err := l2clientB.BalanceAt(ctx, l2info.GetAddress("User2"), nil) + l2balance, err := testClientB.Client.BalanceAt(ctx, builder.L2Info.GetAddress("User2"), nil) Require(t, err) expectedBalance := new(big.Int).Mul(perTransfer, big.NewInt(int64(workloadLoops+1))) @@ -169,7 +172,7 @@ func testBlockValidatorSimple(t *testing.T, dasModeString string, workloadLoops } } - lastBlock, err := l2clientB.BlockByNumber(ctx, nil) + lastBlock, err := testClientB.Client.BlockByNumber(ctx, nil) Require(t, err) for { usefulBlock := false @@ -182,22 +185,22 @@ func testBlockValidatorSimple(t *testing.T, dasModeString string, workloadLoops if usefulBlock { break } - lastBlock, err = l2clientB.BlockByHash(ctx, lastBlock.ParentHash()) + lastBlock, err = testClientB.Client.BlockByHash(ctx, lastBlock.ParentHash()) Require(t, err) } t.Log("waiting for block: ", lastBlock.NumberU64()) timeout := getDeadlineTimeout(t, time.Minute*10) // messageindex is same as block 
number here - if !nodeB.BlockValidator.WaitForPos(t, ctx, arbutil.MessageIndex(lastBlock.NumberU64()), timeout) { + if !testClientB.ConsensusNode.BlockValidator.WaitForPos(t, ctx, arbutil.MessageIndex(lastBlock.NumberU64()), timeout) { Fatal(t, "did not validate all blocks") } - gethExec, ok := nodeB.Execution.(*gethexec.ExecutionNode) + gethExec, ok := testClientB.ConsensusNode.Execution.(*gethexec.ExecutionNode) if !ok { t.Fail() } gethExec.Recorder.TrimAllPrepared(t) finalRefCount := gethExec.Recorder.RecordingDBReferenceCount() - lastBlockNow, err := l2clientB.BlockByNumber(ctx, nil) + lastBlockNow, err := testClientB.Client.BlockByNumber(ctx, nil) Require(t, err) // up to 3 extra references: awaiting validation, recently valid, lastValidatedHeader largestRefCount := lastBlockNow.NumberU64() - lastBlock.NumberU64() + 3 diff --git a/system_tests/bloom_test.go b/system_tests/bloom_test.go index 9079fd35f1..a3cab748e2 100644 --- a/system_tests/bloom_test.go +++ b/system_tests/bloom_test.go @@ -36,7 +36,7 @@ func TestBloom(t *testing.T) { ownerTxOpts := builder.L2Info.GetDefaultTransactOpts("Owner", ctx) ownerTxOpts.Context = ctx - _, simple := deploySimple(t, ctx, ownerTxOpts, builder.L2.Client) + _, simple := builder.L2.DeploySimple(t, ownerTxOpts) simpleABI, err := mocksgen.SimpleMetaData.GetAbi() Require(t, err) @@ -64,7 +64,7 @@ func TestBloom(t *testing.T) { if sendNullEvent { tx, err = simple.EmitNullEvent(&ownerTxOpts) Require(t, err) - _, err = EnsureTxSucceeded(ctx, builder.L2.Client, tx) + _, err = builder.L2.EnsureTxSucceeded(tx) Require(t, err) } @@ -75,7 +75,7 @@ func TestBloom(t *testing.T) { tx, err = simple.Increment(&ownerTxOpts) } Require(t, err) - _, err = EnsureTxSucceeded(ctx, builder.L2.Client, tx) + _, err = builder.L2.EnsureTxSucceeded(tx) Require(t, err) if i%100 == 0 { t.Log("counts: ", i, "/", countsNum) diff --git a/system_tests/common_test.go b/system_tests/common_test.go index 41a3fd1417..0d7cd2dfaa 100644 --- 
a/system_tests/common_test.go +++ b/system_tests/common_test.go @@ -103,6 +103,10 @@ func (tc *TestClient) GetBalance(t *testing.T, account common.Address) *big.Int return GetBalance(t, tc.ctx, tc.Client, account) } +func (tc *TestClient) GetBaseFee(t *testing.T) *big.Int { + return GetBaseFee(t, tc.Client, tc.ctx) +} + func (tc *TestClient) GetBaseFeeAt(t *testing.T, blockNum *big.Int) *big.Int { return GetBaseFeeAt(t, tc.Client, tc.ctx, blockNum) } @@ -116,7 +120,11 @@ func (tc *TestClient) DeploySimple(t *testing.T, auth bind.TransactOpts) (common } func (tc *TestClient) EnsureTxSucceeded(transaction *types.Transaction) (*types.Receipt, error) { - return EnsureTxSucceeded(tc.ctx, tc.Client, transaction) + return tc.EnsureTxSucceededWithTimeout(transaction, time.Second*5) +} + +func (tc *TestClient) EnsureTxSucceededWithTimeout(transaction *types.Transaction, timeout time.Duration) (*types.Receipt, error) { + return EnsureTxSucceededWithTimeout(tc.ctx, tc.Client, transaction, timeout) } type NodeBuilder struct { @@ -169,13 +177,13 @@ func (b *NodeBuilder) Build(t *testing.T) func() { if b.withL1 { l1, l2 := NewTestClient(b.ctx), NewTestClient(b.ctx) b.L2Info, l2.ConsensusNode, l2.Client, l2.Stack, b.L1Info, l1.L1Backend, l1.Client, l1.Stack = - createTestNodeOnL1WithConfigImpl(t, b.ctx, b.isSequencer, b.nodeConfig, b.execConfig, b.chainConfig, b.l2StackConfig, b.L2Info) + createTestNodeWithL1(t, b.ctx, b.isSequencer, b.nodeConfig, b.execConfig, b.chainConfig, b.l2StackConfig, b.L2Info) b.L1, b.L2 = l1, l2 b.L1.cleanup = func() { requireClose(t, b.L1.Stack) } } else { l2 := NewTestClient(b.ctx) b.L2Info, l2.ConsensusNode, l2.Client = - CreateTestL2WithConfig(t, b.ctx, b.L2Info, b.nodeConfig, b.execConfig, b.takeOwnership) + createTestNode(t, b.ctx, b.L2Info, b.nodeConfig, b.execConfig, b.takeOwnership) b.L2 = l2 } b.L2.ExecNode = getExecNode(t, b.L2.ConsensusNode) @@ -702,34 +710,7 @@ func ClientForStack(t *testing.T, backend *node.Node) *ethclient.Client { } // 
Create and deploy L1 and arbnode for L2 -func createTestNodeOnL1( - t *testing.T, - ctx context.Context, - isSequencer bool, -) ( - l2info info, node *arbnode.Node, l2client *ethclient.Client, l1info info, - l1backend *eth.Ethereum, l1client *ethclient.Client, l1stack *node.Node, -) { - return createTestNodeOnL1WithConfig(t, ctx, isSequencer, nil, nil, nil, nil) -} - -func createTestNodeOnL1WithConfig( - t *testing.T, - ctx context.Context, - isSequencer bool, - nodeConfig *arbnode.Config, - execConfig *gethexec.Config, - chainConfig *params.ChainConfig, - stackConfig *node.Config, -) ( - l2info info, currentNode *arbnode.Node, l2client *ethclient.Client, l1info info, - l1backend *eth.Ethereum, l1client *ethclient.Client, l1stack *node.Node, -) { - l2info, currentNode, l2client, _, l1info, l1backend, l1client, l1stack = createTestNodeOnL1WithConfigImpl(t, ctx, isSequencer, nodeConfig, execConfig, chainConfig, stackConfig, nil) - return -} - -func createTestNodeOnL1WithConfigImpl( +func createTestNodeWithL1( t *testing.T, ctx context.Context, isSequencer bool, @@ -801,11 +782,7 @@ func createTestNodeOnL1WithConfigImpl( // L2 -Only. Enough for tests that needs no interface to L1 // Requires precompiles.AllowDebugPrecompiles = true -func CreateTestL2(t *testing.T, ctx context.Context) (*BlockchainTestInfo, *arbnode.Node, *ethclient.Client) { - return CreateTestL2WithConfig(t, ctx, nil, nil, nil, true) -} - -func CreateTestL2WithConfig( +func createTestNode( t *testing.T, ctx context.Context, l2Info *BlockchainTestInfo, nodeConfig *arbnode.Config, execConfig *gethexec.Config, takeOwnership bool, ) (*BlockchainTestInfo, *arbnode.Node, *ethclient.Client) { if nodeConfig == nil { @@ -879,24 +856,6 @@ func Fatal(t *testing.T, printables ...interface{}) { testhelpers.FailImpl(t, printables...) 
} -func Create2ndNode( - t *testing.T, - ctx context.Context, - first *arbnode.Node, - l1stack *node.Node, - l1info *BlockchainTestInfo, - l2InitData *statetransfer.ArbosInitializationInfo, - dasConfig *das.DataAvailabilityConfig, -) (*ethclient.Client, *arbnode.Node) { - nodeConf := arbnode.ConfigDefaultL1NonSequencerTest() - if dasConfig == nil { - nodeConf.DataAvailability.Enable = false - } else { - nodeConf.DataAvailability = *dasConfig - } - return Create2ndNodeWithConfig(t, ctx, first, l1stack, l1info, l2InitData, nodeConf, nil, nil) -} - func Create2ndNodeWithConfig( t *testing.T, ctx context.Context, diff --git a/system_tests/contract_tx_test.go b/system_tests/contract_tx_test.go index d6c2eb5f38..56d79b36d9 100644 --- a/system_tests/contract_tx_test.go +++ b/system_tests/contract_tx_test.go @@ -25,18 +25,20 @@ func TestContractTxDeploy(t *testing.T) { t.Parallel() ctx, cancel := context.WithCancel(context.Background()) defer cancel() - l2info, node, client := CreateTestL2WithConfig(t, ctx, nil, nil, nil, false) - defer node.StopAndWait() + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + builder.takeOwnership = false + cleanup := builder.Build(t) + defer cleanup() from := common.HexToAddress("0x123412341234") - TransferBalanceTo(t, "Faucet", from, big.NewInt(1e18), l2info, client, ctx) + builder.L2.TransferBalanceTo(t, "Faucet", from, big.NewInt(1e18), builder.L2Info) for stateNonce := uint64(0); stateNonce < 2; stateNonce++ { - pos, err := node.TxStreamer.GetMessageCount() + pos, err := builder.L2.ConsensusNode.TxStreamer.GetMessageCount() Require(t, err) var delayedMessagesRead uint64 if pos > 0 { - lastMessage, err := node.TxStreamer.GetMessage(pos - 1) + lastMessage, err := builder.L2.ConsensusNode.TxStreamer.GetMessage(pos - 1) Require(t, err) delayedMessagesRead = lastMessage.DelayedMessagesRead } @@ -68,7 +70,7 @@ func TestContractTxDeploy(t *testing.T) { l2Msg = append(l2Msg, math.U256Bytes(contractTx.Value)...) 
l2Msg = append(l2Msg, contractTx.Data...) - err = node.TxStreamer.AddMessages(pos, true, []arbostypes.MessageWithMetadata{ + err = builder.L2.ConsensusNode.TxStreamer.AddMessages(pos, true, []arbostypes.MessageWithMetadata{ { Message: &arbostypes.L1IncomingMessage{ Header: &arbostypes.L1IncomingMessageHeader{ @@ -89,7 +91,7 @@ func TestContractTxDeploy(t *testing.T) { txHash := types.NewTx(contractTx).Hash() t.Log("made contract tx", contractTx, "with hash", txHash) - receipt, err := WaitForTx(ctx, client, txHash, time.Second*10) + receipt, err := WaitForTx(ctx, builder.L2.Client, txHash, time.Second*10) Require(t, err) if receipt.Status != types.ReceiptStatusSuccessful { Fatal(t, "Receipt has non-successful status", receipt.Status) @@ -102,7 +104,7 @@ func TestContractTxDeploy(t *testing.T) { t.Log("deployed contract", receipt.ContractAddress, "from address", from, "with nonce", stateNonce) stateNonce++ - code, err := client.CodeAt(ctx, receipt.ContractAddress, nil) + code, err := builder.L2.Client.CodeAt(ctx, receipt.ContractAddress, nil) Require(t, err) if !bytes.Equal(code, []byte{0xFE}) { Fatal(t, "expected contract", receipt.ContractAddress, "code of 0xFE but got", hex.EncodeToString(code)) diff --git a/system_tests/delayedinbox_test.go b/system_tests/delayedinbox_test.go index e48cb37028..ca3e7b5999 100644 --- a/system_tests/delayedinbox_test.go +++ b/system_tests/delayedinbox_test.go @@ -38,16 +38,17 @@ func TestDelayInboxSimple(t *testing.T) { t.Parallel() ctx, cancel := context.WithCancel(context.Background()) defer cancel() - l2info, l2node, l2client, l1info, _, l1client, l1stack := createTestNodeOnL1(t, ctx, true) - defer requireClose(t, l1stack) - defer l2node.StopAndWait() - l2info.GenerateAccount("User2") + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + cleanup := builder.Build(t) + defer cleanup() - delayedTx := l2info.PrepareTx("Owner", "User2", 50001, big.NewInt(1e6), nil) - SendSignedTxViaL1(t, ctx, l1info, l1client, l2client, delayedTx) 
+ builder.L2Info.GenerateAccount("User2") - l2balance, err := l2client.BalanceAt(ctx, l2info.GetAddress("User2"), nil) + delayedTx := builder.L2Info.PrepareTx("Owner", "User2", 50001, big.NewInt(1e6), nil) + builder.L1.SendSignedTx(t, builder.L2.Client, delayedTx, builder.L1Info) + + l2balance, err := builder.L2.Client.BalanceAt(ctx, builder.L2Info.GetAddress("User2"), nil) Require(t, err) if l2balance.Cmp(big.NewInt(1e6)) != 0 { Fatal(t, "Unexpected balance:", l2balance) diff --git a/system_tests/delayedinboxlong_test.go b/system_tests/delayedinboxlong_test.go index b1c8ea361b..7c57771f50 100644 --- a/system_tests/delayedinboxlong_test.go +++ b/system_tests/delayedinboxlong_test.go @@ -25,11 +25,11 @@ func TestDelayInboxLong(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - l2info, l2node, l2client, l1info, l1backend, l1client, l1stack := createTestNodeOnL1(t, ctx, true) - defer requireClose(t, l1stack) - defer l2node.StopAndWait() + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + cleanup := builder.Build(t) + defer cleanup() - l2info.GenerateAccount("User2") + builder.L2Info.GenerateAccount("User2") fundsPerDelayed := int64(1000000) delayedMessages := int64(0) @@ -42,22 +42,22 @@ func TestDelayInboxLong(t *testing.T) { randNum := rand.Int() % messagesPerDelayed var l1tx *types.Transaction if randNum == 0 { - delayedTx := l2info.PrepareTx("Owner", "User2", 50001, big.NewInt(fundsPerDelayed), nil) - l1tx = WrapL2ForDelayed(t, delayedTx, l1info, "User", 100000) + delayedTx := builder.L2Info.PrepareTx("Owner", "User2", 50001, big.NewInt(fundsPerDelayed), nil) + l1tx = WrapL2ForDelayed(t, delayedTx, builder.L1Info, "User", 100000) lastDelayedMessage = delayedTx delayedMessages++ } else { - l1tx = l1info.PrepareTx("Faucet", "User", 30000, big.NewInt(1e12), nil) + l1tx = builder.L1Info.PrepareTx("Faucet", "User", 30000, big.NewInt(1e12), nil) } l1Txs = append(l1Txs, l1tx) } // adding multiple messages in the same AddLocal 
to get them in the same L1 block - errs := l1backend.TxPool().AddLocals(l1Txs) + errs := builder.L1.L1Backend.TxPool().AddLocals(l1Txs) for _, err := range errs { Require(t, err) } // Checking every tx is expensive, so we just check the last, assuming that the others succeeded too - _, err := EnsureTxSucceeded(ctx, l1client, l1Txs[len(l1Txs)-1]) + _, err := builder.L1.EnsureTxSucceeded(l1Txs[len(l1Txs)-1]) Require(t, err) } @@ -68,14 +68,14 @@ func TestDelayInboxLong(t *testing.T) { // sending l1 messages creates l1 blocks.. make enough to get that delayed inbox message in for i := 0; i < 100; i++ { - SendWaitTestTransactions(t, ctx, l1client, []*types.Transaction{ - l1info.PrepareTx("Faucet", "User", 30000, big.NewInt(1e12), nil), + builder.L1.SendWaitTestTransactions(t, []*types.Transaction{ + builder.L1Info.PrepareTx("Faucet", "User", 30000, big.NewInt(1e12), nil), }) } - _, err := WaitForTx(ctx, l2client, lastDelayedMessage.Hash(), time.Second*5) + _, err := WaitForTx(ctx, builder.L2.Client, lastDelayedMessage.Hash(), time.Second*5) Require(t, err) - l2balance, err := l2client.BalanceAt(ctx, l2info.GetAddress("User2"), nil) + l2balance, err := builder.L2.Client.BalanceAt(ctx, builder.L2Info.GetAddress("User2"), nil) Require(t, err) if l2balance.Cmp(big.NewInt(fundsPerDelayed*delayedMessages)) != 0 { Fatal(t, "Unexpected balance:", "balance", l2balance, "expected", fundsPerDelayed*delayedMessages) diff --git a/system_tests/estimation_test.go b/system_tests/estimation_test.go index 9f2db62dab..691b02a123 100644 --- a/system_tests/estimation_test.go +++ b/system_tests/estimation_test.go @@ -26,17 +26,18 @@ func TestDeploy(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - l2info, node, client := CreateTestL2(t, ctx) - defer node.StopAndWait() + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + cleanup := builder.Build(t) + defer cleanup() - auth := l2info.GetDefaultTransactOpts("Owner", ctx) + auth := 
builder.L2Info.GetDefaultTransactOpts("Owner", ctx) auth.GasMargin = 0 // don't adjust, we want to see if the estimate alone is sufficient - _, simple := deploySimple(t, ctx, auth, client) + _, simple := builder.L2.DeploySimple(t, auth) tx, err := simple.Increment(&auth) Require(t, err, "failed to call Increment()") - _, err = EnsureTxSucceeded(ctx, client, tx) + _, err = builder.L2.EnsureTxSucceeded(tx) Require(t, err) counter, err := simple.Counter(&bind.CallOpts{}) @@ -51,24 +52,25 @@ func TestEstimate(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - l2info, node, client := CreateTestL2(t, ctx) - defer node.StopAndWait() + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + cleanup := builder.Build(t) + defer cleanup() - auth := l2info.GetDefaultTransactOpts("Owner", ctx) + auth := builder.L2Info.GetDefaultTransactOpts("Owner", ctx) auth.GasMargin = 0 // don't adjust, we want to see if the estimate alone is sufficient gasPrice := big.NewInt(params.GWei / 10) // set the gas price - arbOwner, err := precompilesgen.NewArbOwner(common.HexToAddress("0x70"), client) + arbOwner, err := precompilesgen.NewArbOwner(common.HexToAddress("0x70"), builder.L2.Client) Require(t, err, "could not deploy ArbOwner contract") tx, err := arbOwner.SetMinimumL2BaseFee(&auth, gasPrice) Require(t, err, "could not set L2 gas price") - _, err = EnsureTxSucceeded(ctx, client, tx) + _, err = builder.L2.EnsureTxSucceeded(tx) Require(t, err) // connect to arbGasInfo precompile - arbGasInfo, err := precompilesgen.NewArbGasInfo(common.HexToAddress("0x6c"), client) + arbGasInfo, err := precompilesgen.NewArbGasInfo(common.HexToAddress("0x6c"), builder.L2.Client) Require(t, err, "could not deploy contract") // wait for price to come to equilibrium @@ -76,8 +78,8 @@ func TestEstimate(t *testing.T) { numTriesLeft := 20 for !equilibrated && numTriesLeft > 0 { // make an empty block to let the gas price update - l2info.GasPrice = 
new(big.Int).Mul(l2info.GasPrice, big.NewInt(2)) - TransferBalance(t, "Owner", "Owner", common.Big0, l2info, client, ctx) + builder.L2Info.GasPrice = new(big.Int).Mul(builder.L2Info.GasPrice, big.NewInt(2)) + builder.L2.TransferBalance(t, "Owner", "Owner", common.Big0, builder.L2Info) // check if the price has equilibrated _, _, _, _, _, setPrice, err := arbGasInfo.GetPricesInWei(&bind.CallOpts{}) @@ -91,22 +93,22 @@ func TestEstimate(t *testing.T) { Fatal(t, "L2 gas price did not converge", gasPrice) } - initialBalance, err := client.BalanceAt(ctx, auth.From, nil) + initialBalance, err := builder.L2.Client.BalanceAt(ctx, auth.From, nil) Require(t, err, "could not get balance") // deploy a test contract - _, tx, simple, err := mocksgen.DeploySimple(&auth, client) + _, tx, simple, err := mocksgen.DeploySimple(&auth, builder.L2.Client) Require(t, err, "could not deploy contract") - receipt, err := EnsureTxSucceeded(ctx, client, tx) + receipt, err := builder.L2.EnsureTxSucceeded(tx) Require(t, err) - header, err := client.HeaderByNumber(ctx, receipt.BlockNumber) + header, err := builder.L2.Client.HeaderByNumber(ctx, receipt.BlockNumber) Require(t, err, "could not get header") if header.BaseFee.Cmp(gasPrice) != 0 { Fatal(t, "Header has wrong basefee", header.BaseFee, gasPrice) } - balance, err := client.BalanceAt(ctx, auth.From, nil) + balance, err := builder.L2.Client.BalanceAt(ctx, auth.From, nil) Require(t, err, "could not get balance") expectedCost := receipt.GasUsed * gasPrice.Uint64() observedCost := initialBalance.Uint64() - balance.Uint64() @@ -116,7 +118,7 @@ func TestEstimate(t *testing.T) { tx, err = simple.Increment(&auth) Require(t, err, "failed to call Increment()") - _, err = EnsureTxSucceeded(ctx, client, tx) + _, err = builder.L2.EnsureTxSucceeded(tx) Require(t, err) counter, err := simple.Counter(&bind.CallOpts{}) @@ -131,11 +133,12 @@ func TestComponentEstimate(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - 
l2info, node, client := CreateTestL2(t, ctx) - defer node.StopAndWait() + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + cleanup := builder.Build(t) + defer cleanup() l1BaseFee := new(big.Int).Set(arbostypes.DefaultInitialL1BaseFee) - l2BaseFee := GetBaseFee(t, client, ctx) + l2BaseFee := builder.L2.GetBaseFee(t) colors.PrintGrey("l1 basefee ", l1BaseFee) colors.PrintGrey("l2 basefee ", l2BaseFee) @@ -144,10 +147,10 @@ func TestComponentEstimate(t *testing.T) { maxPriorityFeePerGas := big.NewInt(0) maxFeePerGas := arbmath.BigMulByUfrac(l2BaseFee, 3, 2) - l2info.GenerateAccount("User") - TransferBalance(t, "Owner", "User", userBalance, l2info, client, ctx) + builder.L2Info.GenerateAccount("User") + builder.L2.TransferBalance(t, "Owner", "User", userBalance, builder.L2Info) - from := l2info.GetAddress("User") + from := builder.L2Info.GetAddress("User") to := testhelpers.RandomAddress() gas := uint64(100000000) calldata := []byte{0x00, 0x12} @@ -171,7 +174,7 @@ func TestComponentEstimate(t *testing.T) { Value: value, Data: estimateCalldata, } - returnData, err := client.CallContract(ctx, msg, nil) + returnData, err := builder.L2.Client.CallContract(ctx, msg, nil) Require(t, err) outputs, err := nodeMethod.Outputs.Unpack(returnData) @@ -185,9 +188,8 @@ func TestComponentEstimate(t *testing.T) { baseFee, _ := outputs[2].(*big.Int) l1BaseFeeEstimate, _ := outputs[3].(*big.Int) - execNode := getExecNode(t, node) - tx := l2info.SignTxAs("User", &types.DynamicFeeTx{ - ChainID: execNode.ArbInterface.BlockChain().Config().ChainID, + tx := builder.L2Info.SignTxAs("User", &types.DynamicFeeTx{ + ChainID: builder.L2.ExecNode.ArbInterface.BlockChain().Config().ChainID, Nonce: 0, GasTipCap: maxPriorityFeePerGas, GasFeeCap: maxFeePerGas, @@ -208,8 +210,8 @@ func TestComponentEstimate(t *testing.T) { Fatal(t, baseFee, l2BaseFee.Uint64()) } - Require(t, client.SendTransaction(ctx, tx)) - receipt, err := EnsureTxSucceeded(ctx, client, tx) + Require(t, 
builder.L2.Client.SendTransaction(ctx, tx)) + receipt, err := builder.L2.EnsureTxSucceeded(tx) Require(t, err) l2Used := receipt.GasUsed - receipt.GasUsedForL1 @@ -224,14 +226,15 @@ func TestDisableL1Charging(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - _, node, client := CreateTestL2(t, ctx) - defer node.StopAndWait() + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + cleanup := builder.Build(t) + defer cleanup() addr := common.HexToAddress("0x12345678") - gasWithL1Charging, err := client.EstimateGas(ctx, ethereum.CallMsg{To: &addr}) + gasWithL1Charging, err := builder.L2.Client.EstimateGas(ctx, ethereum.CallMsg{To: &addr}) Require(t, err) - gasWithoutL1Charging, err := client.EstimateGas(ctx, ethereum.CallMsg{To: &addr, SkipL1Charging: true}) + gasWithoutL1Charging, err := builder.L2.Client.EstimateGas(ctx, ethereum.CallMsg{To: &addr, SkipL1Charging: true}) Require(t, err) if gasWithL1Charging <= gasWithoutL1Charging { @@ -241,14 +244,14 @@ func TestDisableL1Charging(t *testing.T) { Fatal(t, "Incorrect gas estimate with disabled L1 charging") } - _, err = client.CallContract(ctx, ethereum.CallMsg{To: &addr, Gas: gasWithL1Charging}, nil) + _, err = builder.L2.Client.CallContract(ctx, ethereum.CallMsg{To: &addr, Gas: gasWithL1Charging}, nil) Require(t, err) - _, err = client.CallContract(ctx, ethereum.CallMsg{To: &addr, Gas: gasWithoutL1Charging}, nil) + _, err = builder.L2.Client.CallContract(ctx, ethereum.CallMsg{To: &addr, Gas: gasWithoutL1Charging}, nil) if err == nil { Fatal(t, "CallContract passed with insufficient gas") } - _, err = client.CallContract(ctx, ethereum.CallMsg{To: &addr, Gas: gasWithoutL1Charging, SkipL1Charging: true}, nil) + _, err = builder.L2.Client.CallContract(ctx, ethereum.CallMsg{To: &addr, Gas: gasWithoutL1Charging, SkipL1Charging: true}, nil) Require(t, err) } diff --git a/system_tests/fees_test.go b/system_tests/fees_test.go index 17ab7b69c4..3ac5b29b0a 100644 --- 
a/system_tests/fees_test.go +++ b/system_tests/fees_test.go @@ -53,7 +53,7 @@ func TestSequencerFeePaid(t *testing.T) { l1Estimate, err := arbGasInfo.GetL1BaseFeeEstimate(callOpts) Require(t, err) - baseFee := GetBaseFee(t, builder.L2.Client, ctx) + baseFee := builder.L2.GetBaseFee(t) builder.L2Info.GasPrice = baseFee testFees := func(tip uint64) (*big.Int, *big.Int) { diff --git a/system_tests/infra_fee_test.go b/system_tests/infra_fee_test.go index a56e054563..9366fc204e 100644 --- a/system_tests/infra_fee_test.go +++ b/system_tests/infra_fee_test.go @@ -23,45 +23,46 @@ func TestInfraFee(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - l2info, node, client := CreateTestL2WithConfig(t, ctx, nil, nil, nil, true) - defer node.StopAndWait() + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + cleanup := builder.Build(t) + defer cleanup() - l2info.GenerateAccount("User2") + builder.L2Info.GenerateAccount("User2") - ownerTxOpts := l2info.GetDefaultTransactOpts("Owner", ctx) + ownerTxOpts := builder.L2Info.GetDefaultTransactOpts("Owner", ctx) ownerTxOpts.Context = ctx - ownerCallOpts := l2info.GetDefaultCallOpts("Owner", ctx) + ownerCallOpts := builder.L2Info.GetDefaultCallOpts("Owner", ctx) - arbowner, err := precompilesgen.NewArbOwner(common.HexToAddress("70"), client) + arbowner, err := precompilesgen.NewArbOwner(common.HexToAddress("70"), builder.L2.Client) Require(t, err) - arbownerPublic, err := precompilesgen.NewArbOwnerPublic(common.HexToAddress("6b"), client) + arbownerPublic, err := precompilesgen.NewArbOwnerPublic(common.HexToAddress("6b"), builder.L2.Client) Require(t, err) networkFeeAddr, err := arbownerPublic.GetNetworkFeeAccount(ownerCallOpts) Require(t, err) infraFeeAddr := common.BytesToAddress(crypto.Keccak256([]byte{3, 2, 6})) tx, err := arbowner.SetInfraFeeAccount(&ownerTxOpts, infraFeeAddr) Require(t, err) - _, err = EnsureTxSucceeded(ctx, client, tx) + _, err = builder.L2.EnsureTxSucceeded(tx) 
Require(t, err) - _, simple := deploySimple(t, ctx, ownerTxOpts, client) + _, simple := builder.L2.DeploySimple(t, ownerTxOpts) - netFeeBalanceBefore, err := client.BalanceAt(ctx, networkFeeAddr, nil) + netFeeBalanceBefore, err := builder.L2.Client.BalanceAt(ctx, networkFeeAddr, nil) Require(t, err) - infraFeeBalanceBefore, err := client.BalanceAt(ctx, infraFeeAddr, nil) + infraFeeBalanceBefore, err := builder.L2.Client.BalanceAt(ctx, infraFeeAddr, nil) Require(t, err) tx, err = simple.Increment(&ownerTxOpts) Require(t, err) - receipt, err := EnsureTxSucceeded(ctx, client, tx) + receipt, err := builder.L2.EnsureTxSucceeded(tx) Require(t, err) l2GasUsed := receipt.GasUsed - receipt.GasUsedForL1 expectedFunds := arbmath.BigMulByUint(arbmath.UintToBig(l2pricing.InitialBaseFeeWei), l2GasUsed) expectedBalanceAfter := arbmath.BigAdd(infraFeeBalanceBefore, expectedFunds) - netFeeBalanceAfter, err := client.BalanceAt(ctx, networkFeeAddr, nil) + netFeeBalanceAfter, err := builder.L2.Client.BalanceAt(ctx, networkFeeAddr, nil) Require(t, err) - infraFeeBalanceAfter, err := client.BalanceAt(ctx, infraFeeAddr, nil) + infraFeeBalanceAfter, err := builder.L2.Client.BalanceAt(ctx, infraFeeAddr, nil) Require(t, err) if !arbmath.BigEquals(netFeeBalanceBefore, netFeeBalanceAfter) { diff --git a/system_tests/initialization_test.go b/system_tests/initialization_test.go index 0e055adc5f..6707df1c64 100644 --- a/system_tests/initialization_test.go +++ b/system_tests/initialization_test.go @@ -62,14 +62,16 @@ func TestInitContract(t *testing.T) { l2info.ArbInitData.Accounts = append(l2info.ArbInitData.Accounts, accountInfo) expectedSums[accountAddress] = sum } - _, node, client := CreateTestL2WithConfig(t, ctx, l2info, nil, nil, true) - defer node.StopAndWait() + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + builder.L2Info = l2info + cleanup := builder.Build(t) + defer cleanup() for accountAddress, sum := range expectedSums { msg := ethereum.CallMsg{ To: &accountAddress, } - 
res, err := client.CallContract(ctx, msg, big.NewInt(0)) + res, err := builder.L2.Client.CallContract(ctx, msg, big.NewInt(0)) Require(t, err) resBig := new(big.Int).SetBytes(res) if resBig.Cmp(sum) != 0 { diff --git a/system_tests/log_subscription_test.go b/system_tests/log_subscription_test.go index 5ee1732fb0..e4402533a6 100644 --- a/system_tests/log_subscription_test.go +++ b/system_tests/log_subscription_test.go @@ -19,21 +19,22 @@ func TestLogSubscription(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - l2info, node, client := CreateTestL2(t, ctx) - defer node.StopAndWait() + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + cleanup := builder.Build(t) + defer cleanup() - auth := l2info.GetDefaultTransactOpts("Owner", ctx) - arbSys, err := precompilesgen.NewArbSys(types.ArbSysAddress, client) + auth := builder.L2Info.GetDefaultTransactOpts("Owner", ctx) + arbSys, err := precompilesgen.NewArbSys(types.ArbSysAddress, builder.L2.Client) Require(t, err) logChan := make(chan types.Log, 128) - subscription, err := client.SubscribeFilterLogs(ctx, ethereum.FilterQuery{}, logChan) + subscription, err := builder.L2.Client.SubscribeFilterLogs(ctx, ethereum.FilterQuery{}, logChan) Require(t, err) defer subscription.Unsubscribe() tx, err := arbSys.WithdrawEth(&auth, common.Address{}) Require(t, err) - receipt, err := EnsureTxSucceeded(ctx, client, tx) + receipt, err := builder.L2.EnsureTxSucceeded(tx) Require(t, err) if len(receipt.Logs) != 1 { @@ -52,6 +53,6 @@ func TestLogSubscription(t *testing.T) { if !reflect.DeepEqual(receiptLog, subscriptionLog) { Fatal(t, "Receipt log", receiptLog, "is different than subscription log", subscriptionLog) } - _, err = client.BlockByHash(ctx, subscriptionLog.BlockHash) + _, err = builder.L2.Client.BlockByHash(ctx, subscriptionLog.BlockHash) Require(t, err) } diff --git a/system_tests/nodeinterface_test.go b/system_tests/nodeinterface_test.go index 167f2204cd..40953a449d 100644 --- 
a/system_tests/nodeinterface_test.go +++ b/system_tests/nodeinterface_test.go @@ -19,23 +19,23 @@ func TestL2BlockRangeForL1(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - l2info, node, l2client, l1info, _, _, l1stack := createTestNodeOnL1(t, ctx, true) - defer requireClose(t, l1stack) - defer node.StopAndWait() - user := l1info.GetDefaultTransactOpts("User", ctx) + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + cleanup := builder.Build(t) + defer cleanup() + user := builder.L1Info.GetDefaultTransactOpts("User", ctx) numTransactions := 200 for i := 0; i < numTransactions; i++ { - TransferBalanceTo(t, "Owner", util.RemapL1Address(user.From), big.NewInt(1e18), l2info, l2client, ctx) + builder.L2.TransferBalanceTo(t, "Owner", util.RemapL1Address(user.From), big.NewInt(1e18), builder.L2Info) } - nodeInterface, err := node_interfacegen.NewNodeInterface(types.NodeInterfaceAddress, l2client) + nodeInterface, err := node_interfacegen.NewNodeInterface(types.NodeInterfaceAddress, builder.L2.Client) if err != nil { t.Fatalf("Error creating node interface: %v", err) } l1BlockNums := map[uint64]*[2]uint64{} - latestL2, err := l2client.BlockNumber(ctx) + latestL2, err := builder.L2.Client.BlockNumber(ctx) if err != nil { t.Fatalf("Error querying most recent l2 block: %v", err) } diff --git a/system_tests/outbox_test.go b/system_tests/outbox_test.go index 6b43cc83b0..d0ca0ccda3 100644 --- a/system_tests/outbox_test.go +++ b/system_tests/outbox_test.go @@ -35,14 +35,15 @@ func TestOutboxProofs(t *testing.T) { withdrawTopic := arbSysAbi.Events["L2ToL1Tx"].ID merkleTopic := arbSysAbi.Events["SendMerkleUpdate"].ID - l2info, node, client := CreateTestL2(t, ctx) - defer node.StopAndWait() + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + cleanup := builder.Build(t) + defer cleanup() - auth := l2info.GetDefaultTransactOpts("Owner", ctx) + auth := builder.L2Info.GetDefaultTransactOpts("Owner", ctx) - arbSys, err := 
precompilesgen.NewArbSys(types.ArbSysAddress, client) + arbSys, err := precompilesgen.NewArbSys(types.ArbSysAddress, builder.L2.Client) Require(t, err) - nodeInterface, err := node_interfacegen.NewNodeInterface(types.NodeInterfaceAddress, client) + nodeInterface, err := node_interfacegen.NewNodeInterface(types.NodeInterfaceAddress, builder.L2.Client) Require(t, err) txnCount := int64(1 + rand.Intn(16)) @@ -71,7 +72,7 @@ func TestOutboxProofs(t *testing.T) { txns = append(txns, tx.Hash()) time.Sleep(4 * time.Millisecond) // Geth takes a few ms for the receipt to show up - _, err = client.TransactionReceipt(ctx, tx.Hash()) + _, err = builder.L2.Client.TransactionReceipt(ctx, tx.Hash()) if err == nil { merkleState, err := arbSys.SendMerkleTreeState(&bind.CallOpts{}) Require(t, err, "could not get merkle root") @@ -86,7 +87,7 @@ func TestOutboxProofs(t *testing.T) { for _, tx := range txns { var receipt *types.Receipt - receipt, err = client.TransactionReceipt(ctx, tx) + receipt, err = builder.L2.Client.TransactionReceipt(ctx, tx) Require(t, err, "No receipt for txn") if receipt.Status != types.ReceiptStatusSuccessful { @@ -187,7 +188,7 @@ func TestOutboxProofs(t *testing.T) { // in one lookup, query geth for all the data we need to construct a proof var logs []types.Log if len(query) > 0 { - logs, err = client.FilterLogs(ctx, ethereum.FilterQuery{ + logs, err = builder.L2.Client.FilterLogs(ctx, ethereum.FilterQuery{ Addresses: []common.Address{ types.ArbSysAddress, }, diff --git a/system_tests/precompile_test.go b/system_tests/precompile_test.go index ad08ff7471..10db09275b 100644 --- a/system_tests/precompile_test.go +++ b/system_tests/precompile_test.go @@ -21,10 +21,11 @@ func TestPurePrecompileMethodCalls(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - _, node, client := CreateTestL2(t, ctx) - defer node.StopAndWait() + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + cleanup := builder.Build(t) + defer cleanup() - 
arbSys, err := precompilesgen.NewArbSys(common.HexToAddress("0x64"), client) + arbSys, err := precompilesgen.NewArbSys(common.HexToAddress("0x64"), builder.L2.Client) Require(t, err, "could not deploy ArbSys contract") chainId, err := arbSys.ArbChainID(&bind.CallOpts{}) Require(t, err, "failed to get the ChainID") @@ -37,10 +38,11 @@ func TestViewLogReverts(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - _, node, client := CreateTestL2(t, ctx) - defer node.StopAndWait() + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + cleanup := builder.Build(t) + defer cleanup() - arbDebug, err := precompilesgen.NewArbDebug(common.HexToAddress("0xff"), client) + arbDebug, err := precompilesgen.NewArbDebug(common.HexToAddress("0xff"), builder.L2.Client) Require(t, err, "could not deploy ArbSys contract") err = arbDebug.EventsView(nil) @@ -53,11 +55,12 @@ func TestCustomSolidityErrors(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - _, node, client := CreateTestL2(t, ctx) - defer node.StopAndWait() + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + cleanup := builder.Build(t) + defer cleanup() callOpts := &bind.CallOpts{Context: ctx} - arbDebug, err := precompilesgen.NewArbDebug(common.HexToAddress("0xff"), client) + arbDebug, err := precompilesgen.NewArbDebug(common.HexToAddress("0xff"), builder.L2.Client) Require(t, err, "could not bind ArbDebug contract") customError := arbDebug.CustomRevert(callOpts, 1024) if customError == nil { @@ -69,7 +72,7 @@ func TestCustomSolidityErrors(t *testing.T) { Fatal(t, observedMessage) } - arbSys, err := precompilesgen.NewArbSys(arbos.ArbSysAddress, client) + arbSys, err := precompilesgen.NewArbSys(arbos.ArbSysAddress, builder.L2.Client) Require(t, err, "could not bind ArbSys contract") _, customError = arbSys.ArbBlockHash(callOpts, big.NewInt(1e9)) if customError == nil { @@ -86,11 +89,12 @@ func TestPrecompileErrorGasLeft(t *testing.T) { ctx, 
cancel := context.WithCancel(context.Background()) defer cancel() - info, node, client := CreateTestL2(t, ctx) - defer node.StopAndWait() + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + cleanup := builder.Build(t) + defer cleanup() - auth := info.GetDefaultTransactOpts("Faucet", ctx) - _, _, simple, err := mocksgen.DeploySimple(&auth, client) + auth := builder.L2Info.GetDefaultTransactOpts("Faucet", ctx) + _, _, simple, err := mocksgen.DeploySimple(&auth, builder.L2.Client) Require(t, err) assertNotAllGasConsumed := func(to common.Address, input []byte) { diff --git a/system_tests/recreatestate_rpc_test.go b/system_tests/recreatestate_rpc_test.go index 285548dcdb..9429155d7c 100644 --- a/system_tests/recreatestate_rpc_test.go +++ b/system_tests/recreatestate_rpc_test.go @@ -23,26 +23,23 @@ import ( func prepareNodeWithHistory(t *testing.T, ctx context.Context, execConfig *gethexec.Config, txCount uint64) (node *arbnode.Node, executionNode *gethexec.ExecutionNode, l2client *ethclient.Client, cancel func()) { t.Helper() - l2info, node, l2client, _, _, _, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, nil, execConfig, nil, nil) - cancel = func() { - defer requireClose(t, l1stack) - defer node.StopAndWait() - } - l2info.GenerateAccount("User2") + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + builder.execConfig = execConfig + cleanup := builder.Build(t) + builder.L2Info.GenerateAccount("User2") var txs []*types.Transaction for i := uint64(0); i < txCount; i++ { - tx := l2info.PrepareTx("Owner", "User2", l2info.TransferGas, common.Big1, nil) + tx := builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, common.Big1, nil) txs = append(txs, tx) - err := l2client.SendTransaction(ctx, tx) + err := builder.L2.Client.SendTransaction(ctx, tx) Require(t, err) } for _, tx := range txs { - _, err := EnsureTxSucceeded(ctx, l2client, tx) + _, err := builder.L2.EnsureTxSucceeded(tx) Require(t, err) } - exec := getExecNode(t, node) - return 
node, exec, l2client, cancel + return builder.L2.ConsensusNode, builder.L2.ExecNode, builder.L2.Client, cleanup } func fillHeaderCache(t *testing.T, bc *core.BlockChain, from, to uint64) { diff --git a/system_tests/reorg_resequencing_test.go b/system_tests/reorg_resequencing_test.go index bdd4c4af45..fcc6603aed 100644 --- a/system_tests/reorg_resequencing_test.go +++ b/system_tests/reorg_resequencing_test.go @@ -19,29 +19,28 @@ func TestReorgResequencing(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - l2info, node, client := CreateTestL2(t, ctx) - defer node.StopAndWait() + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + cleanup := builder.Build(t) + defer cleanup() - execNode := getExecNode(t, node) - - startMsgCount, err := node.TxStreamer.GetMessageCount() + startMsgCount, err := builder.L2.ConsensusNode.TxStreamer.GetMessageCount() Require(t, err) - l2info.GenerateAccount("Intermediate") - l2info.GenerateAccount("User1") - l2info.GenerateAccount("User2") - l2info.GenerateAccount("User3") - l2info.GenerateAccount("User4") - TransferBalance(t, "Owner", "User1", big.NewInt(params.Ether), l2info, client, ctx) - TransferBalance(t, "Owner", "Intermediate", big.NewInt(params.Ether*3), l2info, client, ctx) - TransferBalance(t, "Intermediate", "User2", big.NewInt(params.Ether), l2info, client, ctx) - TransferBalance(t, "Intermediate", "User3", big.NewInt(params.Ether), l2info, client, ctx) + builder.L2Info.GenerateAccount("Intermediate") + builder.L2Info.GenerateAccount("User1") + builder.L2Info.GenerateAccount("User2") + builder.L2Info.GenerateAccount("User3") + builder.L2Info.GenerateAccount("User4") + builder.L2.TransferBalance(t, "Owner", "User1", big.NewInt(params.Ether), builder.L2Info) + builder.L2.TransferBalance(t, "Owner", "Intermediate", big.NewInt(params.Ether*3), builder.L2Info) + builder.L2.TransferBalance(t, "Intermediate", "User2", big.NewInt(params.Ether), builder.L2Info) + builder.L2.TransferBalance(t, 
"Intermediate", "User3", big.NewInt(params.Ether), builder.L2Info) // Intermediate does not have exactly 1 ether because of fees accountsWithBalance := []string{"User1", "User2", "User3"} verifyBalances := func(scenario string) { for _, account := range accountsWithBalance { - balance, err := client.BalanceAt(ctx, l2info.GetAddress(account), nil) + balance, err := builder.L2.Client.BalanceAt(ctx, builder.L2Info.GetAddress(account), nil) Require(t, err) if balance.Int64() != params.Ether { Fatal(t, "expected account", account, "to have a balance of 1 ether but instead it has", balance, "wei "+scenario) @@ -50,15 +49,15 @@ func TestReorgResequencing(t *testing.T) { } verifyBalances("before reorg") - err = node.TxStreamer.ReorgTo(startMsgCount) + err = builder.L2.ConsensusNode.TxStreamer.ReorgTo(startMsgCount) Require(t, err) - _, err = execNode.ExecEngine.HeadMessageNumberSync(t) + _, err = builder.L2.ExecNode.ExecEngine.HeadMessageNumberSync(t) Require(t, err) verifyBalances("after empty reorg") - prevMessage, err := node.TxStreamer.GetMessage(startMsgCount - 1) + prevMessage, err := builder.L2.ConsensusNode.TxStreamer.GetMessage(startMsgCount - 1) Require(t, err) delayedIndexHash := common.BigToHash(big.NewInt(int64(prevMessage.DelayedMessagesRead))) newMessage := &arbostypes.L1IncomingMessage{ @@ -70,24 +69,24 @@ func TestReorgResequencing(t *testing.T) { RequestId: &delayedIndexHash, L1BaseFee: common.Big0, }, - L2msg: append(l2info.GetAddress("User4").Bytes(), math.U256Bytes(big.NewInt(params.Ether))...), + L2msg: append(builder.L2Info.GetAddress("User4").Bytes(), math.U256Bytes(big.NewInt(params.Ether))...), } - err = node.TxStreamer.AddMessages(startMsgCount, true, []arbostypes.MessageWithMetadata{{ + err = builder.L2.ConsensusNode.TxStreamer.AddMessages(startMsgCount, true, []arbostypes.MessageWithMetadata{{ Message: newMessage, DelayedMessagesRead: prevMessage.DelayedMessagesRead + 1, }}) Require(t, err) - _, err = 
execNode.ExecEngine.HeadMessageNumberSync(t) + _, err = builder.L2.ExecNode.ExecEngine.HeadMessageNumberSync(t) Require(t, err) accountsWithBalance = append(accountsWithBalance, "User4") verifyBalances("after reorg with new deposit") - err = node.TxStreamer.ReorgTo(startMsgCount) + err = builder.L2.ConsensusNode.TxStreamer.ReorgTo(startMsgCount) Require(t, err) - _, err = execNode.ExecEngine.HeadMessageNumberSync(t) + _, err = builder.L2.ExecNode.ExecEngine.HeadMessageNumberSync(t) Require(t, err) verifyBalances("after second empty reorg") diff --git a/system_tests/retryable_test.go b/system_tests/retryable_test.go index 0fc6d24ed0..3400af335d 100644 --- a/system_tests/retryable_test.go +++ b/system_tests/retryable_test.go @@ -14,16 +14,15 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbnode" "github.com/offchainlabs/nitro/arbos" "github.com/offchainlabs/nitro/arbos/arbostypes" + "github.com/offchainlabs/nitro/arbos/l2pricing" "github.com/offchainlabs/nitro/arbos/retryables" "github.com/offchainlabs/nitro/arbos/util" "github.com/offchainlabs/nitro/execution/gethexec" - "github.com/offchainlabs/nitro/arbos/l2pricing" "github.com/offchainlabs/nitro/solgen/go/bridgegen" "github.com/offchainlabs/nitro/solgen/go/mocksgen" "github.com/offchainlabs/nitro/solgen/go/node_interfacegen" @@ -33,25 +32,23 @@ import ( ) func retryableSetup(t *testing.T) ( - *BlockchainTestInfo, - *BlockchainTestInfo, - *ethclient.Client, - *ethclient.Client, + *NodeBuilder, *bridgegen.Inbox, func(*types.Receipt) *types.Transaction, context.Context, func(), ) { ctx, cancel := context.WithCancel(context.Background()) - l2info, l2node, l2client, l1info, _, l1client, l1stack := createTestNodeOnL1(t, ctx, true) + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + 
builder.Build(t) - l2info.GenerateAccount("User2") - l2info.GenerateAccount("Beneficiary") - l2info.GenerateAccount("Burn") + builder.L2Info.GenerateAccount("User2") + builder.L2Info.GenerateAccount("Beneficiary") + builder.L2Info.GenerateAccount("Burn") - delayedInbox, err := bridgegen.NewInbox(l1info.GetAddress("Inbox"), l1client) + delayedInbox, err := bridgegen.NewInbox(builder.L1Info.GetAddress("Inbox"), builder.L1.Client) Require(t, err) - delayedBridge, err := arbnode.NewDelayedBridge(l1client, l1info.GetAddress("Bridge"), 0) + delayedBridge, err := arbnode.NewDelayedBridge(builder.L1.Client, builder.L1Info.GetAddress("Bridge"), 0) Require(t, err) lookupL2Tx := func(l1Receipt *types.Receipt) *types.Transaction { @@ -91,15 +88,15 @@ func retryableSetup(t *testing.T) ( // burn some gas so that the faucet's Callvalue + Balance never exceeds a uint256 discard := arbmath.BigMul(big.NewInt(1e12), big.NewInt(1e12)) - TransferBalance(t, "Faucet", "Burn", discard, l2info, l2client, ctx) + builder.L2.TransferBalance(t, "Faucet", "Burn", discard, builder.L2Info) teardown := func() { // check the integrity of the RPC - blockNum, err := l2client.BlockNumber(ctx) + blockNum, err := builder.L2.Client.BlockNumber(ctx) Require(t, err, "failed to get L2 block number") for number := uint64(0); number < blockNum; number++ { - block, err := l2client.BlockByNumber(ctx, arbmath.UintToBig(number)) + block, err := builder.L2.Client.BlockByNumber(ctx, arbmath.UintToBig(number)) Require(t, err, "failed to get L2 block", number, "of", blockNum) if block.Number().Uint64() != number { Fatal(t, "block number mismatch", number, block.Number().Uint64()) @@ -108,19 +105,20 @@ func retryableSetup(t *testing.T) ( cancel() - l2node.StopAndWait() - requireClose(t, l1stack) + builder.L2.ConsensusNode.StopAndWait() + requireClose(t, builder.L1.Stack) } - return l2info, l1info, l2client, l1client, delayedInbox, lookupL2Tx, ctx, teardown + return builder, delayedInbox, lookupL2Tx, ctx, teardown } 
func TestRetryableNoExist(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - _, node, l2client := CreateTestL2(t, ctx) - defer node.StopAndWait() + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + cleanup := builder.Build(t) + defer cleanup() - arbRetryableTx, err := precompilesgen.NewArbRetryableTx(common.HexToAddress("6e"), l2client) + arbRetryableTx, err := precompilesgen.NewArbRetryableTx(common.HexToAddress("6e"), builder.L2.Client) Require(t, err) _, err = arbRetryableTx.GetTimeout(&bind.CallOpts{}, common.Hash{}) if err.Error() != "execution reverted: error NoTicketWithID()" { @@ -130,20 +128,20 @@ func TestRetryableNoExist(t *testing.T) { func TestSubmitRetryableImmediateSuccess(t *testing.T) { t.Parallel() - l2info, l1info, l2client, l1client, delayedInbox, lookupL2Tx, ctx, teardown := retryableSetup(t) + builder, delayedInbox, lookupL2Tx, ctx, teardown := retryableSetup(t) defer teardown() - user2Address := l2info.GetAddress("User2") - beneficiaryAddress := l2info.GetAddress("Beneficiary") + user2Address := builder.L2Info.GetAddress("User2") + beneficiaryAddress := builder.L2Info.GetAddress("Beneficiary") deposit := arbmath.BigMul(big.NewInt(1e12), big.NewInt(1e12)) callValue := big.NewInt(1e6) - nodeInterface, err := node_interfacegen.NewNodeInterface(types.NodeInterfaceAddress, l2client) + nodeInterface, err := node_interfacegen.NewNodeInterface(types.NodeInterfaceAddress, builder.L2.Client) Require(t, err, "failed to deploy NodeInterface") // estimate the gas needed to auto redeem the retryable - usertxoptsL2 := l2info.GetDefaultTransactOpts("Faucet", ctx) + usertxoptsL2 := builder.L2Info.GetDefaultTransactOpts("Faucet", ctx) usertxoptsL2.NoSend = true usertxoptsL2.GasMargin = 0 tx, err := nodeInterface.EstimateRetryableTicket( @@ -161,7 +159,7 @@ func TestSubmitRetryableImmediateSuccess(t *testing.T) { colors.PrintBlue("estimate: ", estimate) // submit & auto redeem the retryable using the gas estimate - 
usertxoptsL1 := l1info.GetDefaultTransactOpts("Faucet", ctx) + usertxoptsL1 := builder.L1Info.GetDefaultTransactOpts("Faucet", ctx) usertxoptsL1.Value = deposit l1tx, err := delayedInbox.CreateRetryableTicket( &usertxoptsL1, @@ -176,21 +174,21 @@ func TestSubmitRetryableImmediateSuccess(t *testing.T) { ) Require(t, err) - l1Receipt, err := EnsureTxSucceeded(ctx, l1client, l1tx) + l1Receipt, err := builder.L1.EnsureTxSucceeded(l1tx) Require(t, err) if l1Receipt.Status != types.ReceiptStatusSuccessful { Fatal(t, "l1Receipt indicated failure") } - waitForL1DelayBlocks(t, ctx, l1client, l1info) + waitForL1DelayBlocks(t, ctx, builder) - receipt, err := EnsureTxSucceeded(ctx, l2client, lookupL2Tx(l1Receipt)) + receipt, err := builder.L2.EnsureTxSucceeded(lookupL2Tx(l1Receipt)) Require(t, err) if receipt.Status != types.ReceiptStatusSuccessful { Fatal(t) } - l2balance, err := l2client.BalanceAt(ctx, l2info.GetAddress("User2"), nil) + l2balance, err := builder.L2.Client.BalanceAt(ctx, builder.L2Info.GetAddress("User2"), nil) Require(t, err) if !arbmath.BigEquals(l2balance, big.NewInt(1e6)) { @@ -200,18 +198,18 @@ func TestSubmitRetryableImmediateSuccess(t *testing.T) { func TestSubmitRetryableFailThenRetry(t *testing.T) { t.Parallel() - l2info, l1info, l2client, l1client, delayedInbox, lookupL2Tx, ctx, teardown := retryableSetup(t) + builder, delayedInbox, lookupL2Tx, ctx, teardown := retryableSetup(t) defer teardown() - ownerTxOpts := l2info.GetDefaultTransactOpts("Owner", ctx) - usertxopts := l1info.GetDefaultTransactOpts("Faucet", ctx) + ownerTxOpts := builder.L2Info.GetDefaultTransactOpts("Owner", ctx) + usertxopts := builder.L1Info.GetDefaultTransactOpts("Faucet", ctx) usertxopts.Value = arbmath.BigMul(big.NewInt(1e12), big.NewInt(1e12)) - simpleAddr, simple := deploySimple(t, ctx, ownerTxOpts, l2client) + simpleAddr, simple := builder.L2.DeploySimple(t, ownerTxOpts) simpleABI, err := mocksgen.SimpleMetaData.GetAbi() Require(t, err) - beneficiaryAddress := 
l2info.GetAddress("Beneficiary") + beneficiaryAddress := builder.L2Info.GetAddress("Beneficiary") l1tx, err := delayedInbox.CreateRetryableTicket( &usertxopts, simpleAddr, @@ -226,15 +224,15 @@ func TestSubmitRetryableFailThenRetry(t *testing.T) { ) Require(t, err) - l1Receipt, err := EnsureTxSucceeded(ctx, l1client, l1tx) + l1Receipt, err := builder.L1.EnsureTxSucceeded(l1tx) Require(t, err) if l1Receipt.Status != types.ReceiptStatusSuccessful { Fatal(t, "l1Receipt indicated failure") } - waitForL1DelayBlocks(t, ctx, l1client, l1info) + waitForL1DelayBlocks(t, ctx, builder) - receipt, err := EnsureTxSucceeded(ctx, l2client, lookupL2Tx(l1Receipt)) + receipt, err := builder.L2.EnsureTxSucceeded(lookupL2Tx(l1Receipt)) Require(t, err) if len(receipt.Logs) != 2 { Fatal(t, len(receipt.Logs)) @@ -243,23 +241,23 @@ func TestSubmitRetryableFailThenRetry(t *testing.T) { firstRetryTxId := receipt.Logs[1].Topics[2] // get receipt for the auto redeem, make sure it failed - receipt, err = WaitForTx(ctx, l2client, firstRetryTxId, time.Second*5) + receipt, err = WaitForTx(ctx, builder.L2.Client, firstRetryTxId, time.Second*5) Require(t, err) if receipt.Status != types.ReceiptStatusFailed { Fatal(t, receipt.GasUsed) } - arbRetryableTx, err := precompilesgen.NewArbRetryableTx(common.HexToAddress("6e"), l2client) + arbRetryableTx, err := precompilesgen.NewArbRetryableTx(common.HexToAddress("6e"), builder.L2.Client) Require(t, err) tx, err := arbRetryableTx.Redeem(&ownerTxOpts, ticketId) Require(t, err) - receipt, err = EnsureTxSucceeded(ctx, l2client, tx) + receipt, err = builder.L2.EnsureTxSucceeded(tx) Require(t, err) retryTxId := receipt.Logs[0].Topics[2] // check the receipt for the retry - receipt, err = WaitForTx(ctx, l2client, retryTxId, time.Second*1) + receipt, err = WaitForTx(ctx, builder.L2.Client, retryTxId, time.Second*1) Require(t, err) if receipt.Status != types.ReceiptStatusSuccessful { Fatal(t, receipt.Status) @@ -289,32 +287,32 @@ func 
TestSubmitRetryableFailThenRetry(t *testing.T) { func TestSubmissionGasCosts(t *testing.T) { t.Parallel() - l2info, l1info, l2client, l1client, delayedInbox, lookupL2Tx, ctx, teardown := retryableSetup(t) + builder, delayedInbox, lookupL2Tx, ctx, teardown := retryableSetup(t) defer teardown() - infraFeeAddr, networkFeeAddr := setupFeeAddresses(t, ctx, l2client, l2info) - elevateL2Basefee(t, ctx, l2client, l2info) + infraFeeAddr, networkFeeAddr := setupFeeAddresses(t, ctx, builder) + elevateL2Basefee(t, ctx, builder) - usertxopts := l1info.GetDefaultTransactOpts("Faucet", ctx) + usertxopts := builder.L1Info.GetDefaultTransactOpts("Faucet", ctx) usertxopts.Value = arbmath.BigMul(big.NewInt(1e12), big.NewInt(1e12)) - l2info.GenerateAccount("Refund") - l2info.GenerateAccount("Receive") - faucetAddress := util.RemapL1Address(l1info.GetAddress("Faucet")) - beneficiaryAddress := l2info.GetAddress("Beneficiary") - feeRefundAddress := l2info.GetAddress("Refund") - receiveAddress := l2info.GetAddress("Receive") + builder.L2Info.GenerateAccount("Refund") + builder.L2Info.GenerateAccount("Receive") + faucetAddress := util.RemapL1Address(builder.L1Info.GetAddress("Faucet")) + beneficiaryAddress := builder.L2Info.GetAddress("Beneficiary") + feeRefundAddress := builder.L2Info.GetAddress("Refund") + receiveAddress := builder.L2Info.GetAddress("Receive") colors.PrintBlue("Faucet ", faucetAddress) colors.PrintBlue("Receive ", receiveAddress) colors.PrintBlue("Beneficiary ", beneficiaryAddress) colors.PrintBlue("Fee Refund ", feeRefundAddress) - fundsBeforeSubmit, err := l2client.BalanceAt(ctx, faucetAddress, nil) + fundsBeforeSubmit, err := builder.L2.Client.BalanceAt(ctx, faucetAddress, nil) Require(t, err) - infraBalanceBefore, err := l2client.BalanceAt(ctx, infraFeeAddr, nil) + infraBalanceBefore, err := builder.L2.Client.BalanceAt(ctx, infraFeeAddr, nil) Require(t, err) - networkBalanceBefore, err := l2client.BalanceAt(ctx, networkFeeAddr, nil) + networkBalanceBefore, err := 
builder.L2.Client.BalanceAt(ctx, networkFeeAddr, nil) Require(t, err) usefulGas := params.TxGas @@ -338,28 +336,28 @@ func TestSubmissionGasCosts(t *testing.T) { ) Require(t, err) - l1Receipt, err := EnsureTxSucceeded(ctx, l1client, l1tx) + l1Receipt, err := builder.L1.EnsureTxSucceeded(l1tx) Require(t, err) if l1Receipt.Status != types.ReceiptStatusSuccessful { Fatal(t, "l1Receipt indicated failure") } - waitForL1DelayBlocks(t, ctx, l1client, l1info) + waitForL1DelayBlocks(t, ctx, builder) submissionTxOuter := lookupL2Tx(l1Receipt) - submissionReceipt, err := EnsureTxSucceeded(ctx, l2client, submissionTxOuter) + submissionReceipt, err := builder.L2.EnsureTxSucceeded(submissionTxOuter) Require(t, err) if len(submissionReceipt.Logs) != 2 { Fatal(t, "Unexpected number of logs:", len(submissionReceipt.Logs)) } firstRetryTxId := submissionReceipt.Logs[1].Topics[2] // get receipt for the auto redeem - redeemReceipt, err := WaitForTx(ctx, l2client, firstRetryTxId, time.Second*5) + redeemReceipt, err := WaitForTx(ctx, builder.L2.Client, firstRetryTxId, time.Second*5) Require(t, err) if redeemReceipt.Status != types.ReceiptStatusSuccessful { Fatal(t, "first retry tx failed") } - redeemBlock, err := l2client.HeaderByNumber(ctx, redeemReceipt.BlockNumber) + redeemBlock, err := builder.L2.Client.HeaderByNumber(ctx, redeemReceipt.BlockNumber) Require(t, err) l2BaseFee := redeemBlock.BaseFee @@ -367,18 +365,18 @@ func TestSubmissionGasCosts(t *testing.T) { excessWei := arbmath.BigMulByUint(l2BaseFee, excessGasLimit) excessWei.Add(excessWei, arbmath.BigMul(excessGasPrice, retryableGas)) - fundsAfterSubmit, err := l2client.BalanceAt(ctx, faucetAddress, nil) + fundsAfterSubmit, err := builder.L2.Client.BalanceAt(ctx, faucetAddress, nil) Require(t, err) - beneficiaryFunds, err := l2client.BalanceAt(ctx, beneficiaryAddress, nil) + beneficiaryFunds, err := builder.L2.Client.BalanceAt(ctx, beneficiaryAddress, nil) Require(t, err) - refundFunds, err := l2client.BalanceAt(ctx, 
feeRefundAddress, nil) + refundFunds, err := builder.L2.Client.BalanceAt(ctx, feeRefundAddress, nil) Require(t, err) - receiveFunds, err := l2client.BalanceAt(ctx, receiveAddress, nil) + receiveFunds, err := builder.L2.Client.BalanceAt(ctx, receiveAddress, nil) Require(t, err) - infraBalanceAfter, err := l2client.BalanceAt(ctx, infraFeeAddr, nil) + infraBalanceAfter, err := builder.L2.Client.BalanceAt(ctx, infraFeeAddr, nil) Require(t, err) - networkBalanceAfter, err := l2client.BalanceAt(ctx, networkFeeAddr, nil) + networkBalanceAfter, err := builder.L2.Client.BalanceAt(ctx, networkFeeAddr, nil) Require(t, err) colors.PrintBlue("CallGas ", retryableGas) @@ -425,7 +423,7 @@ func TestSubmissionGasCosts(t *testing.T) { Fatal(t, "Supplied gas was improperly deducted\n", fundsBeforeSubmit, "\n", fundsAfterSubmit) } - arbGasInfo, err := precompilesgen.NewArbGasInfo(common.HexToAddress("0x6c"), l2client) + arbGasInfo, err := precompilesgen.NewArbGasInfo(common.HexToAddress("0x6c"), builder.L2.Client) Require(t, err) minimumBaseFee, err := arbGasInfo.GetMinimumGasPrice(&bind.CallOpts{Context: ctx}) Require(t, err) @@ -450,28 +448,28 @@ func TestSubmissionGasCosts(t *testing.T) { } } -func waitForL1DelayBlocks(t *testing.T, ctx context.Context, l1client *ethclient.Client, l1info *BlockchainTestInfo) { +func waitForL1DelayBlocks(t *testing.T, ctx context.Context, builder *NodeBuilder) { // sending l1 messages creates l1 blocks.. 
make enough to get that delayed inbox message in for i := 0; i < 30; i++ { - SendWaitTestTransactions(t, ctx, l1client, []*types.Transaction{ - l1info.PrepareTx("Faucet", "User", 30000, big.NewInt(1e12), nil), + builder.L1.SendWaitTestTransactions(t, []*types.Transaction{ + builder.L1Info.PrepareTx("Faucet", "User", 30000, big.NewInt(1e12), nil), }) } } func TestDepositETH(t *testing.T) { t.Parallel() - _, l1info, l2client, l1client, delayedInbox, lookupL2Tx, ctx, teardown := retryableSetup(t) + builder, delayedInbox, lookupL2Tx, ctx, teardown := retryableSetup(t) defer teardown() - faucetAddr := l1info.GetAddress("Faucet") + faucetAddr := builder.L1Info.GetAddress("Faucet") - oldBalance, err := l2client.BalanceAt(ctx, faucetAddr, nil) + oldBalance, err := builder.L2.Client.BalanceAt(ctx, faucetAddr, nil) if err != nil { t.Fatalf("BalanceAt(%v) unexpected error: %v", faucetAddr, err) } - txOpts := l1info.GetDefaultTransactOpts("Faucet", ctx) + txOpts := builder.L1Info.GetDefaultTransactOpts("Faucet", ctx) txOpts.Value = big.NewInt(13) l1tx, err := delayedInbox.DepositEth0(&txOpts) @@ -479,20 +477,20 @@ func TestDepositETH(t *testing.T) { t.Fatalf("DepositEth0() unexected error: %v", err) } - l1Receipt, err := EnsureTxSucceeded(ctx, l1client, l1tx) + l1Receipt, err := builder.L1.EnsureTxSucceeded(l1tx) if err != nil { t.Fatalf("EnsureTxSucceeded() unexpected error: %v", err) } if l1Receipt.Status != types.ReceiptStatusSuccessful { t.Errorf("Got transaction status: %v, want: %v", l1Receipt.Status, types.ReceiptStatusSuccessful) } - waitForL1DelayBlocks(t, ctx, l1client, l1info) + waitForL1DelayBlocks(t, ctx, builder) - l2Receipt, err := EnsureTxSucceeded(ctx, l2client, lookupL2Tx(l1Receipt)) + l2Receipt, err := builder.L2.EnsureTxSucceeded(lookupL2Tx(l1Receipt)) if err != nil { t.Fatalf("EnsureTxSucceeded unexpected error: %v", err) } - newBalance, err := l2client.BalanceAt(ctx, faucetAddr, l2Receipt.BlockNumber) + newBalance, err := builder.L2.Client.BalanceAt(ctx, 
faucetAddr, l2Receipt.BlockNumber) if err != nil { t.Fatalf("BalanceAt(%v) unexpected error: %v", faucetAddr, err) } @@ -502,13 +500,13 @@ func TestDepositETH(t *testing.T) { } func TestArbitrumContractTx(t *testing.T) { - l2Info, l1Info, l2Client, l1Client, delayedInbox, lookupL2Tx, ctx, teardown := retryableSetup(t) + builder, delayedInbox, lookupL2Tx, ctx, teardown := retryableSetup(t) defer teardown() - faucetL2Addr := util.RemapL1Address(l1Info.GetAddress("Faucet")) - TransferBalanceTo(t, "Faucet", faucetL2Addr, big.NewInt(1e18), l2Info, l2Client, ctx) + faucetL2Addr := util.RemapL1Address(builder.L1Info.GetAddress("Faucet")) + builder.L2.TransferBalanceTo(t, "Faucet", faucetL2Addr, big.NewInt(1e18), builder.L2Info) - l2TxOpts := l2Info.GetDefaultTransactOpts("Faucet", ctx) - l2ContractAddr, _ := deploySimple(t, ctx, l2TxOpts, l2Client) + l2TxOpts := builder.L2Info.GetDefaultTransactOpts("Faucet", ctx) + l2ContractAddr, _ := builder.L2.DeploySimple(t, l2TxOpts) l2ContractABI, err := abi.JSON(strings.NewReader(mocksgen.SimpleABI)) if err != nil { t.Fatalf("Error parsing contract ABI: %v", err) @@ -518,15 +516,15 @@ func TestArbitrumContractTx(t *testing.T) { t.Fatalf("Error packing method's call data: %v", err) } unsignedTx := types.NewTx(&types.ArbitrumContractTx{ - ChainId: l2Info.Signer.ChainID(), + ChainId: builder.L2Info.Signer.ChainID(), From: faucetL2Addr, - GasFeeCap: l2Info.GasPrice.Mul(l2Info.GasPrice, big.NewInt(2)), + GasFeeCap: builder.L2Info.GasPrice.Mul(builder.L2Info.GasPrice, big.NewInt(2)), Gas: 1e6, To: &l2ContractAddr, Value: common.Big0, Data: data, }) - txOpts := l1Info.GetDefaultTransactOpts("Faucet", ctx) + txOpts := builder.L1Info.GetDefaultTransactOpts("Faucet", ctx) l1tx, err := delayedInbox.SendContractTransaction( &txOpts, arbmath.UintToBig(unsignedTx.Gas()), @@ -538,15 +536,15 @@ func TestArbitrumContractTx(t *testing.T) { if err != nil { t.Fatalf("Error sending unsigned transaction: %v", err) } - receipt, err := 
EnsureTxSucceeded(ctx, l1Client, l1tx) + receipt, err := builder.L1.EnsureTxSucceeded(l1tx) if err != nil { t.Fatalf("EnsureTxSucceeded(%v) unexpected error: %v", l1tx.Hash(), err) } if receipt.Status != types.ReceiptStatusSuccessful { t.Errorf("L1 transaction: %v has failed", l1tx.Hash()) } - waitForL1DelayBlocks(t, ctx, l1Client, l1Info) - receipt, err = EnsureTxSucceeded(ctx, l2Client, lookupL2Tx(receipt)) + waitForL1DelayBlocks(t, ctx, builder) + _, err = builder.L2.EnsureTxSucceeded(lookupL2Tx(receipt)) if err != nil { t.Fatalf("EnsureTxSucceeded(%v) unexpected error: %v", unsignedTx.Hash(), err) } @@ -555,17 +553,17 @@ func TestArbitrumContractTx(t *testing.T) { func TestL1FundedUnsignedTransaction(t *testing.T) { t.Parallel() ctx := context.Background() - l2Info, node, l2Client, l1Info, _, l1Client, l1Stack := createTestNodeOnL1(t, ctx, true) - defer requireClose(t, l1Stack) - defer node.StopAndWait() + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + cleanup := builder.Build(t) + defer cleanup() - faucetL2Addr := util.RemapL1Address(l1Info.GetAddress("Faucet")) + faucetL2Addr := util.RemapL1Address(builder.L1Info.GetAddress("Faucet")) // Transfer balance to Faucet's corresponding L2 address, so that there is // enough balance on its' account for executing L2 transaction. 
- TransferBalanceTo(t, "Faucet", faucetL2Addr, big.NewInt(1e18), l2Info, l2Client, ctx) + builder.L2.TransferBalanceTo(t, "Faucet", faucetL2Addr, big.NewInt(1e18), builder.L2Info) - l2TxOpts := l2Info.GetDefaultTransactOpts("Faucet", ctx) - contractAddr, _ := deploySimple(t, ctx, l2TxOpts, l2Client) + l2TxOpts := builder.L2Info.GetDefaultTransactOpts("Faucet", ctx) + contractAddr, _ := builder.L2.DeploySimple(t, l2TxOpts) contractABI, err := abi.JSON(strings.NewReader(mocksgen.SimpleABI)) if err != nil { t.Fatalf("Error parsing contract ABI: %v", err) @@ -574,27 +572,27 @@ func TestL1FundedUnsignedTransaction(t *testing.T) { if err != nil { t.Fatalf("Error packing method's call data: %v", err) } - nonce, err := l2Client.NonceAt(ctx, faucetL2Addr, nil) + nonce, err := builder.L2.Client.NonceAt(ctx, faucetL2Addr, nil) if err != nil { t.Fatalf("Error getting nonce at address: %v, error: %v", faucetL2Addr, err) } unsignedTx := types.NewTx(&types.ArbitrumUnsignedTx{ - ChainId: l2Info.Signer.ChainID(), + ChainId: builder.L2Info.Signer.ChainID(), From: faucetL2Addr, Nonce: nonce, - GasFeeCap: l2Info.GasPrice, + GasFeeCap: builder.L2Info.GasPrice, Gas: 1e6, To: &contractAddr, Value: common.Big0, Data: data, }) - delayedInbox, err := bridgegen.NewInbox(l1Info.GetAddress("Inbox"), l1Client) + delayedInbox, err := bridgegen.NewInbox(builder.L1Info.GetAddress("Inbox"), builder.L1.Client) if err != nil { t.Fatalf("Error getting Go binding of L1 Inbox contract: %v", err) } - txOpts := l1Info.GetDefaultTransactOpts("Faucet", ctx) + txOpts := builder.L1Info.GetDefaultTransactOpts("Faucet", ctx) l1tx, err := delayedInbox.SendUnsignedTransaction( &txOpts, arbmath.UintToBig(unsignedTx.Gas()), @@ -607,15 +605,15 @@ func TestL1FundedUnsignedTransaction(t *testing.T) { if err != nil { t.Fatalf("Error sending unsigned transaction: %v", err) } - receipt, err := EnsureTxSucceeded(ctx, l1Client, l1tx) + receipt, err := builder.L1.EnsureTxSucceeded(l1tx) if err != nil { 
t.Fatalf("EnsureTxSucceeded(%v) unexpected error: %v", l1tx.Hash(), err) } if receipt.Status != types.ReceiptStatusSuccessful { t.Errorf("L1 transaction: %v has failed", l1tx.Hash()) } - waitForL1DelayBlocks(t, ctx, l1Client, l1Info) - receipt, err = EnsureTxSucceeded(ctx, l2Client, unsignedTx) + waitForL1DelayBlocks(t, ctx, builder) + receipt, err = builder.L2.EnsureTxSucceeded(unsignedTx) if err != nil { t.Fatalf("EnsureTxSucceeded(%v) unexpected error: %v", unsignedTx.Hash(), err) } @@ -625,28 +623,28 @@ func TestL1FundedUnsignedTransaction(t *testing.T) { } func TestRetryableSubmissionAndRedeemFees(t *testing.T) { - l2info, l1info, l2client, l1client, delayedInbox, lookupL2Tx, ctx, teardown := retryableSetup(t) + builder, delayedInbox, lookupL2Tx, ctx, teardown := retryableSetup(t) defer teardown() - infraFeeAddr, networkFeeAddr := setupFeeAddresses(t, ctx, l2client, l2info) + infraFeeAddr, networkFeeAddr := setupFeeAddresses(t, ctx, builder) - ownerTxOpts := l2info.GetDefaultTransactOpts("Owner", ctx) - simpleAddr, simple := deploySimple(t, ctx, ownerTxOpts, l2client) + ownerTxOpts := builder.L2Info.GetDefaultTransactOpts("Owner", ctx) + simpleAddr, simple := builder.L2.DeploySimple(t, ownerTxOpts) simpleABI, err := mocksgen.SimpleMetaData.GetAbi() Require(t, err) - elevateL2Basefee(t, ctx, l2client, l2info) + elevateL2Basefee(t, ctx, builder) - infraBalanceBefore, err := l2client.BalanceAt(ctx, infraFeeAddr, nil) + infraBalanceBefore, err := builder.L2.Client.BalanceAt(ctx, infraFeeAddr, nil) Require(t, err) - networkBalanceBefore, err := l2client.BalanceAt(ctx, networkFeeAddr, nil) + networkBalanceBefore, err := builder.L2.Client.BalanceAt(ctx, networkFeeAddr, nil) Require(t, err) - beneficiaryAddress := l2info.GetAddress("Beneficiary") + beneficiaryAddress := builder.L2Info.GetAddress("Beneficiary") deposit := arbmath.BigMul(big.NewInt(1e12), big.NewInt(1e12)) callValue := common.Big0 - usertxoptsL1 := l1info.GetDefaultTransactOpts("Faucet", ctx) + 
usertxoptsL1 := builder.L1Info.GetDefaultTransactOpts("Faucet", ctx) usertxoptsL1.Value = deposit - baseFee := GetBaseFee(t, l2client, ctx) + baseFee := builder.L2.GetBaseFee(t) l1tx, err := delayedInbox.CreateRetryableTicket( &usertxoptsL1, simpleAddr, @@ -660,16 +658,16 @@ func TestRetryableSubmissionAndRedeemFees(t *testing.T) { simpleABI.Methods["incrementRedeem"].ID, ) Require(t, err) - l1Receipt, err := EnsureTxSucceeded(ctx, l1client, l1tx) + l1Receipt, err := builder.L1.EnsureTxSucceeded(l1tx) Require(t, err) if l1Receipt.Status != types.ReceiptStatusSuccessful { Fatal(t, "l1Receipt indicated failure") } - waitForL1DelayBlocks(t, ctx, l1client, l1info) + waitForL1DelayBlocks(t, ctx, builder) submissionTxOuter := lookupL2Tx(l1Receipt) - submissionReceipt, err := EnsureTxSucceeded(ctx, l2client, submissionTxOuter) + submissionReceipt, err := builder.L2.EnsureTxSucceeded(submissionTxOuter) Require(t, err) if len(submissionReceipt.Logs) != 2 { Fatal(t, len(submissionReceipt.Logs)) @@ -677,36 +675,36 @@ func TestRetryableSubmissionAndRedeemFees(t *testing.T) { ticketId := submissionReceipt.Logs[0].Topics[1] firstRetryTxId := submissionReceipt.Logs[1].Topics[2] // get receipt for the auto redeem, make sure it failed - autoRedeemReceipt, err := WaitForTx(ctx, l2client, firstRetryTxId, time.Second*5) + autoRedeemReceipt, err := WaitForTx(ctx, builder.L2.Client, firstRetryTxId, time.Second*5) Require(t, err) if autoRedeemReceipt.Status != types.ReceiptStatusFailed { Fatal(t, "first retry tx shouldn't have succeeded") } - infraBalanceAfterSubmission, err := l2client.BalanceAt(ctx, infraFeeAddr, nil) + infraBalanceAfterSubmission, err := builder.L2.Client.BalanceAt(ctx, infraFeeAddr, nil) Require(t, err) - networkBalanceAfterSubmission, err := l2client.BalanceAt(ctx, networkFeeAddr, nil) + networkBalanceAfterSubmission, err := builder.L2.Client.BalanceAt(ctx, networkFeeAddr, nil) Require(t, err) - usertxoptsL2 := l2info.GetDefaultTransactOpts("Faucet", ctx) - 
arbRetryableTx, err := precompilesgen.NewArbRetryableTx(common.HexToAddress("6e"), l2client) + usertxoptsL2 := builder.L2Info.GetDefaultTransactOpts("Faucet", ctx) + arbRetryableTx, err := precompilesgen.NewArbRetryableTx(common.HexToAddress("6e"), builder.L2.Client) Require(t, err) tx, err := arbRetryableTx.Redeem(&usertxoptsL2, ticketId) Require(t, err) - redeemReceipt, err := EnsureTxSucceeded(ctx, l2client, tx) + redeemReceipt, err := builder.L2.EnsureTxSucceeded(tx) Require(t, err) retryTxId := redeemReceipt.Logs[0].Topics[2] // check the receipt for the retry - retryReceipt, err := WaitForTx(ctx, l2client, retryTxId, time.Second*1) + retryReceipt, err := WaitForTx(ctx, builder.L2.Client, retryTxId, time.Second*1) Require(t, err) if retryReceipt.Status != types.ReceiptStatusSuccessful { Fatal(t, "retry failed") } - infraBalanceAfterRedeem, err := l2client.BalanceAt(ctx, infraFeeAddr, nil) + infraBalanceAfterRedeem, err := builder.L2.Client.BalanceAt(ctx, infraFeeAddr, nil) Require(t, err) - networkBalanceAfterRedeem, err := l2client.BalanceAt(ctx, networkFeeAddr, nil) + networkBalanceAfterRedeem, err := builder.L2.Client.BalanceAt(ctx, networkFeeAddr, nil) Require(t, err) // verify that the increment happened, so we know the retry succeeded @@ -735,11 +733,11 @@ func TestRetryableSubmissionAndRedeemFees(t *testing.T) { infraRedeemFee := arbmath.BigSub(infraBalanceAfterRedeem, infraBalanceAfterSubmission) networkRedeemFee := arbmath.BigSub(networkBalanceAfterRedeem, networkBalanceAfterSubmission) - arbGasInfo, err := precompilesgen.NewArbGasInfo(common.HexToAddress("0x6c"), l2client) + arbGasInfo, err := precompilesgen.NewArbGasInfo(common.HexToAddress("0x6c"), builder.L2.Client) Require(t, err) minimumBaseFee, err := arbGasInfo.GetMinimumGasPrice(&bind.CallOpts{Context: ctx}) Require(t, err) - submissionBaseFee := GetBaseFeeAt(t, l2client, ctx, submissionReceipt.BlockNumber) + submissionBaseFee := builder.L2.GetBaseFeeAt(t, submissionReceipt.BlockNumber) 
submissionTx, ok := submissionTxOuter.GetInner().(*types.ArbitrumSubmitRetryableTx) if !ok { Fatal(t, "inner tx isn't ArbitrumSubmitRetryableTx") @@ -753,13 +751,13 @@ func TestRetryableSubmissionAndRedeemFees(t *testing.T) { retryableSubmissionFee, ) - retryTxOuter, _, err := l2client.TransactionByHash(ctx, retryTxId) + retryTxOuter, _, err := builder.L2.Client.TransactionByHash(ctx, retryTxId) Require(t, err) retryTx, ok := retryTxOuter.GetInner().(*types.ArbitrumRetryTx) if !ok { Fatal(t, "inner tx isn't ArbitrumRetryTx") } - redeemBaseFee := GetBaseFeeAt(t, l2client, ctx, redeemReceipt.BlockNumber) + redeemBaseFee := builder.L2.GetBaseFeeAt(t, redeemReceipt.BlockNumber) t.Log("redeem base fee:", redeemBaseFee) // redeem & retry expected fees @@ -795,59 +793,59 @@ func TestRetryableSubmissionAndRedeemFees(t *testing.T) { } // elevateL2Basefee by burning gas exceeding speed limit -func elevateL2Basefee(t *testing.T, ctx context.Context, l2client *ethclient.Client, l2info *BlockchainTestInfo) { - baseFeeBefore := GetBaseFee(t, l2client, ctx) +func elevateL2Basefee(t *testing.T, ctx context.Context, builder *NodeBuilder) { + baseFeeBefore := builder.L2.GetBaseFee(t) colors.PrintBlue("Elevating base fee...") arbostestabi, err := precompilesgen.ArbosTestMetaData.GetAbi() Require(t, err) - _, err = precompilesgen.NewArbosTest(common.HexToAddress("0x69"), l2client) + _, err = precompilesgen.NewArbosTest(common.HexToAddress("0x69"), builder.L2.Client) Require(t, err, "failed to deploy ArbosTest") burnAmount := gethexec.ConfigDefaultTest().RPC.RPCGasCap burnTarget := uint64(5 * l2pricing.InitialSpeedLimitPerSecondV6 * l2pricing.InitialBacklogTolerance) for i := uint64(0); i < (burnTarget+burnAmount)/burnAmount; i++ { burnArbGas := arbostestabi.Methods["burnArbGas"] - data, err := burnArbGas.Inputs.Pack(arbmath.UintToBig(burnAmount - l2info.TransferGas)) + data, err := burnArbGas.Inputs.Pack(arbmath.UintToBig(burnAmount - builder.L2Info.TransferGas)) Require(t, err) input 
:= append([]byte{}, burnArbGas.ID...) input = append(input, data...) to := common.HexToAddress("0x69") - tx := l2info.PrepareTxTo("Faucet", &to, burnAmount, big.NewInt(0), input) - Require(t, l2client.SendTransaction(ctx, tx)) - _, err = EnsureTxSucceeded(ctx, l2client, tx) + tx := builder.L2Info.PrepareTxTo("Faucet", &to, burnAmount, big.NewInt(0), input) + Require(t, builder.L2.Client.SendTransaction(ctx, tx)) + _, err = builder.L2.EnsureTxSucceeded(tx) Require(t, err) } - baseFee := GetBaseFee(t, l2client, ctx) + baseFee := builder.L2.GetBaseFee(t) colors.PrintBlue("New base fee: ", baseFee, " diff:", baseFee.Uint64()-baseFeeBefore.Uint64()) } -func setupFeeAddresses(t *testing.T, ctx context.Context, l2client *ethclient.Client, l2info *BlockchainTestInfo) (common.Address, common.Address) { - ownerTxOpts := l2info.GetDefaultTransactOpts("Owner", ctx) - ownerCallOpts := l2info.GetDefaultCallOpts("Owner", ctx) +func setupFeeAddresses(t *testing.T, ctx context.Context, builder *NodeBuilder) (common.Address, common.Address) { + ownerTxOpts := builder.L2Info.GetDefaultTransactOpts("Owner", ctx) + ownerCallOpts := builder.L2Info.GetDefaultCallOpts("Owner", ctx) // make "Owner" a chain owner - arbdebug, err := precompilesgen.NewArbDebug(common.HexToAddress("0xff"), l2client) + arbdebug, err := precompilesgen.NewArbDebug(common.HexToAddress("0xff"), builder.L2.Client) Require(t, err, "failed to deploy ArbDebug") tx, err := arbdebug.BecomeChainOwner(&ownerTxOpts) Require(t, err, "failed to deploy ArbDebug") - _, err = EnsureTxSucceeded(ctx, l2client, tx) + _, err = builder.L2.EnsureTxSucceeded(tx) Require(t, err) - arbowner, err := precompilesgen.NewArbOwner(common.HexToAddress("70"), l2client) + arbowner, err := precompilesgen.NewArbOwner(common.HexToAddress("70"), builder.L2.Client) Require(t, err) - arbownerPublic, err := precompilesgen.NewArbOwnerPublic(common.HexToAddress("6b"), l2client) + arbownerPublic, err := 
precompilesgen.NewArbOwnerPublic(common.HexToAddress("6b"), builder.L2.Client) Require(t, err) - l2info.GenerateAccount("InfraFee") - l2info.GenerateAccount("NetworkFee") - networkFeeAddr := l2info.GetAddress("NetworkFee") - infraFeeAddr := l2info.GetAddress("InfraFee") + builder.L2Info.GenerateAccount("InfraFee") + builder.L2Info.GenerateAccount("NetworkFee") + networkFeeAddr := builder.L2Info.GetAddress("NetworkFee") + infraFeeAddr := builder.L2Info.GetAddress("InfraFee") tx, err = arbowner.SetNetworkFeeAccount(&ownerTxOpts, networkFeeAddr) Require(t, err) - _, err = EnsureTxSucceeded(ctx, l2client, tx) + _, err = builder.L2.EnsureTxSucceeded(tx) Require(t, err) networkFeeAccount, err := arbownerPublic.GetNetworkFeeAccount(ownerCallOpts) Require(t, err) tx, err = arbowner.SetInfraFeeAccount(&ownerTxOpts, infraFeeAddr) Require(t, err) - _, err = EnsureTxSucceeded(ctx, l2client, tx) + _, err = builder.L2.EnsureTxSucceeded(tx) Require(t, err) infraFeeAccount, err := arbownerPublic.GetInfraFeeAccount(ownerCallOpts) Require(t, err) diff --git a/system_tests/seq_coordinator_test.go b/system_tests/seq_coordinator_test.go index ac3167a604..886a0528c7 100644 --- a/system_tests/seq_coordinator_test.go +++ b/system_tests/seq_coordinator_test.go @@ -56,7 +56,7 @@ func TestRedisSeqCoordinatorPriorities(t *testing.T) { // stdio protocol makes sure forwarder initialization doesn't fail nodeNames := []string{"stdio://A", "stdio://B", "stdio://C", "stdio://D", "stdio://E"} - nodes := make([]*arbnode.Node, len(nodeNames)) + testNodes := make([]*TestClient, len(nodeNames)) // init DB to known state initRedisForTest(t, ctx, builder.nodeConfig.SeqCoordinator.RedisUrl, nodeNames) @@ -65,11 +65,11 @@ func TestRedisSeqCoordinatorPriorities(t *testing.T) { builder.nodeConfig.SeqCoordinator.MyUrl = nodeNames[nodeNum] builder.L2Info = l2Info builder.Build(t) - nodes[nodeNum] = builder.L2.ConsensusNode + testNodes[nodeNum] = builder.L2 } trySequencing := func(nodeNum int) bool { - node := 
nodes[nodeNum] + node := testNodes[nodeNum].ConsensusNode curMsgs, err := node.TxStreamer.GetMessageCountSync(t) Require(t, err) emptyMessage := arbostypes.MessageWithMetadata{ @@ -98,14 +98,15 @@ func TestRedisSeqCoordinatorPriorities(t *testing.T) { // node(n) has higher prio than node(n+1), so should be impossible for more than one to succeed trySequencingEverywhere := func() int { succeeded := -1 - for nodeNum, node := range nodes { + for nodeNum, testNode := range testNodes { + node := testNode.ConsensusNode if node == nil { continue } if trySequencing(nodeNum) { if succeeded >= 0 { t.Fatal("sequnced succeeded in parallel", - "index1:", succeeded, "debug", nodes[succeeded].SeqCoordinator.DebugPrint(), + "index1:", succeeded, "debug", testNodes[succeeded].ConsensusNode.SeqCoordinator.DebugPrint(), "index2:", nodeNum, "debug", node.SeqCoordinator.DebugPrint(), "now", time.Now().UnixMilli()) } @@ -116,7 +117,8 @@ func TestRedisSeqCoordinatorPriorities(t *testing.T) { } waitForMsgEverywhere := func(msgNum arbutil.MessageIndex) { - for _, currentNode := range nodes { + for _, testNode := range testNodes { + currentNode := testNode.ConsensusNode if currentNode == nil { continue } @@ -137,16 +139,16 @@ func TestRedisSeqCoordinatorPriorities(t *testing.T) { var needsStop []*arbnode.Node killNode := func(nodeNum int) { if nodeNum%3 == 0 { - nodes[nodeNum].SeqCoordinator.PrepareForShutdown() - needsStop = append(needsStop, nodes[nodeNum]) + testNodes[nodeNum].ConsensusNode.SeqCoordinator.PrepareForShutdown() + needsStop = append(needsStop, testNodes[nodeNum].ConsensusNode) } else { - nodes[nodeNum].StopAndWait() + testNodes[nodeNum].ConsensusNode.StopAndWait() } - nodes[nodeNum] = nil + testNodes[nodeNum].ConsensusNode = nil } nodeForwardTarget := func(nodeNum int) int { - execNode := getExecNode(t, nodes[nodeNum]) + execNode := testNodes[nodeNum].ExecNode fwTarget := 
execNode.TxPublisher.(*gethexec.TxPreChecker).TransactionPublisher.(*gethexec.Sequencer).ForwardTarget() if fwTarget == "" { return -1 @@ -178,7 +180,7 @@ func TestRedisSeqCoordinatorPriorities(t *testing.T) { t.Log("Starting other nodes") - for i := 1; i < len(nodes); i++ { + for i := 1; i < len(testNodes); i++ { createStartNode(i) } @@ -189,7 +191,7 @@ func TestRedisSeqCoordinatorPriorities(t *testing.T) { for { // all remaining nodes know which is the chosen one - for i := currentSequencer + 1; i < len(nodes); i++ { + for i := currentSequencer + 1; i < len(testNodes); i++ { for attempts := 1; nodeForwardTarget(i) != currentSequencer; attempts++ { if attempts > 10 { t.Fatal("initial forward target not set") @@ -206,7 +208,7 @@ func TestRedisSeqCoordinatorPriorities(t *testing.T) { sequencedMesssages++ } - if currentSequencer == len(nodes)-1 { + if currentSequencer == len(testNodes)-1 { addNodes = true } if addNodes { @@ -258,7 +260,7 @@ func TestRedisSeqCoordinatorPriorities(t *testing.T) { waitForMsgEverywhere(sequencedMesssages) } - for nodeNum := range nodes { + for nodeNum := range testNodes { killNode(nodeNum) } for _, node := range needsStop { @@ -282,7 +284,6 @@ func testCoordinatorMessageSync(t *testing.T, successCase bool) { cleanup := builder.Build(t) defer cleanup() - clientA := builder.L2.Client redisClient, err := redisutil.RedisClientFromURL(builder.nodeConfig.SeqCoordinator.RedisUrl) Require(t, err) @@ -312,26 +313,25 @@ func testCoordinatorMessageSync(t *testing.T, successCase bool) { testClientB, cleanupB := builder.Build2ndNode(t, &SecondNodeParams{nodeConfig: builder.nodeConfig}) defer cleanupB() - clientB := testClientB.Client tx := builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, big.NewInt(1e12), nil) - err = clientA.SendTransaction(ctx, tx) + err = builder.L2.Client.SendTransaction(ctx, tx) Require(t, err) - _, err = EnsureTxSucceeded(ctx, clientA, tx) + _, err = builder.L2.EnsureTxSucceeded(tx) Require(t, err) if 
successCase { - _, err = WaitForTx(ctx, clientB, tx.Hash(), time.Second*5) + _, err = WaitForTx(ctx, testClientB.Client, tx.Hash(), time.Second*5) Require(t, err) - l2balance, err := clientB.BalanceAt(ctx, builder.L2Info.GetAddress("User2"), nil) + l2balance, err := testClientB.Client.BalanceAt(ctx, builder.L2Info.GetAddress("User2"), nil) Require(t, err) if l2balance.Cmp(big.NewInt(1e12)) != 0 { t.Fatal("Unexpected balance:", l2balance) } } else { - _, err = WaitForTx(ctx, clientB, tx.Hash(), time.Second) + _, err = WaitForTx(ctx, testClientB.Client, tx.Hash(), time.Second) if err == nil { Fatal(t, "tx received by node with different seq coordinator signing key") } diff --git a/system_tests/seq_nonce_test.go b/system_tests/seq_nonce_test.go index d70f47a146..f0e3dcffd7 100644 --- a/system_tests/seq_nonce_test.go +++ b/system_tests/seq_nonce_test.go @@ -15,7 +15,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" - "github.com/offchainlabs/nitro/execution/gethexec" "github.com/offchainlabs/nitro/util/arbmath" ) @@ -24,12 +23,13 @@ func TestSequencerParallelNonces(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - config := gethexec.ConfigDefaultTest() - config.Sequencer.NonceFailureCacheExpiry = time.Minute - l2info, node, client := CreateTestL2WithConfig(t, ctx, nil, nil, config, false) - defer node.StopAndWait() + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + builder.takeOwnership = false + builder.execConfig.Sequencer.NonceFailureCacheExpiry = time.Minute + cleanup := builder.Build(t) + defer cleanup() - l2info.GenerateAccount("Destination") + builder.L2Info.GenerateAccount("Destination") wg := sync.WaitGroup{} for thread := 0; thread < 10; thread++ { @@ -37,11 +37,11 @@ func TestSequencerParallelNonces(t *testing.T) { go func() { defer wg.Done() for i := 0; i < 10; i++ { - tx := l2info.PrepareTx("Owner", "Destination", l2info.TransferGas, common.Big1, nil) + tx := 
builder.L2Info.PrepareTx("Owner", "Destination", builder.L2Info.TransferGas, common.Big1, nil) // Sleep a random amount of time up to 20 milliseconds time.Sleep(time.Millisecond * time.Duration(rand.Intn(20))) t.Log("Submitting transaction with nonce", tx.Nonce()) - err := client.SendTransaction(ctx, tx) + err := builder.L2.Client.SendTransaction(ctx, tx) Require(t, err) t.Log("Got response for transaction with nonce", tx.Nonce()) } @@ -49,8 +49,8 @@ func TestSequencerParallelNonces(t *testing.T) { } wg.Wait() - addr := l2info.GetAddress("Destination") - balance, err := client.BalanceAt(ctx, addr, nil) + addr := builder.L2Info.GetAddress("Destination") + balance, err := builder.L2.Client.BalanceAt(ctx, addr, nil) Require(t, err) if !arbmath.BigEquals(balance, big.NewInt(100)) { Fatal(t, "Unexpected user balance", balance) @@ -62,15 +62,16 @@ func TestSequencerNonceTooHigh(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - config := gethexec.ConfigDefaultTest() - l2info, node, client := CreateTestL2WithConfig(t, ctx, nil, nil, config, false) - defer node.StopAndWait() + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + builder.takeOwnership = false + cleanup := builder.Build(t) + defer cleanup() - l2info.GetInfoWithPrivKey("Owner").Nonce++ + builder.L2Info.GetInfoWithPrivKey("Owner").Nonce++ before := time.Now() - tx := l2info.PrepareTx("Owner", "Owner", l2info.TransferGas, common.Big0, nil) - err := client.SendTransaction(ctx, tx) + tx := builder.L2Info.PrepareTx("Owner", "Owner", builder.L2Info.TransferGas, common.Big0, nil) + err := builder.L2.Client.SendTransaction(ctx, tx) if err == nil { Fatal(t, "No error when nonce was too high") } @@ -78,7 +79,7 @@ func TestSequencerNonceTooHigh(t *testing.T) { Fatal(t, "Unexpected transaction error", err) } elapsed := time.Since(before) - if elapsed > 2*config.Sequencer.NonceFailureCacheExpiry { + if elapsed > 2*builder.execConfig.Sequencer.NonceFailureCacheExpiry { Fatal(t, 
"Sequencer took too long to respond with nonce too high") } } @@ -88,19 +89,20 @@ func TestSequencerNonceTooHighQueueFull(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - config := gethexec.ConfigDefaultTest() - config.Sequencer.NonceFailureCacheSize = 5 - config.Sequencer.NonceFailureCacheExpiry = time.Minute - l2info, node, client := CreateTestL2WithConfig(t, ctx, nil, nil, config, false) - defer node.StopAndWait() + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + builder.takeOwnership = false + builder.execConfig.Sequencer.NonceFailureCacheSize = 5 + builder.execConfig.Sequencer.NonceFailureCacheExpiry = time.Minute + cleanup := builder.Build(t) + defer cleanup() count := 15 var completed uint64 for i := 0; i < count; i++ { - l2info.GetInfoWithPrivKey("Owner").Nonce++ - tx := l2info.PrepareTx("Owner", "Owner", l2info.TransferGas, common.Big0, nil) + builder.L2Info.GetInfoWithPrivKey("Owner").Nonce++ + tx := builder.L2Info.PrepareTx("Owner", "Owner", builder.L2Info.TransferGas, common.Big0, nil) go func() { - err := client.SendTransaction(ctx, tx) + err := builder.L2.Client.SendTransaction(ctx, tx) if err == nil { Fatal(t, "No error when nonce was too high") } @@ -110,7 +112,7 @@ func TestSequencerNonceTooHighQueueFull(t *testing.T) { for wait := 9; wait >= 0; wait-- { got := int(atomic.LoadUint64(&completed)) - expected := count - config.Sequencer.NonceFailureCacheSize + expected := count - builder.execConfig.Sequencer.NonceFailureCacheSize if got == expected { break } diff --git a/system_tests/seq_pause_test.go b/system_tests/seq_pause_test.go index 3817768517..6ce464d8da 100644 --- a/system_tests/seq_pause_test.go +++ b/system_tests/seq_pause_test.go @@ -16,13 +16,13 @@ func TestSequencerPause(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - l2info1, nodeA, client := CreateTestL2(t, ctx) - defer nodeA.StopAndWait() + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + 
cleanup := builder.Build(t) + defer cleanup() const numUsers = 100 - execA := getExecNode(t, nodeA) - prechecker, ok := execA.TxPublisher.(*gethexec.TxPreChecker) + prechecker, ok := builder.L2.ExecNode.TxPublisher.(*gethexec.TxPreChecker) if !ok { t.Error("prechecker not found on node") } @@ -35,15 +35,15 @@ func TestSequencerPause(t *testing.T) { for num := 0; num < numUsers; num++ { userName := fmt.Sprintf("My_User_%d", num) - l2info1.GenerateAccount(userName) + builder.L2Info.GenerateAccount(userName) users = append(users, userName) } for _, userName := range users { - tx := l2info1.PrepareTx("Owner", userName, l2info1.TransferGas, big.NewInt(1e16), nil) - err := client.SendTransaction(ctx, tx) + tx := builder.L2Info.PrepareTx("Owner", userName, builder.L2Info.TransferGas, big.NewInt(1e16), nil) + err := builder.L2.Client.SendTransaction(ctx, tx) Require(t, err) - _, err = EnsureTxSucceeded(ctx, client, tx) + _, err = builder.L2.EnsureTxSucceeded(tx) Require(t, err) } @@ -52,7 +52,7 @@ func TestSequencerPause(t *testing.T) { var txs types.Transactions for _, userName := range users { - tx := l2info1.PrepareTx(userName, "Owner", l2info1.TransferGas, big.NewInt(2), nil) + tx := builder.L2Info.PrepareTx(userName, "Owner", builder.L2Info.TransferGas, big.NewInt(2), nil) txs = append(txs, tx) } @@ -63,7 +63,7 @@ func TestSequencerPause(t *testing.T) { }(tx) } - _, err := EnsureTxSucceededWithTimeout(ctx, client, txs[0], time.Second) + _, err := builder.L2.EnsureTxSucceededWithTimeout(txs[0], time.Second) if err == nil { t.Error("tx passed while sequencer paused") } @@ -71,7 +71,7 @@ func TestSequencerPause(t *testing.T) { sequencer.Activate() for _, tx := range txs { - _, err := EnsureTxSucceeded(ctx, client, tx) + _, err := builder.L2.EnsureTxSucceeded(tx) Require(t, err) } } diff --git a/system_tests/seq_reject_test.go b/system_tests/seq_reject_test.go index 34a14c660e..76bdfc2612 100644 --- a/system_tests/seq_reject_test.go +++ b/system_tests/seq_reject_test.go 
@@ -17,7 +17,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/params" - "github.com/offchainlabs/nitro/arbnode" "github.com/offchainlabs/nitro/solgen/go/mocksgen" "github.com/offchainlabs/nitro/util/arbmath" "github.com/offchainlabs/nitro/util/colors" @@ -28,21 +27,21 @@ func TestSequencerRejection(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - seqNodeConfig := arbnode.ConfigDefaultL2Test() - seqNodeConfig.Feed.Output = *newBroadcasterConfigTest() feedErrChan := make(chan error, 10) - l2info1, nodeA, client1 := CreateTestL2WithConfig(t, ctx, nil, seqNodeConfig, nil, true) - defer nodeA.StopAndWait() - - clientNodeConfig := arbnode.ConfigDefaultL2Test() - port := nodeA.BroadcastServer.ListenerAddr().(*net.TCPAddr).Port - clientNodeConfig.Feed.Input = *newBroadcastClientConfigTest(port) - - _, nodeB, client2 := CreateTestL2WithConfig(t, ctx, nil, clientNodeConfig, nil, false) - defer nodeB.StopAndWait() - - auth := l2info1.GetDefaultTransactOpts("Owner", ctx) - simpleAddr, _ := deploySimple(t, ctx, auth, client1) + builderSeq := NewNodeBuilder(ctx).DefaultConfig(t, false) + builderSeq.nodeConfig.Feed.Output = *newBroadcasterConfigTest() + cleanupSeq := builderSeq.Build(t) + defer cleanupSeq() + + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + builder.takeOwnership = false + port := builderSeq.L2.ConsensusNode.BroadcastServer.ListenerAddr().(*net.TCPAddr).Port + builder.nodeConfig.Feed.Input = *newBroadcastClientConfigTest(port) + cleanup := builder.Build(t) + defer cleanup() + + auth := builderSeq.L2Info.GetDefaultTransactOpts("Owner", ctx) + simpleAddr, _ := builderSeq.L2.DeploySimple(t, auth) simpleAbi, err := mocksgen.SimpleMetaData.GetAbi() Require(t, err) noopId := simpleAbi.Methods["noop"].ID @@ -51,7 +50,7 @@ func TestSequencerRejection(t *testing.T) { // Generate the accounts before hand to avoid races for user := 0; user < 
9; user++ { name := fmt.Sprintf("User%v", user) - l2info1.GenerateAccount(name) + builderSeq.L2Info.GenerateAccount(name) } wg := sync.WaitGroup{} @@ -59,24 +58,24 @@ func TestSequencerRejection(t *testing.T) { for user := 0; user < 9; user++ { user := user name := fmt.Sprintf("User%v", user) - tx := l2info1.PrepareTx("Owner", name, l2info1.TransferGas, big.NewInt(params.Ether), nil) + tx := builderSeq.L2Info.PrepareTx("Owner", name, builderSeq.L2Info.TransferGas, big.NewInt(params.Ether), nil) - err := client1.SendTransaction(ctx, tx) + err := builderSeq.L2.Client.SendTransaction(ctx, tx) Require(t, err) - _, err = EnsureTxSucceeded(ctx, client1, tx) + _, err = builderSeq.L2.EnsureTxSucceeded(tx) Require(t, err) - _, err = EnsureTxSucceeded(ctx, client2, tx) + _, err = builder.L2.EnsureTxSucceeded(tx) Require(t, err) wg.Add(1) go func() { defer wg.Done() - info := l2info1.GetInfoWithPrivKey(name) + info := builderSeq.L2Info.GetInfoWithPrivKey(name) txData := &types.DynamicFeeTx{ To: &simpleAddr, - Gas: l2info1.TransferGas + 10000, - GasFeeCap: arbmath.BigMulByUint(l2info1.GasPrice, 100), + Gas: builderSeq.L2Info.TransferGas + 10000, + GasFeeCap: arbmath.BigMulByUint(builderSeq.L2Info.GasPrice, 100), Value: common.Big0, } for atomic.LoadInt32(&stopBackground) == 0 { @@ -92,8 +91,8 @@ func TestSequencerRejection(t *testing.T) { txData.Nonce = 1 << 32 expectedErr = "nonce too high" } - tx = l2info1.SignTxAs(name, txData) - err = client1.SendTransaction(ctx, tx) + tx = builderSeq.L2Info.SignTxAs(name, txData) + err = builderSeq.L2.Client.SendTransaction(ctx, tx) if err != nil && (expectedErr == "" || !strings.Contains(err.Error(), expectedErr)) { Require(t, err, "failed to send tx for user", user) } @@ -102,7 +101,7 @@ func TestSequencerRejection(t *testing.T) { } for i := 100; i >= 0; i-- { - block, err := client1.BlockNumber(ctx) + block, err := builderSeq.L2.Client.BlockNumber(ctx) Require(t, err) if block >= 200 { break @@ -120,11 +119,11 @@ func 
TestSequencerRejection(t *testing.T) { atomic.StoreInt32(&stopBackground, 1) wg.Wait() - header1, err := client1.HeaderByNumber(ctx, nil) + header1, err := builderSeq.L2.Client.HeaderByNumber(ctx, nil) Require(t, err) for i := 100; i >= 0; i-- { - header2, err := client2.HeaderByNumber(ctx, header1.Number) + header2, err := builder.L2.Client.HeaderByNumber(ctx, header1.Number) if err != nil { select { case err := <-feedErrChan: @@ -132,7 +131,7 @@ func TestSequencerRejection(t *testing.T) { case <-time.After(time.Millisecond * 100): } if i == 0 { - client2Block, _ := client2.BlockNumber(ctx) + client2Block, _ := builder.L2.Client.BlockNumber(ctx) Fatal(t, "client2 failed to reach client1 block ", header1.Number, ", only reached block", client2Block) } continue diff --git a/system_tests/seq_whitelist_test.go b/system_tests/seq_whitelist_test.go index 36e309a5d7..efa30171ac 100644 --- a/system_tests/seq_whitelist_test.go +++ b/system_tests/seq_whitelist_test.go @@ -9,31 +9,30 @@ import ( "testing" "github.com/ethereum/go-ethereum/params" - "github.com/offchainlabs/nitro/execution/gethexec" ) func TestSequencerWhitelist(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - config := gethexec.ConfigDefaultTest() - config.Sequencer.SenderWhitelist = GetTestAddressForAccountName(t, "Owner").String() + "," + GetTestAddressForAccountName(t, "User").String() - l2info, l2node, client := CreateTestL2WithConfig(t, ctx, nil, nil, config, true) - defer l2node.StopAndWait() + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + builder.execConfig.Sequencer.SenderWhitelist = GetTestAddressForAccountName(t, "Owner").String() + "," + GetTestAddressForAccountName(t, "User").String() + cleanup := builder.Build(t) + defer cleanup() - l2info.GenerateAccount("User") - l2info.GenerateAccount("User2") + builder.L2Info.GenerateAccount("User") + builder.L2Info.GenerateAccount("User2") // Owner is on the whitelist - TransferBalance(t, "Owner", "User", 
big.NewInt(params.Ether), l2info, client, ctx) - TransferBalance(t, "Owner", "User2", big.NewInt(params.Ether), l2info, client, ctx) + builder.L2.TransferBalance(t, "Owner", "User", big.NewInt(params.Ether), builder.L2Info) + builder.L2.TransferBalance(t, "Owner", "User2", big.NewInt(params.Ether), builder.L2Info) // User is on the whitelist - TransferBalance(t, "User", "User2", big.NewInt(params.Ether/10), l2info, client, ctx) + builder.L2.TransferBalance(t, "User", "User2", big.NewInt(params.Ether/10), builder.L2Info) // User2 is *not* on the whitelist, therefore this should fail - tx := l2info.PrepareTx("User2", "User", l2info.TransferGas, big.NewInt(params.Ether/10), nil) - err := client.SendTransaction(ctx, tx) + tx := builder.L2Info.PrepareTx("User2", "User", builder.L2Info.TransferGas, big.NewInt(params.Ether/10), nil) + err := builder.L2.Client.SendTransaction(ctx, tx) if err == nil { Fatal(t, "transaction from user not on whitelist accepted") } diff --git a/system_tests/seqcompensation_test.go b/system_tests/seqcompensation_test.go index 362acf6a30..156ced6bfc 100644 --- a/system_tests/seqcompensation_test.go +++ b/system_tests/seqcompensation_test.go @@ -18,19 +18,19 @@ func TestSequencerCompensation(t *testing.T) { t.Parallel() ctx, cancel := context.WithCancel(context.Background()) defer cancel() - l2info, nodeA, l2clientA, l1info, _, l1client, l1stack := createTestNodeOnL1(t, ctx, true) - defer requireClose(t, l1stack) - defer nodeA.StopAndWait() + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + cleanup := builder.Build(t) + defer cleanup() - l2clientB, nodeB := Create2ndNode(t, ctx, nodeA, l1stack, l1info, &l2info.ArbInitData, nil) - defer nodeB.StopAndWait() + TestClientB, cleanupB := builder.Build2ndNode(t, &SecondNodeParams{}) + defer cleanupB() - l2info.GenerateAccount("User2") + builder.L2Info.GenerateAccount("User2") - tx := l2info.PrepareTx("Owner", "User2", l2info.TransferGas, big.NewInt(1e12), nil) - err := 
l2clientA.SendTransaction(ctx, tx) + tx := builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, big.NewInt(1e12), nil) + err := builder.L2.Client.SendTransaction(ctx, tx) Require(t, err) - _, err = EnsureTxSucceeded(ctx, l2clientA, tx) + _, err = builder.L2.EnsureTxSucceeded(tx) Require(t, err) // give the inbox reader a bit of time to pick up the delayed message @@ -38,22 +38,22 @@ func TestSequencerCompensation(t *testing.T) { // sending l1 messages creates l1 blocks.. make enough to get that delayed inbox message in for i := 0; i < 30; i++ { - SendWaitTestTransactions(t, ctx, l1client, []*types.Transaction{ - l1info.PrepareTx("Faucet", "User", 30000, big.NewInt(1e12), nil), + builder.L1.SendWaitTestTransactions(t, []*types.Transaction{ + builder.L1Info.PrepareTx("Faucet", "User", 30000, big.NewInt(1e12), nil), }) } - _, err = WaitForTx(ctx, l2clientB, tx.Hash(), time.Second*5) + _, err = WaitForTx(ctx, TestClientB.Client, tx.Hash(), time.Second*5) Require(t, err) // clientB sees balance means sequencer message was sent - l2balance, err := l2clientB.BalanceAt(ctx, l2info.GetAddress("User2"), nil) + l2balance, err := TestClientB.Client.BalanceAt(ctx, builder.L2Info.GetAddress("User2"), nil) Require(t, err) if l2balance.Cmp(big.NewInt(1e12)) != 0 { Fatal(t, "Unexpected balance:", l2balance) } - initialSeqBalance, err := l2clientB.BalanceAt(ctx, l1pricing.BatchPosterAddress, big.NewInt(0)) + initialSeqBalance, err := TestClientB.Client.BalanceAt(ctx, l1pricing.BatchPosterAddress, big.NewInt(0)) Require(t, err) if initialSeqBalance.Sign() != 0 { Fatal(t, "Unexpected initial sequencer balance:", initialSeqBalance) diff --git a/system_tests/seqinbox_test.go b/system_tests/seqinbox_test.go index c90617455a..69aeab0c83 100644 --- a/system_tests/seqinbox_test.go +++ b/system_tests/seqinbox_test.go @@ -18,7 +18,6 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" 
"github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethclient/gethclient" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" @@ -91,44 +90,44 @@ func diffAccessList(accessed, al types.AccessList) string { return diff } -func deployGasRefunder(ctx context.Context, t *testing.T, info *BlockchainTestInfo, client *ethclient.Client) common.Address { +func deployGasRefunder(ctx context.Context, t *testing.T, builder *NodeBuilder) common.Address { t.Helper() abi, err := bridgegen.GasRefunderMetaData.GetAbi() if err != nil { t.Fatalf("Error getting gas refunder abi: %v", err) } - fauOpts := info.GetDefaultTransactOpts("Faucet", ctx) - addr, tx, _, err := bind.DeployContract(&fauOpts, *abi, common.FromHex(bridgegen.GasRefunderBin), client) + fauOpts := builder.L1Info.GetDefaultTransactOpts("Faucet", ctx) + addr, tx, _, err := bind.DeployContract(&fauOpts, *abi, common.FromHex(bridgegen.GasRefunderBin), builder.L1.Client) if err != nil { t.Fatalf("Error getting gas refunder contract deployment transaction: %v", err) } - if _, err := EnsureTxSucceeded(ctx, client, tx); err != nil { + if _, err := builder.L1.EnsureTxSucceeded(tx); err != nil { t.Fatalf("Error deploying gas refunder contract: %v", err) } - tx = info.PrepareTxTo("Faucet", &addr, 30000, big.NewInt(9223372036854775807), nil) - if err := client.SendTransaction(ctx, tx); err != nil { + tx = builder.L1Info.PrepareTxTo("Faucet", &addr, 30000, big.NewInt(9223372036854775807), nil) + if err := builder.L1.Client.SendTransaction(ctx, tx); err != nil { t.Fatalf("Error sending gas refunder funding transaction") } - if _, err := EnsureTxSucceeded(ctx, client, tx); err != nil { + if _, err := builder.L1.EnsureTxSucceeded(tx); err != nil { t.Fatalf("Error funding gas refunder") } - contract, err := bridgegen.NewGasRefunder(addr, client) + contract, err := bridgegen.NewGasRefunder(addr, builder.L1.Client) if err != nil { 
t.Fatalf("Error getting gas refunder contract binding: %v", err) } - tx, err = contract.AllowContracts(&fauOpts, []common.Address{info.GetAddress("SequencerInbox")}) + tx, err = contract.AllowContracts(&fauOpts, []common.Address{builder.L1Info.GetAddress("SequencerInbox")}) if err != nil { t.Fatalf("Error creating transaction for altering allowlist in refunder: %v", err) } - if _, err := EnsureTxSucceeded(ctx, client, tx); err != nil { + if _, err := builder.L1.EnsureTxSucceeded(tx); err != nil { t.Fatalf("Error addting sequencer inbox in gas refunder allowlist: %v", err) } - tx, err = contract.AllowRefundees(&fauOpts, []common.Address{info.GetAddress("Sequencer")}) + tx, err = contract.AllowRefundees(&fauOpts, []common.Address{builder.L1Info.GetAddress("Sequencer")}) if err != nil { t.Fatalf("Error creating transaction for altering allowlist in refunder: %v", err) } - if _, err := EnsureTxSucceeded(ctx, client, tx); err != nil { + if _, err := builder.L1.EnsureTxSucceeded(tx); err != nil { t.Fatalf("Error addting sequencer in gas refunder allowlist: %v", err) } return addr @@ -162,7 +161,7 @@ func testSequencerInboxReaderImpl(t *testing.T, validator bool) { Require(t, err) seqOpts := builder.L1Info.GetDefaultTransactOpts("Sequencer", ctx) - gasRefunderAddr := deployGasRefunder(ctx, t, builder.L1Info, builder.L1.Client) + gasRefunderAddr := deployGasRefunder(ctx, t, builder) ownerAddress := builder.L2Info.GetAddress("Owner") var startL2BlockNumber uint64 = 0 @@ -200,7 +199,7 @@ func testSequencerInboxReaderImpl(t *testing.T, validator bool) { builder.L1Info.GenerateAccount(acct) faucetTxs = append(faucetTxs, builder.L1Info.PrepareTx("Faucet", acct, 30000, big.NewInt(1e16), nil)) } - SendWaitTestTransactions(t, ctx, builder.L1.Client, faucetTxs) + builder.L1.SendWaitTestTransactions(t, faucetTxs) seqABI, err := bridgegen.SequencerInboxMetaData.GetAbi() if err != nil { diff --git a/system_tests/staker_test.go b/system_tests/staker_test.go index 0239491422..e9eb884c95 
100644 --- a/system_tests/staker_test.go +++ b/system_tests/staker_test.go @@ -27,7 +27,6 @@ import ( "github.com/offchainlabs/nitro/arbnode" "github.com/offchainlabs/nitro/arbnode/dataposter/storage" "github.com/offchainlabs/nitro/arbos/l2pricing" - "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/solgen/go/mocksgen" "github.com/offchainlabs/nitro/solgen/go/rollupgen" "github.com/offchainlabs/nitro/staker" @@ -38,15 +37,15 @@ import ( "github.com/offchainlabs/nitro/validator/valnode" ) -func makeBackgroundTxs(ctx context.Context, l2info *BlockchainTestInfo, l2clientA arbutil.L1Interface) error { +func makeBackgroundTxs(ctx context.Context, builder *NodeBuilder) error { for i := uint64(0); ctx.Err() == nil; i++ { - l2info.Accounts["BackgroundUser"].Nonce = i - tx := l2info.PrepareTx("BackgroundUser", "BackgroundUser", l2info.TransferGas, common.Big0, nil) - err := l2clientA.SendTransaction(ctx, tx) + builder.L2Info.Accounts["BackgroundUser"].Nonce = i + tx := builder.L2Info.PrepareTx("BackgroundUser", "BackgroundUser", builder.L2Info.TransferGas, common.Big0, nil) + err := builder.L2.Client.SendTransaction(ctx, tx) if err != nil { return err } - _, err = EnsureTxSucceeded(ctx, l2clientA, tx) + _, err = builder.L2.EnsureTxSucceeded(tx) if err != nil { return err } @@ -263,7 +262,7 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) })() go (func() { defer close(backgroundTxsShutdownChan) - err := makeBackgroundTxs(backgroundTxsCtx, builder.L2Info, builder.L2.Client) + err := makeBackgroundTxs(backgroundTxsCtx, builder) if !errors.Is(err, context.Canceled) { log.Warn("error making background txs", "err", err) } diff --git a/system_tests/transfer_test.go b/system_tests/transfer_test.go index 2e3317907b..a270cca76b 100644 --- a/system_tests/transfer_test.go +++ b/system_tests/transfer_test.go @@ -13,23 +13,24 @@ import ( func TestTransfer(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer 
cancel() - l2info, l2node, client := CreateTestL2(t, ctx) - defer l2node.StopAndWait() + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + cleanup := builder.Build(t) + defer cleanup() - l2info.GenerateAccount("User2") + builder.L2Info.GenerateAccount("User2") - tx := l2info.PrepareTx("Owner", "User2", l2info.TransferGas, big.NewInt(1e12), nil) + tx := builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, big.NewInt(1e12), nil) - err := client.SendTransaction(ctx, tx) + err := builder.L2.Client.SendTransaction(ctx, tx) Require(t, err) - _, err = EnsureTxSucceeded(ctx, client, tx) + _, err = builder.L2.EnsureTxSucceeded(tx) Require(t, err) - bal, err := client.BalanceAt(ctx, l2info.GetAddress("Owner"), nil) + bal, err := builder.L2.Client.BalanceAt(ctx, builder.L2Info.GetAddress("Owner"), nil) Require(t, err) fmt.Println("Owner balance is: ", bal) - bal2, err := client.BalanceAt(ctx, l2info.GetAddress("User2"), nil) + bal2, err := builder.L2.Client.BalanceAt(ctx, builder.L2Info.GetAddress("User2"), nil) Require(t, err) if bal2.Cmp(big.NewInt(1e12)) != 0 { Fatal(t, "Unexpected recipient balance: ", bal2) diff --git a/system_tests/triedb_race_test.go b/system_tests/triedb_race_test.go index 8174a9b6a2..6d9415df83 100644 --- a/system_tests/triedb_race_test.go +++ b/system_tests/triedb_race_test.go @@ -10,7 +10,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rpc" - "github.com/offchainlabs/nitro/execution/gethexec" "github.com/offchainlabs/nitro/util/testhelpers" ) @@ -18,24 +17,21 @@ func TestTrieDBCommitRace(t *testing.T) { _ = testhelpers.InitTestLog(t, log.LvlError) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - execConfig := gethexec.ConfigDefaultTest() - execConfig.RPC.MaxRecreateStateDepth = arbitrum.InfiniteMaxRecreateStateDepth - execConfig.Sequencer.MaxBlockSpeed = 0 - execConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 - 
execConfig.Caching.Archive = true - execConfig.Caching.BlockCount = 127 - execConfig.Caching.BlockAge = 0 - execConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 127 - execConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 - l2info, node, l2client, _, _, _, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, nil, execConfig, nil, nil) - cancel = func() { - defer requireClose(t, l1stack) - defer node.StopAndWait() - } - defer cancel() - execNode := getExecNode(t, node) - l2info.GenerateAccount("User2") - bc := execNode.Backend.ArbInterface().BlockChain() + + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + builder.execConfig.RPC.MaxRecreateStateDepth = arbitrum.InfiniteMaxRecreateStateDepth + builder.execConfig.Sequencer.MaxBlockSpeed = 0 + builder.execConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 + builder.execConfig.Caching.Archive = true + builder.execConfig.Caching.BlockCount = 127 + builder.execConfig.Caching.BlockAge = 0 + builder.execConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 127 + builder.execConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 + cleanup := builder.Build(t) + defer cleanup() + + builder.L2Info.GenerateAccount("User2") + bc := builder.L2.ExecNode.Backend.ArbInterface().BlockChain() var wg sync.WaitGroup quit := make(chan struct{}) @@ -45,13 +41,13 @@ func TestTrieDBCommitRace(t *testing.T) { for { select { default: - TransferBalance(t, "Faucet", "User2", common.Big1, l2info, l2client, ctx) + builder.L2.TransferBalance(t, "Faucet", "User2", common.Big1, builder.L2Info) case <-quit: return } } }() - api := execNode.Backend.APIBackend() + api := builder.L2.ExecNode.Backend.APIBackend() blockNumber := 1 for i := 0; i < 5; i++ { var roots []common.Hash diff --git a/system_tests/twonodes_test.go b/system_tests/twonodes_test.go index 6280a4a575..c8e348cffb 100644 --- a/system_tests/twonodes_test.go +++ b/system_tests/twonodes_test.go @@ -20,24 +20,27 @@ func testTwoNodesSimple(t *testing.T, dasModeStr string) { 
chainConfig, l1NodeConfigA, lifecycleManager, _, dasSignerKey := setupConfigWithDAS(t, ctx, dasModeStr) defer lifecycleManager.StopAndWaitUntil(time.Second) - l2info, nodeA, l2clientA, l1info, _, l1client, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, l1NodeConfigA, nil, chainConfig, nil) - defer requireClose(t, l1stack) - defer nodeA.StopAndWait() - - authorizeDASKeyset(t, ctx, dasSignerKey, l1info, l1client) + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + builder.nodeConfig = l1NodeConfigA + builder.chainConfig = chainConfig + builder.L2Info = nil + cleanup := builder.Build(t) + defer cleanup() + + authorizeDASKeyset(t, ctx, dasSignerKey, builder.L1Info, builder.L1.Client) l1NodeConfigBDataAvailability := l1NodeConfigA.DataAvailability l1NodeConfigBDataAvailability.RPCAggregator.Enable = false - l2clientB, nodeB := Create2ndNode(t, ctx, nodeA, l1stack, l1info, &l2info.ArbInitData, &l1NodeConfigBDataAvailability) - defer nodeB.StopAndWait() + testClientB, cleanupB := builder.Build2ndNode(t, &SecondNodeParams{dasConfig: &l1NodeConfigBDataAvailability}) + defer cleanupB() - l2info.GenerateAccount("User2") + builder.L2Info.GenerateAccount("User2") - tx := l2info.PrepareTx("Owner", "User2", l2info.TransferGas, big.NewInt(1e12), nil) + tx := builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, big.NewInt(1e12), nil) - err := l2clientA.SendTransaction(ctx, tx) + err := builder.L2.Client.SendTransaction(ctx, tx) Require(t, err) - _, err = EnsureTxSucceeded(ctx, l2clientA, tx) + _, err = builder.L2.EnsureTxSucceeded(tx) Require(t, err) // give the inbox reader a bit of time to pick up the delayed message @@ -45,15 +48,15 @@ func testTwoNodesSimple(t *testing.T, dasModeStr string) { // sending l1 messages creates l1 blocks.. 
make enough to get that delayed inbox message in for i := 0; i < 30; i++ { - SendWaitTestTransactions(t, ctx, l1client, []*types.Transaction{ - l1info.PrepareTx("Faucet", "User", 30000, big.NewInt(1e12), nil), + builder.L1.SendWaitTestTransactions(t, []*types.Transaction{ + builder.L1Info.PrepareTx("Faucet", "User", 30000, big.NewInt(1e12), nil), }) } - _, err = WaitForTx(ctx, l2clientB, tx.Hash(), time.Second*5) + _, err = WaitForTx(ctx, testClientB.Client, tx.Hash(), time.Second*5) Require(t, err) - l2balance, err := l2clientB.BalanceAt(ctx, l2info.GetAddress("User2"), nil) + l2balance, err := testClientB.Client.BalanceAt(ctx, builder.L2Info.GetAddress("User2"), nil) Require(t, err) if l2balance.Cmp(big.NewInt(1e12)) != 0 { diff --git a/system_tests/twonodeslong_test.go b/system_tests/twonodeslong_test.go index 16c369df46..09203e3bcd 100644 --- a/system_tests/twonodeslong_test.go +++ b/system_tests/twonodeslong_test.go @@ -42,32 +42,36 @@ func testTwoNodesLong(t *testing.T, dasModeStr string) { chainConfig, l1NodeConfigA, lifecycleManager, _, dasSignerKey := setupConfigWithDAS(t, ctx, dasModeStr) defer lifecycleManager.StopAndWaitUntil(time.Second) - l2info, nodeA, l2client, l1info, l1backend, l1client, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, l1NodeConfigA, nil, chainConfig, nil) - defer requireClose(t, l1stack) + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + builder.nodeConfig = l1NodeConfigA + builder.chainConfig = chainConfig + builder.L2Info = nil + builder.Build(t) + defer requireClose(t, builder.L1.Stack) - authorizeDASKeyset(t, ctx, dasSignerKey, l1info, l1client) + authorizeDASKeyset(t, ctx, dasSignerKey, builder.L1Info, builder.L1.Client) l1NodeConfigBDataAvailability := l1NodeConfigA.DataAvailability l1NodeConfigBDataAvailability.RPCAggregator.Enable = false - l2clientB, nodeB := Create2ndNode(t, ctx, nodeA, l1stack, l1info, &l2info.ArbInitData, &l1NodeConfigBDataAvailability) - defer nodeB.StopAndWait() + testClientB, cleanupB 
:= builder.Build2ndNode(t, &SecondNodeParams{dasConfig: &l1NodeConfigBDataAvailability}) + defer cleanupB() - l2info.GenerateAccount("DelayedFaucet") - l2info.GenerateAccount("DelayedReceiver") - l2info.GenerateAccount("DirectReceiver") + builder.L2Info.GenerateAccount("DelayedFaucet") + builder.L2Info.GenerateAccount("DelayedReceiver") + builder.L2Info.GenerateAccount("DirectReceiver") - l2info.GenerateAccount("ErrorTxSender") + builder.L2Info.GenerateAccount("ErrorTxSender") - SendWaitTestTransactions(t, ctx, l2client, []*types.Transaction{ - l2info.PrepareTx("Faucet", "ErrorTxSender", l2info.TransferGas, big.NewInt(l2pricing.InitialBaseFeeWei*int64(l2info.TransferGas)), nil), + builder.L2.SendWaitTestTransactions(t, []*types.Transaction{ + builder.L2Info.PrepareTx("Faucet", "ErrorTxSender", builder.L2Info.TransferGas, big.NewInt(l2pricing.InitialBaseFeeWei*int64(builder.L2Info.TransferGas)), nil), }) delayedMsgsToSendMax := big.NewInt(int64(largeLoops * avgDelayedMessagesPerLoop * 10)) delayedFaucetNeeds := new(big.Int).Mul(new(big.Int).Add(fundsPerDelayed, new(big.Int).SetUint64(l2pricing.InitialBaseFeeWei*100000)), delayedMsgsToSendMax) - SendWaitTestTransactions(t, ctx, l2client, []*types.Transaction{ - l2info.PrepareTx("Faucet", "DelayedFaucet", l2info.TransferGas, delayedFaucetNeeds, nil), + builder.L2.SendWaitTestTransactions(t, []*types.Transaction{ + builder.L2Info.PrepareTx("Faucet", "DelayedFaucet", builder.L2Info.TransferGas, delayedFaucetNeeds, nil), }) - delayedFaucetBalance, err := l2client.BalanceAt(ctx, l2info.GetAddress("DelayedFaucet"), nil) + delayedFaucetBalance, err := builder.L2.Client.BalanceAt(ctx, builder.L2Info.GetAddress("DelayedFaucet"), nil) Require(t, err) if delayedFaucetBalance.Cmp(delayedFaucetNeeds) != 0 { @@ -85,17 +89,17 @@ func testTwoNodesLong(t *testing.T, dasModeStr string) { randNum := rand.Int() % avgTotalL1MessagesPerLoop var l1tx *types.Transaction if randNum < avgDelayedMessagesPerLoop { - delayedTx := 
l2info.PrepareTx("DelayedFaucet", "DelayedReceiver", 30001, fundsPerDelayed, nil) - l1tx = WrapL2ForDelayed(t, delayedTx, l1info, "User", 100000) + delayedTx := builder.L2Info.PrepareTx("DelayedFaucet", "DelayedReceiver", 30001, fundsPerDelayed, nil) + l1tx = WrapL2ForDelayed(t, delayedTx, builder.L1Info, "User", 100000) delayedTxs = append(delayedTxs, delayedTx) delayedTransfers++ } else { - l1tx = l1info.PrepareTx("Faucet", "User", 30000, big.NewInt(1e12), nil) + l1tx = builder.L1Info.PrepareTx("Faucet", "User", 30000, big.NewInt(1e12), nil) } l1Txs = append(l1Txs, l1tx) } // adding multiple messages in the same AddLocal to get them in the same L1 block - errs := l1backend.TxPool().AddLocals(l1Txs) + errs := builder.L1.L1Backend.TxPool().AddLocals(l1Txs) for _, err := range errs { if err != nil { Fatal(t, err) @@ -104,26 +108,26 @@ func testTwoNodesLong(t *testing.T, dasModeStr string) { l2TxsThisTime := rand.Int() % (avgL2MsgsPerLoop * 2) l2Txs := make([]*types.Transaction, 0, l2TxsThisTime) for len(l2Txs) < l2TxsThisTime { - l2Txs = append(l2Txs, l2info.PrepareTx("Faucet", "DirectReceiver", l2info.TransferGas, fundsPerDirect, nil)) + l2Txs = append(l2Txs, builder.L2Info.PrepareTx("Faucet", "DirectReceiver", builder.L2Info.TransferGas, fundsPerDirect, nil)) } - SendWaitTestTransactions(t, ctx, l2client, l2Txs) + builder.L2.SendWaitTestTransactions(t, l2Txs) directTransfers += int64(l2TxsThisTime) if len(l1Txs) > 0 { - _, err := EnsureTxSucceeded(ctx, l1client, l1Txs[len(l1Txs)-1]) + _, err := builder.L1.EnsureTxSucceeded(l1Txs[len(l1Txs)-1]) if err != nil { Fatal(t, err) } } // create bad tx on delayed inbox - l2info.GetInfoWithPrivKey("ErrorTxSender").Nonce = 10 - SendWaitTestTransactions(t, ctx, l1client, []*types.Transaction{ - WrapL2ForDelayed(t, l2info.PrepareTx("ErrorTxSender", "DelayedReceiver", 30002, delayedFaucetNeeds, nil), l1info, "User", 100000), + builder.L2Info.GetInfoWithPrivKey("ErrorTxSender").Nonce = 10 + builder.L1.SendWaitTestTransactions(t, 
[]*types.Transaction{ + WrapL2ForDelayed(t, builder.L2Info.PrepareTx("ErrorTxSender", "DelayedReceiver", 30002, delayedFaucetNeeds, nil), builder.L1Info, "User", 100000), }) extrBlocksThisTime := rand.Int() % (avgExtraBlocksPerLoop * 2) for i := 0; i < extrBlocksThisTime; i++ { - SendWaitTestTransactions(t, ctx, l1client, []*types.Transaction{ - l1info.PrepareTx("Faucet", "User", 30000, big.NewInt(1e12), nil), + builder.L1.SendWaitTestTransactions(t, []*types.Transaction{ + builder.L1Info.PrepareTx("Faucet", "User", 30000, big.NewInt(1e12), nil), }) } } @@ -137,45 +141,45 @@ func testTwoNodesLong(t *testing.T, dasModeStr string) { for i := 0; i < finalPropagateLoops; i++ { var tx *types.Transaction for j := 0; j < 30; j++ { - tx = l1info.PrepareTx("Faucet", "User", 30000, big.NewInt(1e12), nil) - err := l1client.SendTransaction(ctx, tx) + tx = builder.L1Info.PrepareTx("Faucet", "User", 30000, big.NewInt(1e12), nil) + err := builder.L1.Client.SendTransaction(ctx, tx) if err != nil { Fatal(t, err) } - _, err = EnsureTxSucceeded(ctx, l1client, tx) + _, err = builder.L1.EnsureTxSucceeded(tx) if err != nil { Fatal(t, err) } } } - _, err = EnsureTxSucceededWithTimeout(ctx, l2client, delayedTxs[len(delayedTxs)-1], time.Second*10) + _, err = builder.L2.EnsureTxSucceededWithTimeout(delayedTxs[len(delayedTxs)-1], time.Second*10) Require(t, err, "Failed waiting for Tx on main node") - _, err = EnsureTxSucceededWithTimeout(ctx, l2clientB, delayedTxs[len(delayedTxs)-1], time.Second*10) + _, err = testClientB.EnsureTxSucceededWithTimeout(delayedTxs[len(delayedTxs)-1], time.Second*10) Require(t, err, "Failed waiting for Tx on secondary node") - delayedBalance, err := l2clientB.BalanceAt(ctx, l2info.GetAddress("DelayedReceiver"), nil) + delayedBalance, err := testClientB.Client.BalanceAt(ctx, builder.L2Info.GetAddress("DelayedReceiver"), nil) Require(t, err) - directBalance, err := l2clientB.BalanceAt(ctx, l2info.GetAddress("DirectReceiver"), nil) + directBalance, err := 
testClientB.Client.BalanceAt(ctx, builder.L2Info.GetAddress("DirectReceiver"), nil) Require(t, err) delayedExpectd := new(big.Int).Mul(fundsPerDelayed, big.NewInt(delayedTransfers)) directExpectd := new(big.Int).Mul(fundsPerDirect, big.NewInt(directTransfers)) if (delayedBalance.Cmp(delayedExpectd) != 0) || (directBalance.Cmp(directExpectd) != 0) { t.Error("delayed balance", delayedBalance, "expected", delayedExpectd, "transfers", delayedTransfers) t.Error("direct balance", directBalance, "expected", directExpectd, "transfers", directTransfers) - ownerBalance, _ := l2clientB.BalanceAt(ctx, l2info.GetAddress("Owner"), nil) - delayedFaucetBalance, _ := l2clientB.BalanceAt(ctx, l2info.GetAddress("DelayedFaucet"), nil) + ownerBalance, _ := testClientB.Client.BalanceAt(ctx, builder.L2Info.GetAddress("Owner"), nil) + delayedFaucetBalance, _ := testClientB.Client.BalanceAt(ctx, builder.L2Info.GetAddress("DelayedFaucet"), nil) t.Error("owner balance", ownerBalance, "delayed faucet", delayedFaucetBalance) Fatal(t, "Unexpected balance") } - nodeA.StopAndWait() + builder.L2.ConsensusNode.StopAndWait() - if nodeB.BlockValidator != nil { - lastBlockHeader, err := l2clientB.HeaderByNumber(ctx, nil) + if testClientB.ConsensusNode.BlockValidator != nil { + lastBlockHeader, err := testClientB.Client.HeaderByNumber(ctx, nil) Require(t, err) timeout := getDeadlineTimeout(t, time.Minute*30) // messageindex is same as block number here - if !nodeB.BlockValidator.WaitForPos(t, ctx, arbutil.MessageIndex(lastBlockHeader.Number.Uint64()), timeout) { + if !testClientB.ConsensusNode.BlockValidator.WaitForPos(t, ctx, arbutil.MessageIndex(lastBlockHeader.Number.Uint64()), timeout) { Fatal(t, "did not validate all blocks") } } From e8f06193fa3c0641258ea2e961eaaa30852823b0 Mon Sep 17 00:00:00 2001 From: Joshua Colvin Date: Wed, 25 Oct 2023 19:21:18 -0700 Subject: [PATCH 50/64] Prevent double `v` in reported version https://github.com/OffchainLabs/nitro/issues/1935 Ensure that 
`web3_clientVersion` does not have extra `v` included --- cmd/nitro-val/nitro_val.go | 4 ++++ cmd/nitro/nitro.go | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/cmd/nitro-val/nitro_val.go b/cmd/nitro-val/nitro_val.go index 43b1c1d206..6bcb55f055 100644 --- a/cmd/nitro-val/nitro_val.go +++ b/cmd/nitro-val/nitro_val.go @@ -75,6 +75,10 @@ func mainImpl() int { stackConf.P2P.NoDiscovery = true vcsRevision, vcsTime := confighelpers.GetVersion() stackConf.Version = vcsRevision + if stackConf.Version[0] == 'v' { + // Skip leading "v" in version string + stackConf.Version = stackConf.Version[1:] + } pathResolver := func(workdir string) func(string) string { if workdir == "" { diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go index 80b21e5ebe..426f514c14 100644 --- a/cmd/nitro/nitro.go +++ b/cmd/nitro/nitro.go @@ -179,6 +179,10 @@ func mainImpl() int { stackConf.P2P.NoDiscovery = true vcsRevision, vcsTime := confighelpers.GetVersion() stackConf.Version = vcsRevision + if stackConf.Version[0] == 'v' { + // Skip leading "v" in version string + stackConf.Version = stackConf.Version[1:] + } pathResolver := func(workdir string) func(string) string { if workdir == "" { From ed5694bea32a8159b6551bebc1f5856af8cd041a Mon Sep 17 00:00:00 2001 From: Joshua Colvin Date: Wed, 25 Oct 2023 20:41:37 -0700 Subject: [PATCH 51/64] Fix issue attempting to access empty string Also remove code duplication --- cmd/daserver/daserver.go | 2 +- cmd/genericconf/getversion18.go | 9 +++++++-- cmd/nitro-val/nitro_val.go | 8 ++------ cmd/nitro/nitro.go | 8 ++------ cmd/relay/relay.go | 2 +- cmd/util/confighelpers/configuration.go | 4 ++-- 6 files changed, 15 insertions(+), 18 deletions(-) diff --git a/cmd/daserver/daserver.go b/cmd/daserver/daserver.go index b2f8728a7d..07481651b2 100644 --- a/cmd/daserver/daserver.go +++ b/cmd/daserver/daserver.go @@ -239,7 +239,7 @@ func startup() error { dasLifecycleManager.Register(&L1ReaderCloser{l1Reader}) } - vcsRevision, vcsTime := 
confighelpers.GetVersion() + vcsRevision, _, vcsTime := confighelpers.GetVersion() var rpcServer *http.Server if serverConfig.EnableRPC { log.Info("Starting HTTP-RPC server", "addr", serverConfig.RPCAddr, "port", serverConfig.RPCPort, "revision", vcsRevision, "vcs.time", vcsTime) diff --git a/cmd/genericconf/getversion18.go b/cmd/genericconf/getversion18.go index 2c183f6879..4aabae91ef 100644 --- a/cmd/genericconf/getversion18.go +++ b/cmd/genericconf/getversion18.go @@ -7,7 +7,7 @@ package genericconf import "runtime/debug" -func GetVersion(definedVersion string, definedTime string, definedModified string) (string, string) { +func GetVersion(definedVersion string, definedTime string, definedModified string) (string, string, string) { vcsVersion := "development" vcsTime := "development" vcsModified := "false" @@ -43,5 +43,10 @@ func GetVersion(definedVersion string, definedTime string, definedModified strin vcsVersion = vcsVersion + "-modified" } - return vcsVersion, vcsTime + strippedVersion := vcsVersion + if len(strippedVersion) > 0 && strippedVersion[0] == 'v' { + strippedVersion = strippedVersion[1:] + } + + return vcsVersion, strippedVersion, vcsTime } diff --git a/cmd/nitro-val/nitro_val.go b/cmd/nitro-val/nitro_val.go index 6bcb55f055..20b8b23628 100644 --- a/cmd/nitro-val/nitro_val.go +++ b/cmd/nitro-val/nitro_val.go @@ -73,12 +73,8 @@ func mainImpl() int { stackConf.P2P.ListenAddr = "" stackConf.P2P.NoDial = true stackConf.P2P.NoDiscovery = true - vcsRevision, vcsTime := confighelpers.GetVersion() - stackConf.Version = vcsRevision - if stackConf.Version[0] == 'v' { - // Skip leading "v" in version string - stackConf.Version = stackConf.Version[1:] - } + vcsRevision, strippedRevision, vcsTime := confighelpers.GetVersion() + stackConf.Version = strippedRevision pathResolver := func(workdir string) func(string) string { if workdir == "" { diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go index 426f514c14..72668c60e8 100644 --- a/cmd/nitro/nitro.go +++ 
b/cmd/nitro/nitro.go @@ -177,12 +177,8 @@ func mainImpl() int { stackConf.P2P.ListenAddr = "" stackConf.P2P.NoDial = true stackConf.P2P.NoDiscovery = true - vcsRevision, vcsTime := confighelpers.GetVersion() - stackConf.Version = vcsRevision - if stackConf.Version[0] == 'v' { - // Skip leading "v" in version string - stackConf.Version = stackConf.Version[1:] - } + vcsRevision, strippedRevision, vcsTime := confighelpers.GetVersion() + stackConf.Version = strippedRevision pathResolver := func(workdir string) func(string) string { if workdir == "" { diff --git a/cmd/relay/relay.go b/cmd/relay/relay.go index 552838308d..b25aadf57b 100644 --- a/cmd/relay/relay.go +++ b/cmd/relay/relay.go @@ -76,7 +76,7 @@ func startup() error { glogger.Verbosity(log.Lvl(relayConfig.LogLevel)) log.Root().SetHandler(glogger) - vcsRevision, vcsTime := confighelpers.GetVersion() + vcsRevision, _, vcsTime := confighelpers.GetVersion() log.Info("Running Arbitrum nitro relay", "revision", vcsRevision, "vcs.time", vcsTime) defer log.Info("Cleanly shutting down relay") diff --git a/cmd/util/confighelpers/configuration.go b/cmd/util/confighelpers/configuration.go index 6116a492c9..85a8f4adef 100644 --- a/cmd/util/confighelpers/configuration.go +++ b/cmd/util/confighelpers/configuration.go @@ -118,12 +118,12 @@ func loadS3Variables(k *koanf.Koanf) error { var ErrVersion = errors.New("configuration: version requested") -func GetVersion() (string, string) { +func GetVersion() (string, string, string) { return genericconf.GetVersion(version, datetime, modified) } func PrintErrorAndExit(err error, usage func(string)) { - vcsRevision, vcsTime := GetVersion() + vcsRevision, _, vcsTime := GetVersion() fmt.Printf("Version: %v, time: %v\n", vcsRevision, vcsTime) if err != nil && errors.Is(err, ErrVersion) { // Already printed version, just exit From daea49bdf4d0f46ea66611bf0abf6025554f2a98 Mon Sep 17 00:00:00 2001 From: Joshua Colvin Date: Wed, 25 Oct 2023 22:02:02 -0700 Subject: [PATCH 52/64] Update geth 
pin Update geth pin to include the following changes: OffchainLabs/go-ethereum#255 OffchainLabs/go-ethereum#262 OffchainLabs/go-ethereum#263 --- go-ethereum | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go-ethereum b/go-ethereum index e8c8827c0b..859182f2fa 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit e8c8827c0b9e22e60829da1945cba9c451cda85a +Subproject commit 859182f2fa2d33c03fba5e29e1e750d3f49525fe From e88f53e12a99e8c8ef553945779f10d0f3e56ba6 Mon Sep 17 00:00:00 2001 From: Ganesh Vanahalli Date: Thu, 26 Oct 2023 09:40:19 -0500 Subject: [PATCH 53/64] Update wsbroadcastserver.go --- wsbroadcastserver/wsbroadcastserver.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/wsbroadcastserver/wsbroadcastserver.go b/wsbroadcastserver/wsbroadcastserver.go index c26a910144..d51b368400 100644 --- a/wsbroadcastserver/wsbroadcastserver.go +++ b/wsbroadcastserver/wsbroadcastserver.go @@ -208,11 +208,7 @@ func (s *WSBroadcastServer) Start(ctx context.Context) error { err := s.StartWithHeader(ctx, header) elapsed := time.Since(startTime) startWithHeaderTimer.Update(elapsed) - - if err != nil { - return err - } - return nil + return err } func (s *WSBroadcastServer) StartWithHeader(ctx context.Context, header ws.HandshakeHeader) error { From 756ef6ff323c6e133882e04cf48f5abecaa0cb50 Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Thu, 26 Oct 2023 10:43:52 -0500 Subject: [PATCH 54/64] code refactor --- broadcastclients/broadcastclients.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/broadcastclients/broadcastclients.go b/broadcastclients/broadcastclients.go index f508404799..acfcf8045b 100644 --- a/broadcastclients/broadcastclients.go +++ b/broadcastclients/broadcastclients.go @@ -201,7 +201,7 @@ func (bcs *BroadcastClients) Start(ctx context.Context) { // failed to get messages from both primary and secondary feeds for ~5 seconds, start a new secondary feed case 
<-startSecondaryFeedTimer.C: - bcs.StartSecondaryFeed(ctx) + bcs.startSecondaryFeed(ctx) // failed to get messages from primary feed for ~5 seconds, reset the timer responsible for stopping a secondary case <-primaryFeedIsDownTimer.C: @@ -209,13 +209,13 @@ func (bcs *BroadcastClients) Start(ctx context.Context) { // primary feeds have been up and running for PRIMARY_FEED_UPTIME=10 mins without a failure, stop the recently started secondary feed case <-stopSecondaryFeedTimer.C: - bcs.StopSecondaryFeed(ctx) + bcs.stopSecondaryFeed(ctx) } } }) } -func (bcs *BroadcastClients) StartSecondaryFeed(ctx context.Context) { +func (bcs *BroadcastClients) startSecondaryFeed(ctx context.Context) { if bcs.numOfStartedSecondary < len(bcs.secondaryClients) { client := bcs.secondaryClients[bcs.numOfStartedSecondary] bcs.numOfStartedSecondary += 1 @@ -224,7 +224,7 @@ func (bcs *BroadcastClients) StartSecondaryFeed(ctx context.Context) { log.Warn("failed to start a new secondary feed all available secondary feeds were started") } } -func (bcs *BroadcastClients) StopSecondaryFeed(ctx context.Context) { +func (bcs *BroadcastClients) stopSecondaryFeed(ctx context.Context) { if bcs.numOfStartedSecondary > 0 { bcs.numOfStartedSecondary -= 1 client := bcs.secondaryClients[bcs.numOfStartedSecondary] From cf82b27fe65a2b7ac043a42ff4b9896686b3f412 Mon Sep 17 00:00:00 2001 From: Goran Vladika Date: Thu, 26 Oct 2023 18:59:44 +0200 Subject: [PATCH 55/64] Update nitro-contracts to v1.1.0 --- contracts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contracts b/contracts index b22f93c7a1..1a94dabd80 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit b22f93c7a1322fb8063ad71d58acb37416d71146 +Subproject commit 1a94dabd805673e4c85e4071662814a142b20893 From 1969387881cf637f4495d32c4c1c978daa468c06 Mon Sep 17 00:00:00 2001 From: Goran Vladika Date: Fri, 27 Oct 2023 09:09:13 +0200 Subject: [PATCH 56/64] Update maxDataSize param description --- cmd/deploy/deploy.go | 2 
+- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/deploy/deploy.go b/cmd/deploy/deploy.go index d726ad21f4..0b72038908 100644 --- a/cmd/deploy/deploy.go +++ b/cmd/deploy/deploy.go @@ -41,7 +41,7 @@ func main() { ownerAddressString := flag.String("ownerAddress", "", "the rollup owner's address") sequencerAddressString := flag.String("sequencerAddress", "", "the sequencer's address") nativeTokenAddressString := flag.String("nativeTokenAddress", "0x0000000000000000000000000000000000000000", "address of the ERC20 token which is used as native L2 currency") - maxDataSizeUint := flag.Uint64("maxDataSize", 117964, "maximum size of data") + maxDataSizeUint := flag.Uint64("maxDataSize", 117964, "maximum data size of a batch or a cross-chain message (default = 90% of Geth's 128KB tx size limit)") loserEscrowAddressString := flag.String("loserEscrowAddress", "", "the address which half of challenge loser's funds accumulate at") wasmmoduleroot := flag.String("wasmmoduleroot", "", "WASM module root hash") wasmrootpath := flag.String("wasmrootpath", "", "path to machine folders") From 1e5a97b2139a6df1793af90143b180c326929ef8 Mon Sep 17 00:00:00 2001 From: Goran Vladika Date: Fri, 27 Oct 2023 13:46:38 +0200 Subject: [PATCH 57/64] Turn on feature to deploy the L2 utility factories as part of rollup creation process --- arbnode/node.go | 25 ++++++++++++++++++++++--- 1 file changed, 22 insertions(+), 3 deletions(-) diff --git a/arbnode/node.go b/arbnode/node.go index 0782e8ecb7..b026284ef1 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -41,6 +41,7 @@ import ( "github.com/offchainlabs/nitro/solgen/go/ospgen" "github.com/offchainlabs/nitro/solgen/go/precompilesgen" "github.com/offchainlabs/nitro/solgen/go/rollupgen" + "github.com/offchainlabs/nitro/solgen/go/test_helpersgen" "github.com/offchainlabs/nitro/solgen/go/upgrade_executorgen" "github.com/offchainlabs/nitro/staker" "github.com/offchainlabs/nitro/staker/validatorwallet" @@ -292,7 +293,7 @@ func 
DeployOnL1(ctx context.Context, parentChainReader *headerreader.HeaderReade return nil, errors.New("no machine specified") } - rollupCreator, _, validatorUtils, validatorWalletCreator, err := deployRollupCreator(ctx, parentChainReader, deployAuth, maxDataSize) + rollupCreator, rollupCreatorAddress, validatorUtils, validatorWalletCreator, err := deployRollupCreator(ctx, parentChainReader, deployAuth, maxDataSize) if err != nil { return nil, fmt.Errorf("error deploying rollup creator: %w", err) } @@ -302,14 +303,32 @@ func DeployOnL1(ctx context.Context, parentChainReader *headerreader.HeaderReade validatorAddrs = append(validatorAddrs, crypto.CreateAddress(validatorWalletCreator, i)) } + // 0.13 ETH is enough to deploy L2 factories via retryables. Excess is refunded + feeCost := big.NewInt(130000000000000000) + // if there is a fee token, approve the rollup creator to spend it + if (nativeToken != common.Address{}) { + // use ERC20 binding to approve + nativeTokenContract, err := test_helpersgen.NewTestToken(nativeToken, parentChainReader.Client()) + if err != nil { + return nil, fmt.Errorf("error binding native token: %w", err) + } + tx, err := nativeTokenContract.Approve(deployAuth, rollupCreatorAddress, feeCost) + err = andTxSucceeded(ctx, parentChainReader, tx, err) + if err != nil { + return nil, fmt.Errorf("error calling approve: %w", err) + } + feeCost = big.NewInt(0) + } + + deployAuth.Value = feeCost deployParams := rollupgen.RollupCreatorRollupDeploymentParams{ Config: config, BatchPoster: batchPoster, Validators: validatorAddrs, MaxDataSize: maxDataSize, NativeToken: nativeToken, - DeployFactoriesToL2: false, - MaxFeePerGasForRetryables: big.NewInt(0), // needed when utility factories are deployed + DeployFactoriesToL2: true, + MaxFeePerGasForRetryables: big.NewInt(100000000), // 0.1 gwei } tx, err := rollupCreator.CreateRollup( From e1eb6b520557fd603ea4b6948a0886f826bc0cff Mon Sep 17 00:00:00 2001 From: Goran Vladika Date: Fri, 27 Oct 2023 14:31:00 +0200 
Subject: [PATCH 58/64] Revert "Turn on feature to deploy the L2 utility factories as part of rollup creation process" This reverts commit 1e5a97b2139a6df1793af90143b180c326929ef8. --- arbnode/node.go | 25 +++---------------------- 1 file changed, 3 insertions(+), 22 deletions(-) diff --git a/arbnode/node.go b/arbnode/node.go index b026284ef1..0782e8ecb7 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -41,7 +41,6 @@ import ( "github.com/offchainlabs/nitro/solgen/go/ospgen" "github.com/offchainlabs/nitro/solgen/go/precompilesgen" "github.com/offchainlabs/nitro/solgen/go/rollupgen" - "github.com/offchainlabs/nitro/solgen/go/test_helpersgen" "github.com/offchainlabs/nitro/solgen/go/upgrade_executorgen" "github.com/offchainlabs/nitro/staker" "github.com/offchainlabs/nitro/staker/validatorwallet" @@ -293,7 +292,7 @@ func DeployOnL1(ctx context.Context, parentChainReader *headerreader.HeaderReade return nil, errors.New("no machine specified") } - rollupCreator, rollupCreatorAddress, validatorUtils, validatorWalletCreator, err := deployRollupCreator(ctx, parentChainReader, deployAuth, maxDataSize) + rollupCreator, _, validatorUtils, validatorWalletCreator, err := deployRollupCreator(ctx, parentChainReader, deployAuth, maxDataSize) if err != nil { return nil, fmt.Errorf("error deploying rollup creator: %w", err) } @@ -303,32 +302,14 @@ func DeployOnL1(ctx context.Context, parentChainReader *headerreader.HeaderReade validatorAddrs = append(validatorAddrs, crypto.CreateAddress(validatorWalletCreator, i)) } - // 0.13 ETH is enough to deploy L2 factories via retryables. 
Excess is refunded - feeCost := big.NewInt(130000000000000000) - // if there is a fee token, approve the rollup creator to spend it - if (nativeToken != common.Address{}) { - // use ERC20 binding to approve - nativeTokenContract, err := test_helpersgen.NewTestToken(nativeToken, parentChainReader.Client()) - if err != nil { - return nil, fmt.Errorf("error binding native token: %w", err) - } - tx, err := nativeTokenContract.Approve(deployAuth, rollupCreatorAddress, feeCost) - err = andTxSucceeded(ctx, parentChainReader, tx, err) - if err != nil { - return nil, fmt.Errorf("error calling approve: %w", err) - } - feeCost = big.NewInt(0) - } - - deployAuth.Value = feeCost deployParams := rollupgen.RollupCreatorRollupDeploymentParams{ Config: config, BatchPoster: batchPoster, Validators: validatorAddrs, MaxDataSize: maxDataSize, NativeToken: nativeToken, - DeployFactoriesToL2: true, - MaxFeePerGasForRetryables: big.NewInt(100000000), // 0.1 gwei + DeployFactoriesToL2: false, + MaxFeePerGasForRetryables: big.NewInt(0), // needed when utility factories are deployed } tx, err := rollupCreator.CreateRollup( From 0871ed186db11cbda2e62842335bb33c82db63bd Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Fri, 27 Oct 2023 11:17:04 -0500 Subject: [PATCH 59/64] Add pause to stopwaiter to enable restart after stopping --- broadcastclients/broadcastclients.go | 5 +++-- util/stopwaiter/stopwaiter.go | 20 ++++++++++++++++++++ 2 files changed, 23 insertions(+), 2 deletions(-) diff --git a/broadcastclients/broadcastclients.go b/broadcastclients/broadcastclients.go index acfcf8045b..52346ecbb3 100644 --- a/broadcastclients/broadcastclients.go +++ b/broadcastclients/broadcastclients.go @@ -220,7 +220,7 @@ func (bcs *BroadcastClients) startSecondaryFeed(ctx context.Context) { client := bcs.secondaryClients[bcs.numOfStartedSecondary] bcs.numOfStartedSecondary += 1 client.Start(ctx) - } else { + } else if len(bcs.secondaryClients) > 0 { log.Warn("failed to start a new secondary feed all 
available secondary feeds were started") } } @@ -228,7 +228,8 @@ func (bcs *BroadcastClients) stopSecondaryFeed(ctx context.Context) { if bcs.numOfStartedSecondary > 0 { bcs.numOfStartedSecondary -= 1 client := bcs.secondaryClients[bcs.numOfStartedSecondary] - client.StopAndWait() + client.Pause() + log.Info("disconnected secondary feed") } } diff --git a/util/stopwaiter/stopwaiter.go b/util/stopwaiter/stopwaiter.go index 1e70e328eb..e279e333a4 100644 --- a/util/stopwaiter/stopwaiter.go +++ b/util/stopwaiter/stopwaiter.go @@ -116,6 +116,20 @@ func (s *StopWaiterSafe) StopAndWait() error { return s.stopAndWaitImpl(stopDelayWarningTimeout) } +// Pause calls StopAndWait but updates started and stopped booleans to default. Only call if you want to restart the stopwaiter +func (s *StopWaiterSafe) Pause() error { + if err := s.stopAndWaitImpl(stopDelayWarningTimeout); err != nil { + return err + } + + s.mutex.Lock() + defer s.mutex.Unlock() + s.started = false + s.stopped = false + + return nil +} + func getAllStackTraces() string { buf := make([]byte, 64*1024*1024) size := runtime.Stack(buf, true) @@ -326,6 +340,12 @@ func (s *StopWaiter) StopAndWait() { } } +func (s *StopWaiter) Pause() { + if err := s.StopWaiterSafe.Pause(); err != nil { + panic(err) + } +} + // If stop was already called, thread might silently not be launched func (s *StopWaiter) LaunchThread(foo func(context.Context)) { if err := s.StopWaiterSafe.LaunchThreadSafe(foo); err != nil { From dd8a25655bffcf25d9493ad29248308f32d66ea4 Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Fri, 27 Oct 2023 15:34:12 -0500 Subject: [PATCH 60/64] remove pause implementation and code refactor --- broadcastclients/broadcastclients.go | 88 +++++++++++++++------------- util/stopwaiter/stopwaiter.go | 20 ------- 2 files changed, 46 insertions(+), 62 deletions(-) diff --git a/broadcastclients/broadcastclients.go b/broadcastclients/broadcastclients.go index 52346ecbb3..7fef07be71 100644 --- 
a/broadcastclients/broadcastclients.go +++ b/broadcastclients/broadcastclients.go @@ -42,6 +42,7 @@ func (r *Router) AddBroadcastMessages(feedMessages []*broadcaster.BroadcastFeedM type BroadcastClients struct { primaryClients []*broadcastclient.BroadcastClient secondaryClients []*broadcastclient.BroadcastClient + secondaryURL []string numOfStartedSecondary int primaryRouter *Router @@ -51,6 +52,8 @@ type BroadcastClients struct { connected int32 } +var makeClient func(string, *Router) (*broadcastclient.BroadcastClient, error) + func NewBroadcastClients( configFetcher broadcastclient.ConfigFetcher, l2ChainId uint64, @@ -75,44 +78,36 @@ func NewBroadcastClients( clients := BroadcastClients{ primaryRouter: newStandardRouter(), secondaryRouter: newStandardRouter(), + secondaryURL: config.SecondaryURL, } var lastClientErr error - makeFeeds := func(url []string, router *Router) []*broadcastclient.BroadcastClient { - feeds := make([]*broadcastclient.BroadcastClient, 0, len(url)) - for _, address := range url { - client, err := broadcastclient.NewBroadcastClient( - configFetcher, - address, - l2ChainId, - currentMessageCount, - router, - router.confirmedSequenceNumberChan, - fatalErrChan, - addrVerifier, - func(delta int32) { clients.adjustCount(delta) }, - ) - if err != nil { - lastClientErr = err - log.Warn("init broadcast client failed", "address", address) - continue - } - feeds = append(feeds, client) - } - return feeds + makeClient = func(url string, router *Router) (*broadcastclient.BroadcastClient, error) { + return broadcastclient.NewBroadcastClient( + configFetcher, + url, + l2ChainId, + currentMessageCount, + router, + router.confirmedSequenceNumberChan, + fatalErrChan, + addrVerifier, + func(delta int32) { clients.adjustCount(delta) }, + ) } - clients.primaryClients = makeFeeds(config.URL, clients.primaryRouter) - clients.secondaryClients = makeFeeds(config.SecondaryURL, clients.secondaryRouter) - - if len(clients.primaryClients) == 0 && 
len(clients.secondaryClients) == 0 { - log.Error("no connected feed on startup, last error: %w", lastClientErr) - return nil, nil + clients.primaryClients = make([]*broadcastclient.BroadcastClient, 0, len(config.URL)) + for _, address := range config.URL { + client, err := makeClient(address, clients.primaryRouter) + if err != nil { + lastClientErr = err + log.Warn("init broadcast client failed", "address", address) + continue + } + clients.primaryClients = append(clients.primaryClients, client) } - - // have atleast one primary client if len(clients.primaryClients) == 0 { - clients.primaryClients = append(clients.primaryClients, clients.secondaryClients[0]) - clients.secondaryClients = clients.secondaryClients[1:] + log.Error("no connected feed on startup, last error: %w", lastClientErr) + return nil, nil } return &clients, nil @@ -209,27 +204,36 @@ func (bcs *BroadcastClients) Start(ctx context.Context) { // primary feeds have been up and running for PRIMARY_FEED_UPTIME=10 mins without a failure, stop the recently started secondary feed case <-stopSecondaryFeedTimer.C: - bcs.stopSecondaryFeed(ctx) + bcs.stopSecondaryFeed() } } }) } func (bcs *BroadcastClients) startSecondaryFeed(ctx context.Context) { - if bcs.numOfStartedSecondary < len(bcs.secondaryClients) { - client := bcs.secondaryClients[bcs.numOfStartedSecondary] + if bcs.numOfStartedSecondary < len(bcs.secondaryURL) { + pos := bcs.numOfStartedSecondary + url := bcs.secondaryURL[pos] + client, err := makeClient(url, bcs.secondaryRouter) + if err != nil { + log.Warn("init broadcast secondary client failed", "address", url) + bcs.secondaryURL = append(bcs.secondaryURL[:pos], bcs.secondaryURL[pos+1:]...) 
+ return + } bcs.numOfStartedSecondary += 1 + bcs.secondaryClients = append(bcs.secondaryClients, client) client.Start(ctx) - } else if len(bcs.secondaryClients) > 0 { + log.Info("secondary feed started", "url", url) + } else if len(bcs.secondaryURL) > 0 { log.Warn("failed to start a new secondary feed all available secondary feeds were started") } } -func (bcs *BroadcastClients) stopSecondaryFeed(ctx context.Context) { + +func (bcs *BroadcastClients) stopSecondaryFeed() { if bcs.numOfStartedSecondary > 0 { bcs.numOfStartedSecondary -= 1 - client := bcs.secondaryClients[bcs.numOfStartedSecondary] - client.Pause() - log.Info("disconnected secondary feed") + bcs.secondaryClients[bcs.numOfStartedSecondary].StopAndWait() + log.Info("disconnected secondary feed", "url", bcs.secondaryURL[bcs.numOfStartedSecondary]) } } @@ -237,7 +241,7 @@ func (bcs *BroadcastClients) StopAndWait() { for _, client := range bcs.primaryClients { client.StopAndWait() } - for i := 0; i < bcs.numOfStartedSecondary; i++ { - bcs.secondaryClients[i].StopAndWait() + for _, client := range bcs.secondaryClients { + client.StopAndWait() } } diff --git a/util/stopwaiter/stopwaiter.go b/util/stopwaiter/stopwaiter.go index e279e333a4..1e70e328eb 100644 --- a/util/stopwaiter/stopwaiter.go +++ b/util/stopwaiter/stopwaiter.go @@ -116,20 +116,6 @@ func (s *StopWaiterSafe) StopAndWait() error { return s.stopAndWaitImpl(stopDelayWarningTimeout) } -// Pause calls StopAndWait but updates started and stopped booleans to default. 
Only call if you want to restart the stopwaiter -func (s *StopWaiterSafe) Pause() error { - if err := s.stopAndWaitImpl(stopDelayWarningTimeout); err != nil { - return err - } - - s.mutex.Lock() - defer s.mutex.Unlock() - s.started = false - s.stopped = false - - return nil -} - func getAllStackTraces() string { buf := make([]byte, 64*1024*1024) size := runtime.Stack(buf, true) @@ -340,12 +326,6 @@ func (s *StopWaiter) StopAndWait() { } } -func (s *StopWaiter) Pause() { - if err := s.StopWaiterSafe.Pause(); err != nil { - panic(err) - } -} - // If stop was already called, thread might silently not be launched func (s *StopWaiter) LaunchThread(foo func(context.Context)) { if err := s.StopWaiterSafe.LaunchThreadSafe(foo); err != nil { From 1f0ac34e128bb048882f22b10b8753b96d5e31c3 Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Fri, 27 Oct 2023 15:59:33 -0500 Subject: [PATCH 61/64] code refactor --- broadcastclients/broadcastclients.go | 33 ++++++++++++++-------------- 1 file changed, 17 insertions(+), 16 deletions(-) diff --git a/broadcastclients/broadcastclients.go b/broadcastclients/broadcastclients.go index 7fef07be71..551dcdb462 100644 --- a/broadcastclients/broadcastclients.go +++ b/broadcastclients/broadcastclients.go @@ -40,10 +40,9 @@ func (r *Router) AddBroadcastMessages(feedMessages []*broadcaster.BroadcastFeedM } type BroadcastClients struct { - primaryClients []*broadcastclient.BroadcastClient - secondaryClients []*broadcastclient.BroadcastClient - secondaryURL []string - numOfStartedSecondary int + primaryClients []*broadcastclient.BroadcastClient + secondaryClients []*broadcastclient.BroadcastClient + secondaryURL []string primaryRouter *Router secondaryRouter *Router @@ -76,11 +75,12 @@ func NewBroadcastClients( } } clients := BroadcastClients{ - primaryRouter: newStandardRouter(), - secondaryRouter: newStandardRouter(), - secondaryURL: config.SecondaryURL, + primaryRouter: newStandardRouter(), + secondaryRouter: newStandardRouter(), + 
primaryClients: make([]*broadcastclient.BroadcastClient, 0, len(config.URL)), + secondaryClients: make([]*broadcastclient.BroadcastClient, 0, len(config.SecondaryURL)), + secondaryURL: config.SecondaryURL, } - var lastClientErr error makeClient = func(url string, router *Router) (*broadcastclient.BroadcastClient, error) { return broadcastclient.NewBroadcastClient( configFetcher, @@ -95,7 +95,7 @@ func NewBroadcastClients( ) } - clients.primaryClients = make([]*broadcastclient.BroadcastClient, 0, len(config.URL)) + var lastClientErr error for _, address := range config.URL { client, err := makeClient(address, clients.primaryRouter) if err != nil { @@ -211,8 +211,8 @@ func (bcs *BroadcastClients) Start(ctx context.Context) { } func (bcs *BroadcastClients) startSecondaryFeed(ctx context.Context) { - if bcs.numOfStartedSecondary < len(bcs.secondaryURL) { - pos := bcs.numOfStartedSecondary + pos := len(bcs.secondaryClients) + if pos < len(bcs.secondaryURL) { url := bcs.secondaryURL[pos] client, err := makeClient(url, bcs.secondaryRouter) if err != nil { @@ -220,7 +220,6 @@ func (bcs *BroadcastClients) startSecondaryFeed(ctx context.Context) { bcs.secondaryURL = append(bcs.secondaryURL[:pos], bcs.secondaryURL[pos+1:]...) 
return } - bcs.numOfStartedSecondary += 1 bcs.secondaryClients = append(bcs.secondaryClients, client) client.Start(ctx) log.Info("secondary feed started", "url", url) @@ -230,10 +229,12 @@ func (bcs *BroadcastClients) startSecondaryFeed(ctx context.Context) { } func (bcs *BroadcastClients) stopSecondaryFeed() { - if bcs.numOfStartedSecondary > 0 { - bcs.numOfStartedSecondary -= 1 - bcs.secondaryClients[bcs.numOfStartedSecondary].StopAndWait() - log.Info("disconnected secondary feed", "url", bcs.secondaryURL[bcs.numOfStartedSecondary]) + pos := len(bcs.secondaryClients) + if pos > 0 { + pos -= 1 + bcs.secondaryClients[pos].StopAndWait() + bcs.secondaryClients = bcs.secondaryClients[:pos] + log.Info("disconnected secondary feed", "url", bcs.secondaryURL[pos]) } } From be17feabe83895f8abff4a110c84d705e9f67ba4 Mon Sep 17 00:00:00 2001 From: Goran Vladika Date: Mon, 30 Oct 2023 12:51:49 +0100 Subject: [PATCH 62/64] Update nitro-contracts ref to include sha256 preimage support --- contracts | 2 +- nitro-testnode | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/contracts b/contracts index 1a94dabd80..695750067b 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit 1a94dabd805673e4c85e4071662814a142b20893 +Subproject commit 695750067b2b7658556bdf61ec8cf16132d83dd0 diff --git a/nitro-testnode b/nitro-testnode index 11170fe363..3a7b2cee29 160000 --- a/nitro-testnode +++ b/nitro-testnode @@ -1 +1 @@ -Subproject commit 11170fe36318991973bea632d9f348816a64a974 +Subproject commit 3a7b2cee29cf591cc4c3f6c5ddc00bf5be367bd9 From 3f275eece332005f88142c7ef674fa987e2e577e Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Tue, 31 Oct 2023 13:17:32 -0600 Subject: [PATCH 63/64] testnode: update --- nitro-testnode | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nitro-testnode b/nitro-testnode index 3a7b2cee29..bb3f094f43 160000 --- a/nitro-testnode +++ b/nitro-testnode @@ -1 +1 @@ -Subproject commit 
3a7b2cee29cf591cc4c3f6c5ddc00bf5be367bd9 +Subproject commit bb3f094f4359780c2a9aba28e15bb845be9b35a3 From c5b9eebdcd68ebe53b2d6b2d77490a86c484024c Mon Sep 17 00:00:00 2001 From: Joshua Colvin Date: Tue, 31 Oct 2023 12:38:00 -0700 Subject: [PATCH 64/64] Update dependencies for dependabot --- go.mod | 26 ++++++++++++++------------ go.sum | 47 ++++++++++++++++++++++++++--------------------- 2 files changed, 40 insertions(+), 33 deletions(-) diff --git a/go.mod b/go.mod index cdfae4df16..5ab2eda5c7 100644 --- a/go.mod +++ b/go.mod @@ -16,6 +16,7 @@ require ( github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.10 github.com/aws/aws-sdk-go-v2/service/s3 v1.26.9 github.com/cavaliergopher/grab/v3 v3.0.1 + github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811 github.com/codeclysm/extract/v3 v3.0.2 github.com/dgraph-io/badger/v3 v3.2103.2 github.com/enescakir/emoji v1.0.0 @@ -36,7 +37,7 @@ require ( github.com/rivo/tview v0.0.0-20230814110005-ccc2c8119703 github.com/spf13/pflag v1.0.5 github.com/wealdtech/go-merkletree v1.0.0 - golang.org/x/term v0.6.0 + golang.org/x/term v0.13.0 golang.org/x/tools v0.7.0 gopkg.in/natefinch/lumberjack.v2 v2.0.0 ) @@ -74,7 +75,6 @@ require ( github.com/cespare/xxhash v1.1.0 // indirect github.com/cockroachdb/errors v1.9.1 // indirect github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect - github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811 // indirect github.com/cockroachdb/redact v1.1.3 // indirect github.com/containerd/cgroups v1.1.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect @@ -106,7 +106,7 @@ require ( github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v4 v4.3.0 // indirect - github.com/golang/glog v1.0.0 // indirect + github.com/golang/glog v1.1.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/mock v1.6.0 // indirect github.com/golang/protobuf 
v1.5.3 // indirect @@ -268,9 +268,11 @@ require ( golang.org/x/exp v0.0.0-20230321023759-10a507213a29 // indirect golang.org/x/mod v0.10.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect - google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f // indirect - google.golang.org/grpc v1.53.0 // indirect - google.golang.org/protobuf v1.30.0 // indirect + google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20231012201019-e917dd12ba7a // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405 // indirect + google.golang.org/grpc v1.59.0 // indirect + google.golang.org/protobuf v1.31.0 // indirect gopkg.in/square/go-jose.v2 v2.5.1 // indirect lukechampine.com/blake3 v1.1.7 // indirect nhooyr.io/websocket v1.8.7 // indirect @@ -296,7 +298,7 @@ require ( github.com/go-redis/redis/v8 v8.11.4 github.com/go-stack/stack v1.8.1 // indirect github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect - github.com/google/uuid v1.3.0 // indirect + github.com/google/uuid v1.3.1 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/hashicorp/go-bexpr v0.1.10 // indirect github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect @@ -317,11 +319,11 @@ require ( github.com/tklauser/go-sysconf v0.3.5 // indirect github.com/tklauser/numcpus v0.2.2 // indirect github.com/tyler-smith/go-bip39 v1.1.0 // indirect - golang.org/x/crypto v0.7.0 - golang.org/x/net v0.8.0 // indirect - golang.org/x/sync v0.1.0 // indirect - golang.org/x/sys v0.7.0 - golang.org/x/text v0.8.0 // indirect + golang.org/x/crypto v0.14.0 + golang.org/x/net v0.17.0 // indirect + golang.org/x/sync v0.3.0 // indirect + golang.org/x/sys v0.13.0 + golang.org/x/text v0.13.0 // indirect golang.org/x/time v0.0.0-20220922220347-f3bd1da661af // indirect gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect ) diff 
--git a/go.sum b/go.sum index db81b3a07e..424e3508c3 100644 --- a/go.sum +++ b/go.sum @@ -441,8 +441,9 @@ github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzq github.com/golang-jwt/jwt/v4 v4.3.0 h1:kHL1vqdqWNfATmA0FNMdmZNMyZI1U6O31X4rlIPoBog= github.com/golang-jwt/jwt/v4 v4.3.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= +github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -527,8 +528,8 @@ github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= +github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go/v2 v2.0.3/go.mod 
h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= @@ -1761,8 +1762,8 @@ golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm golang.org/x/crypto v0.0.0-20210506145944-38f3c27a63bf/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/crypto v0.0.0-20210920023735-84f357641f63/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A= -golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= +golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1860,8 +1861,8 @@ golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 
v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1883,8 +1884,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1981,14 +1982,14 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= -golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw= -golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= +golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1999,8 +2000,8 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -2143,8 +2144,12 @@ google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w= -google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b h1:+YaDE2r2OG8t/z5qmsh7Y+XXwCbvadxxZ0YY6mTdrVA= +google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:CgAqfJo+Xmu0GwA0411Ht3OU3OntXwsGmrmjI8ioGXI= +google.golang.org/genproto/googleapis/api v0.0.0-20231012201019-e917dd12ba7a h1:myvhA4is3vrit1a6NZCWBIwN0kNEnX21DJOJX/NvIfI= +google.golang.org/genproto/googleapis/api v0.0.0-20231012201019-e917dd12ba7a/go.mod h1:SUBoKXbI1Efip18FClrQVGjWcyd0QZd8KkvdP34t7ww= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405 h1:AB/lmRny7e2pLhFEYIbl5qkDAUt2h0ZRO4wGPhZf+ik= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405/go.mod h1:67X1fPuzjcrkymZzZV1vvkFeTn2Rvc6lYF9MYFGCcwE= google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= @@ -2176,8 +2181,8 @@ google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9K google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= google.golang.org/grpc v1.42.0/go.mod 
h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc= -google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= +google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= +google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -2192,8 +2197,8 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= -google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=