diff --git a/arbnode/api.go b/arbnode/api.go index 55dc92434f..c37dc90a32 100644 --- a/arbnode/api.go +++ b/arbnode/api.go @@ -7,7 +7,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/staker" @@ -58,7 +58,7 @@ func (a *BlockValidatorDebugAPI) ValidateMessageNumber( return result, err } -func (a *BlockValidatorDebugAPI) ValidationInputsAt(ctx context.Context, msgNum hexutil.Uint64, target ethdb.WasmTarget, +func (a *BlockValidatorDebugAPI) ValidationInputsAt(ctx context.Context, msgNum hexutil.Uint64, target rawdb.WasmTarget, ) (server_api.InputJSON, error) { return a.val.ValidationInputsAt(ctx, arbutil.MessageIndex(msgNum), target) } diff --git a/arbos/block_processor.go b/arbos/block_processor.go index a06034f905..66072b8e11 100644 --- a/arbos/block_processor.go +++ b/arbos/block_processor.go @@ -146,7 +146,7 @@ func ProduceBlock( chainContext core.ChainContext, chainConfig *params.ChainConfig, isMsgForPrefetch bool, - runMode core.MessageRunMode, + runCtx *core.MessageRunContext, ) (*types.Block, types.Receipts, error) { txes, err := ParseL2Transactions(message, chainConfig.ChainID) if err != nil { @@ -156,7 +156,7 @@ func ProduceBlock( hooks := NoopSequencingHooks() return ProduceBlockAdvanced( - message.Header, txes, delayedMessagesRead, lastBlockHeader, statedb, chainContext, chainConfig, hooks, isMsgForPrefetch, runMode, + message.Header, txes, delayedMessagesRead, lastBlockHeader, statedb, chainContext, chainConfig, hooks, isMsgForPrefetch, runCtx, ) } @@ -171,7 +171,7 @@ func ProduceBlockAdvanced( chainConfig *params.ChainConfig, sequencingHooks *SequencingHooks, isMsgForPrefetch bool, - runMode core.MessageRunMode, + runCtx *core.MessageRunContext, ) (*types.Block, types.Receipts, error) { arbState, err := arbosState.OpenSystemArbosState(statedb, nil, true) @@ -322,7 +322,7 @@ func ProduceBlockAdvanced( tx, &header.GasUsed, vm.Config{}, - runMode, + runCtx, func(result *core.ExecutionResult) error { return hooks.PostTxFilter(header, statedb, arbState, tx, sender, dataGas, result) }, diff --git a/arbos/l1pricing/l1pricing.go b/arbos/l1pricing/l1pricing.go index 195df3708c..65ed2cec51 100644 --- a/arbos/l1pricing/l1pricing.go +++ b/arbos/l1pricing/l1pricing.go @@ -562,7 +562,7 @@ func makeFakeTxForMessage(message *core.Message) *types.Transaction { } // During gas estimation, we don't want the gas limit variability to change the L1 cost. 
gas := message.GasLimit - if gas == 0 || message.TxRunMode == core.MessageGasEstimationMode { + if gas == 0 || message.TxRunContext.IsGasEstimation() { gas = RandomGas } return types.NewTx(&types.DynamicFeeTx{ diff --git a/arbos/programs/native.go b/arbos/programs/native.go index 5995d9dafe..dffb17a709 100644 --- a/arbos/programs/native.go +++ b/arbos/programs/native.go @@ -28,7 +28,6 @@ import ( "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" @@ -71,10 +70,10 @@ func activateProgram( arbosVersionForGas uint64, debug bool, burner burn.Burner, + runCtx *core.MessageRunContext, ) (*activationInfo, error) { - targets := db.Database().WasmTargets() moduleActivationMandatory := true - info, asmMap, err := activateProgramInternal(program, codehash, wasm, page_limit, stylusVersion, arbosVersionForGas, debug, burner.GasLeft(), targets, moduleActivationMandatory) + info, asmMap, err := activateProgramInternal(program, codehash, wasm, page_limit, stylusVersion, arbosVersionForGas, debug, burner.GasLeft(), runCtx, moduleActivationMandatory) if err != nil { return nil, err } @@ -134,7 +133,7 @@ func compileNative( wasm []byte, stylusVersion uint16, debug bool, - target ethdb.WasmTarget, + target rawdb.WasmTarget, ) ([]byte, error) { output := &rustBytes{} status_asm := C.stylus_compile( @@ -160,11 +159,12 @@ func activateProgramInternal( arbosVersionForGas uint64, debug bool, gasLeft *uint64, - targets []ethdb.WasmTarget, + runCtx *core.MessageRunContext, moduleActivationMandatory bool, -) (*activationInfo, map[ethdb.WasmTarget][]byte, error) { +) (*activationInfo, map[rawdb.WasmTarget][]byte, error) { + targets := runCtx.WasmTargets() var wavmFound bool - var nativeTargets []ethdb.WasmTarget + var nativeTargets []rawdb.WasmTarget for _, target := range targets { if target == rawdb.TargetWavm { wavmFound = true @@ -173,14 +173,14 @@ func activateProgramInternal( } } type result struct { - target ethdb.WasmTarget + target rawdb.WasmTarget asm []byte err error } results := make(chan result) // info will be set in separate thread, make sure to wait before reading var info *activationInfo - asmMap := make(map[ethdb.WasmTarget][]byte, len(nativeTargets)+1) + asmMap := make(map[rawdb.WasmTarget][]byte, len(nativeTargets)+1) if moduleActivationMandatory || wavmFound { go func() { var err error @@ -233,7 +233,7 @@ func activateProgramInternal( return info, asmMap, err } -func getLocalAsm(statedb vm.StateDB, moduleHash common.Hash, addressForLogging common.Address, code []byte, codehash common.Hash, pagelimit uint16, time uint64, debugMode bool, program Program) ([]byte, error) { +func getLocalAsm(statedb vm.StateDB, moduleHash common.Hash, addressForLogging common.Address, code []byte, codehash common.Hash, pagelimit uint16, time uint64, debugMode bool, program Program, runCtx *core.MessageRunContext) ([]byte, error) { localTarget := rawdb.LocalTarget() localAsm, err := statedb.TryGetActivatedAsm(localTarget, moduleHash) if err == nil && len(localAsm) > 0 { @@ -251,10 +251,9 @@ func getLocalAsm(statedb vm.StateDB, moduleHash common.Hash, addressForLogging c zeroArbosVersion := uint64(0) zeroGas := uint64(0) - targets := statedb.Database().WasmTargets() // we know program is activated, so it must be in correct version and not use too much memory moduleActivationMandatory := false - info, asmMap, err := 
activateProgramInternal(addressForLogging, codehash, wasm, pagelimit, program.version, zeroArbosVersion, debugMode, &zeroGas, targets, moduleActivationMandatory) + info, asmMap, err := activateProgramInternal(addressForLogging, codehash, wasm, pagelimit, program.version, zeroArbosVersion, debugMode, &zeroGas, runCtx, moduleActivationMandatory) if err != nil { log.Error("failed to reactivate program", "address", addressForLogging, "expected moduleHash", moduleHash, "err", err) return nil, fmt.Errorf("failed to reactivate program address: %v err: %w", addressForLogging, err) @@ -281,7 +280,7 @@ func getLocalAsm(statedb vm.StateDB, moduleHash common.Hash, addressForLogging c } asm, exists := asmMap[localTarget] if !exists { - var availableTargets []ethdb.WasmTarget + var availableTargets []rawdb.WasmTarget for target := range asmMap { availableTargets = append(availableTargets, target) } @@ -302,7 +301,7 @@ func callProgram( evmData *EvmData, stylusParams *ProgParams, memoryModel *MemoryModel, - arbos_tag uint32, + runCtx *core.MessageRunContext, ) ([]byte, error) { db := interpreter.Evm().StateDB debug := stylusParams.DebugMode @@ -313,7 +312,7 @@ func callProgram( } if stateDb, ok := db.(*state.StateDB); ok { - stateDb.RecordProgram(db.Database().WasmTargets(), moduleHash) + stateDb.RecordProgram(runCtx.WasmTargets(), moduleHash) } evmApi := newApi(interpreter, tracingInfo, scope, memoryModel) @@ -329,7 +328,7 @@ func callProgram( cbool(debug), output, (*u64)(&scope.Contract.Gas), - u32(arbos_tag), + u32(runCtx.WasmCacheTag()), )) depth := interpreter.Depth() @@ -356,14 +355,14 @@ func handleReqImpl(apiId usize, req_type u32, data *rustSlice, costPtr *u64, out // Caches a program in Rust. We write a record so that we can undo on revert. // For gas estimation and eth_call, we ignore permanent updates and rely on Rust's LRU. -func cacheProgram(db vm.StateDB, module common.Hash, program Program, addressForLogging common.Address, code []byte, codehash common.Hash, params *StylusParams, debug bool, time uint64, runMode core.MessageRunMode) { - if runMode == core.MessageCommitMode { +func cacheProgram(db vm.StateDB, module common.Hash, program Program, addressForLogging common.Address, code []byte, codeHash common.Hash, params *StylusParams, debug bool, time uint64, runCtx *core.MessageRunContext) { + if runCtx.IsCommitMode() { // address is only used for logging - asm, err := getLocalAsm(db, module, addressForLogging, code, codehash, params.PageLimit, time, debug, program) + asm, err := getLocalAsm(db, module, addressForLogging, code, codeHash, params.PageLimit, time, debug, program, runCtx) if err != nil { panic("unable to recreate wasm") } - tag := db.Database().WasmCacheTag() + tag := runCtx.WasmCacheTag() state.CacheWasmRust(asm, module, program.version, tag, debug) db.RecordCacheWasm(state.CacheWasm{ModuleHash: module, Version: program.version, Tag: tag, Debug: debug}) } @@ -371,9 +370,9 @@ func cacheProgram(db vm.StateDB, module common.Hash, program Program, addressFor // Evicts a program in Rust. We write a record so that we can undo on revert, unless we don't need to (e.g. expired) // For gas estimation and eth_call, we ignore permanent updates and rely on Rust's LRU. 
-func evictProgram(db vm.StateDB, module common.Hash, version uint16, debug bool, runMode core.MessageRunMode, forever bool) { - if runMode == core.MessageCommitMode { - tag := db.Database().WasmCacheTag() +func evictProgram(db vm.StateDB, module common.Hash, version uint16, debug bool, runCtx *core.MessageRunContext, forever bool) { + if runCtx.IsCommitMode() { + tag := runCtx.WasmCacheTag() state.EvictWasmRust(module, version, tag, debug) if !forever { db.RecordEvictWasm(state.EvictWasm{ModuleHash: module, Version: version, Tag: tag, Debug: debug}) @@ -463,7 +462,7 @@ func GetEntrySizeEstimateBytes(module []byte, version uint16, debug bool) uint64 const DefaultTargetDescriptionArm = "arm64-linux-unknown+neon" const DefaultTargetDescriptionX86 = "x86_64-linux-unknown+sse4.2+lzcnt+bmi" -func SetTarget(name ethdb.WasmTarget, description string, native bool) error { +func SetTarget(name rawdb.WasmTarget, description string, native bool) error { output := &rustBytes{} status := userStatus(C.stylus_target_set( goSlice([]byte(name)), diff --git a/arbos/programs/programs.go b/arbos/programs/programs.go index c2fc1f68ad..0f3b52c43d 100644 --- a/arbos/programs/programs.go +++ b/arbos/programs/programs.go @@ -84,7 +84,7 @@ func (p Programs) CacheManagers() *addressSet.AddressSet { return p.cacheManagers } -func (p Programs) ActivateProgram(evm *vm.EVM, address common.Address, arbosVersion uint64, runMode core.MessageRunMode, debugMode bool) ( +func (p Programs) ActivateProgram(evm *vm.EVM, address common.Address, arbosVersion uint64, runCtx *core.MessageRunContext, debugMode bool) ( uint16, common.Hash, common.Hash, *big.Int, bool, error, ) { statedb := evm.StateDB @@ -118,7 +118,7 @@ func (p Programs) ActivateProgram(evm *vm.EVM, address common.Address, arbosVers // require the program's footprint not exceed the remaining memory budget pageLimit := am.SaturatingUSub(params.PageLimit, statedb.GetStylusPagesOpen()) - info, err := activateProgram(statedb, address, codeHash, wasm, pageLimit, stylusVersion, arbosVersion, debugMode, burner) + info, err := activateProgram(statedb, address, codeHash, wasm, pageLimit, stylusVersion, arbosVersion, debugMode, burner, runCtx) if err != nil { return 0, codeHash, common.Hash{}, nil, true, err } @@ -130,7 +130,7 @@ func (p Programs) ActivateProgram(evm *vm.EVM, address common.Address, arbosVers return 0, codeHash, common.Hash{}, nil, true, err } - evictProgram(statedb, oldModuleHash, currentVersion, debugMode, runMode, expired) + evictProgram(statedb, oldModuleHash, currentVersion, debugMode, runCtx, expired) } if err := p.moduleHashes.Set(codeHash, info.moduleHash); err != nil { return 0, codeHash, common.Hash{}, nil, true, err @@ -158,27 +158,12 @@ func (p Programs) ActivateProgram(evm *vm.EVM, address common.Address, arbosVers // replace the cached asm if cached { code := statedb.GetCode(address) - cacheProgram(statedb, info.moduleHash, programData, address, code, codeHash, params, debugMode, time, runMode) + cacheProgram(statedb, info.moduleHash, programData, address, code, codeHash, params, debugMode, time, runCtx) } return stylusVersion, codeHash, info.moduleHash, dataFee, false, p.setProgram(codeHash, programData) } -func runModeToString(runMode core.MessageRunMode) string { - switch runMode { - case core.MessageCommitMode: - return "commit_runmode" - case core.MessageGasEstimationMode: - return "gas_estimation_runmode" - case core.MessageEthcallMode: - return "eth_call_runmode" - case core.MessageReplayMode: - return "replay_runmode" - default: - return 
"unknown_runmode" - } -} - func (p Programs) CallProgram( scope *vm.ScopeContext, statedb vm.StateDB, @@ -187,7 +172,7 @@ func (p Programs) CallProgram( tracingInfo *util.TracingInfo, calldata []byte, reentrant bool, - runMode core.MessageRunMode, + runCtx *core.MessageRunContext, ) ([]byte, error) { evm := interpreter.Evm() contract := scope.Contract @@ -233,7 +218,7 @@ func (p Programs) CallProgram( statedb.AddStylusPages(program.footprint) defer statedb.SetStylusPagesOpen(open) - localAsm, err := getLocalAsm(statedb, moduleHash, contract.Address(), contract.Code, contract.CodeHash, params.PageLimit, evm.Context.Time, debugMode, program) + localAsm, err := getLocalAsm(statedb, moduleHash, contract.Address(), contract.Code, contract.CodeHash, params.PageLimit, evm.Context.Time, debugMode, program, runCtx) if err != nil { panic("failed to get local wasm for activated program: " + contract.Address().Hex()) } @@ -261,27 +246,22 @@ func (p Programs) CallProgram( if contract.CodeAddr != nil { address = *contract.CodeAddr } - var arbos_tag uint32 - if runMode == core.MessageCommitMode { - arbos_tag = statedb.Database().WasmCacheTag() - } - - metrics.GetOrRegisterCounter(fmt.Sprintf("arb/arbos/stylus/program_calls/%s", runModeToString(runMode)), nil).Inc(1) - ret, err := callProgram(address, moduleHash, localAsm, scope, interpreter, tracingInfo, calldata, evmData, goParams, model, arbos_tag) + metrics.GetOrRegisterCounter(fmt.Sprintf("arb/arbos/stylus/program_calls/%s", runCtx.RunModeMetricName()), nil).Inc(1) + ret, err := callProgram(address, moduleHash, localAsm, scope, interpreter, tracingInfo, calldata, evmData, goParams, model, runCtx) if len(ret) > 0 && arbosVersion >= gethParams.ArbosVersion_StylusFixes { // Ensure that return data costs as least as much as it would in the EVM. 
evmCost := evmMemoryCost(uint64(len(ret))) if startingGas < evmCost { contract.Gas = 0 // #nosec G115 - metrics.GetOrRegisterCounter(fmt.Sprintf("arb/arbos/stylus/gas_used/%s", runModeToString(runMode)), nil).Inc(int64(startingGas)) + metrics.GetOrRegisterCounter(fmt.Sprintf("arb/arbos/stylus/gas_used/%s", runCtx.RunModeMetricName()), nil).Inc(int64(startingGas)) return nil, vm.ErrOutOfGas } maxGasToReturn := startingGas - evmCost contract.Gas = am.MinInt(contract.Gas, maxGasToReturn) } // #nosec G115 - metrics.GetOrRegisterCounter(fmt.Sprintf("arb/arbos/stylus/gas_used/%s", runModeToString(runMode)), nil).Inc(int64(startingGas - contract.Gas)) + metrics.GetOrRegisterCounter(fmt.Sprintf("arb/arbos/stylus/gas_used/%s", runCtx.RunModeMetricName()), nil).Inc(int64(startingGas - contract.Gas)) return ret, err } @@ -419,7 +399,7 @@ func (p Programs) SetProgramCached( cache bool, time uint64, params *StylusParams, - runMode core.MessageRunMode, + runCtx *core.MessageRunContext, debug bool, ) error { program, err := p.getProgram(codeHash, time) @@ -455,9 +435,9 @@ func (p Programs) SetProgramCached( if err != nil { return err } - cacheProgram(db, moduleHash, program, address, code, codeHash, params, debug, time, runMode) + cacheProgram(db, moduleHash, program, address, code, codeHash, params, debug, time, runCtx) } else { - evictProgram(db, moduleHash, program.version, debug, runMode, expired) + evictProgram(db, moduleHash, program.version, debug, runCtx, expired) } program.cached = cache return p.setProgram(codeHash, program) diff --git a/arbos/programs/wasm.go b/arbos/programs/wasm.go index 12c23a724c..cf91a9c697 100644 --- a/arbos/programs/wasm.go +++ b/arbos/programs/wasm.go @@ -64,6 +64,7 @@ func activateProgram( arbosVersion uint64, debug bool, burner burn.Burner, + runCtx *core.MessageRunContext, ) (*activationInfo, error) { errBuf := make([]byte, 1024) debugMode := arbmath.BoolToUint32(debug) @@ -98,9 +99,9 @@ func activateProgram( } // stub any non-consensus, Rust-side caching updates -func cacheProgram(db vm.StateDB, module common.Hash, program Program, addressForLogging common.Address, code []byte, codeHash common.Hash, params *StylusParams, debug bool, time uint64, runMode core.MessageRunMode) { +func cacheProgram(db vm.StateDB, module common.Hash, program Program, addressForLogging common.Address, code []byte, codeHash common.Hash, params *StylusParams, debug bool, time uint64, runCtx *core.MessageRunContext) { } -func evictProgram(db vm.StateDB, module common.Hash, version uint16, debug bool, mode core.MessageRunMode, forever bool) { +func evictProgram(db vm.StateDB, module common.Hash, version uint16, debug bool, runCtx *core.MessageRunContext, forever bool) { } //go:wasmimport programs new_program @@ -131,7 +132,7 @@ func startProgram(module uint32) uint32 //go:wasmimport programs send_response func sendResponse(req_id uint32) uint32 -func getLocalAsm(statedb vm.StateDB, moduleHash common.Hash, addressForLogging common.Address, code []byte, codeHash common.Hash, pagelimit uint16, time uint64, debugMode bool, program Program) ([]byte, error) { +func getLocalAsm(statedb vm.StateDB, moduleHash common.Hash, addressForLogging common.Address, code []byte, codeHash common.Hash, pagelimit uint16, time uint64, debugMode bool, program Program, runCtx *core.MessageRunContext) ([]byte, error) { return nil, nil } @@ -146,7 +147,7 @@ func callProgram( evmData *EvmData, params *ProgParams, memoryModel *MemoryModel, - _arbos_tag uint32, + runCtx *core.MessageRunContext, ) ([]byte, error) { 
reqHandler := newApiClosures(interpreter, tracingInfo, scope, memoryModel) gasLeft, retData, err := CallProgramLoop(moduleHash, calldata, scope.Contract.Gas, evmData, params, reqHandler) diff --git a/arbos/programs/wasmstorehelper.go b/arbos/programs/wasmstorehelper.go index 1393752b72..091a534813 100644 --- a/arbos/programs/wasmstorehelper.go +++ b/arbos/programs/wasmstorehelper.go @@ -10,13 +10,14 @@ import ( "fmt" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/log" ) // SaveActiveProgramToWasmStore is used to save active stylus programs to wasm store during rebuilding -func (p Programs) SaveActiveProgramToWasmStore(statedb *state.StateDB, codeHash common.Hash, code []byte, time uint64, debugMode bool, rebuildingStartBlockTime uint64) error { +func (p Programs) SaveActiveProgramToWasmStore(statedb *state.StateDB, codeHash common.Hash, code []byte, time uint64, debugMode bool, rebuildingStartBlockTime uint64, targets []rawdb.WasmTarget) error { progParams, err := p.Params() if err != nil { return err @@ -43,7 +44,6 @@ func (p Programs) SaveActiveProgramToWasmStore(statedb *state.StateDB, codeHash return err } - targets := statedb.Database().WasmTargets() // If already in wasm store then return early _, err = statedb.TryGetActivatedAsmMap(targets, moduleHash) if err == nil { @@ -63,13 +63,13 @@ func (p Programs) SaveActiveProgramToWasmStore(statedb *state.StateDB, codeHash // We know program is activated, so it must be in correct version and not use too much memory // Empty program address is supplied because we dont have access to this during rebuilding of wasm store moduleActivationMandatory := false - info, asmMap, err := activateProgramInternal(common.Address{}, codeHash, wasm, progParams.PageLimit, program.version, zeroArbosVersion, debugMode, &zeroGas, targets, moduleActivationMandatory) + info, asmMap, err := activateProgramInternal(common.Address{}, codeHash, wasm, progParams.PageLimit, program.version, zeroArbosVersion, debugMode, &zeroGas, core.NewMessageReplayContext(targets), moduleActivationMandatory) if err != nil { log.Error("failed to reactivate program while rebuilding wasm store", "expected moduleHash", moduleHash, "err", err) return fmt.Errorf("failed to reactivate program while rebuilding wasm store: %w", err) } - if info.moduleHash != moduleHash { + if info != nil && info.moduleHash != moduleHash { log.Error("failed to reactivate program while rebuilding wasm store", "expected moduleHash", moduleHash, "got", info.moduleHash) return fmt.Errorf("failed to reactivate program while rebuilding wasm store, expected ModuleHash: %v", moduleHash) } diff --git a/arbos/tx_processor.go b/arbos/tx_processor.go index 7cebd8da37..945cf209f7 100644 --- a/arbos/tx_processor.go +++ b/arbos/tx_processor.go @@ -126,7 +126,7 @@ func (p *TxProcessor) ExecuteWASM(scope *vm.ScopeContext, input []byte, interpre tracingInfo, input, reentrant, - p.RunMode(), + p.RunContext(), ) } @@ -403,8 +403,8 @@ func (p *TxProcessor) StartTxHook() (endTxNow bool, gasUsed uint64, err error, r return false, 0, nil, nil } -func GetPosterGas(state *arbosState.ArbosState, baseFee *big.Int, runMode core.MessageRunMode, posterCost *big.Int) uint64 { - if runMode == core.MessageGasEstimationMode { +func GetPosterGas(state *arbosState.ArbosState, baseFee *big.Int, runCtx *core.MessageRunContext, posterCost *big.Int) uint64 { + if runCtx.IsGasEstimation() { // 
Suggest the amount of gas needed for a given amount of ETH is higher in case of congestion. // This will help the user pad the total they'll pay in case the price rises a bit. // Note, reducing the poster cost will increase share the network fee gets, not reduce the total. @@ -439,13 +439,13 @@ func (p *TxProcessor) GasChargingHook(gasRemaining *uint64) (common.Address, err } var poster common.Address - if !p.msg.TxRunMode.ExecutedOnChain() { + if !p.msg.TxRunContext.IsExecutedOnChain() { poster = l1pricing.BatchPosterAddress } else { poster = p.evm.Context.Coinbase } - if p.msg.TxRunMode.ExecutedOnChain() { + if p.msg.TxRunContext.IsExecutedOnChain() { p.msg.SkipL1Charging = false } if basefee.Sign() > 0 && !p.msg.SkipL1Charging { @@ -460,7 +460,7 @@ func (p *TxProcessor) GasChargingHook(gasRemaining *uint64) (common.Address, err if calldataUnits > 0 { p.state.Restrict(p.state.L1PricingState().AddToUnitsSinceUpdate(calldataUnits)) } - p.posterGas = GetPosterGas(p.state, basefee, p.msg.TxRunMode, posterCost) + p.posterGas = GetPosterGas(p.state, basefee, p.msg.TxRunContext, posterCost) p.PosterFee = arbmath.BigMulByUint(basefee, p.posterGas) // round down gasNeededToStartEVM = p.posterGas } @@ -471,7 +471,7 @@ func (p *TxProcessor) GasChargingHook(gasRemaining *uint64) (common.Address, err } *gasRemaining -= gasNeededToStartEVM - if p.msg.TxRunMode != core.MessageEthcallMode { + if !p.msg.TxRunContext.IsEthcall() { // If this is a real tx, limit the amount of computed based on the gas pool. // We do this by charging extra gas, and then refunding it later. gasAvailable, _ := p.state.L2PricingState().PerBlockGasLimit() @@ -483,8 +483,8 @@ func (p *TxProcessor) GasChargingHook(gasRemaining *uint64) (common.Address, err return tipReceipient, nil } -func (p *TxProcessor) RunMode() core.MessageRunMode { - return p.msg.TxRunMode +func (p *TxProcessor) RunContext() *core.MessageRunContext { + return p.msg.TxRunContext } func (p *TxProcessor) NonrefundableGas() uint64 { @@ -512,7 +512,7 @@ func (p *TxProcessor) EndTxHook(gasLeft uint64, success bool) { if underlyingTx != nil && underlyingTx.Type() == types.ArbitrumRetryTxType { inner, _ := underlyingTx.GetInner().(*types.ArbitrumRetryTx) effectiveBaseFee := inner.GasFeeCap - if p.msg.TxRunMode.ExecutedOnChain() && !arbmath.BigEquals(effectiveBaseFee, p.evm.Context.BaseFee) { + if p.msg.TxRunContext.IsExecutedOnChain() && !arbmath.BigEquals(effectiveBaseFee, p.evm.Context.BaseFee) { log.Error( "ArbitrumRetryTx GasFeeCap doesn't match basefee in commit mode", "txHash", underlyingTx.Hash(), @@ -776,6 +776,5 @@ func (p *TxProcessor) MsgIsNonMutating() bool { if p.msg == nil { return false } - mode := p.msg.TxRunMode - return mode == core.MessageGasEstimationMode || mode == core.MessageEthcallMode + return p.msg.TxRunContext.IsNonMutating() } diff --git a/cmd/nitro/init.go b/cmd/nitro/init.go index 93c51a0040..d0bce84df6 100644 --- a/cmd/nitro/init.go +++ b/cmd/nitro/init.go @@ -566,7 +566,7 @@ func openInitializeChainDb(ctx context.Context, stack *node.Node, config *NodeCo if err := dbutil.UnfinishedConversionCheck(wasmDb); err != nil { return nil, nil, fmt.Errorf("wasm unfinished database conversion check error: %w", err) } - chainDb := rawdb.WrapDatabaseWithWasm(chainData, wasmDb, 1, targetConfig.WasmTargets()) + chainDb := rawdb.WrapDatabaseWithWasm(chainData, wasmDb) _, err = rawdb.ParseStateScheme(cacheConfig.StateScheme, chainDb) if err != nil { return nil, nil, err @@ -641,7 +641,7 @@ func openInitializeChainDb(ctx context.Context, stack 
*node.Node, config *NodeCo if err := validateOrUpgradeWasmStoreSchemaVersion(wasmDb); err != nil { return nil, nil, err } - chainDb := rawdb.WrapDatabaseWithWasm(chainData, wasmDb, 1, targetConfig.WasmTargets()) + chainDb := rawdb.WrapDatabaseWithWasm(chainData, wasmDb) _, err = rawdb.ParseStateScheme(cacheConfig.StateScheme, chainDb) if err != nil { return nil, nil, err diff --git a/cmd/replay/main.go b/cmd/replay/main.go index 661040ea10..02b60cca2d 100644 --- a/cmd/replay/main.go +++ b/cmd/replay/main.go @@ -292,7 +292,7 @@ func main() { message := readMessage(chainConfig.ArbitrumChainParams.DataAvailabilityCommittee) chainContext := WavmChainContext{} - newBlock, _, err = arbos.ProduceBlock(message.Message, message.DelayedMessagesRead, lastBlockHeader, statedb, chainContext, chainConfig, false, core.MessageReplayMode) + newBlock, _, err = arbos.ProduceBlock(message.Message, message.DelayedMessagesRead, lastBlockHeader, statedb, chainContext, chainConfig, false, core.NewMessageReplayContext([]rawdb.WasmTarget{rawdb.LocalTarget()})) if err != nil { panic(err) } diff --git a/execution/gethexec/block_recorder.go b/execution/gethexec/block_recorder.go index 2e3d51fec9..4a4bff6dad 100644 --- a/execution/gethexec/block_recorder.go +++ b/execution/gethexec/block_recorder.go @@ -159,7 +159,7 @@ func (r *BlockRecorder) RecordBlockCreation( chaincontext, chainConfig, false, - core.MessageReplayMode, + core.NewMessageReplayContext(r.execEngine.wasmTargets), ) if err != nil { return nil, err diff --git a/execution/gethexec/executionengine.go b/execution/gethexec/executionengine.go index e606027419..17978d1ab1 100644 --- a/execution/gethexec/executionengine.go +++ b/execution/gethexec/executionengine.go @@ -97,6 +97,8 @@ type ExecutionEngine struct { prefetchBlock bool cachedL1PriceData *L1PriceData + + wasmTargets []rawdb.WasmTarget } func NewL1PriceData() *L1PriceData { @@ -196,6 +198,7 @@ func (s *ExecutionEngine) Initialize(rustCacheCapacityMB uint32, targetConfig *S if err := PopulateStylusTargetCache(targetConfig); err != nil { return fmt.Errorf("error populating stylus target cache: %w", err) } + s.wasmTargets = targetConfig.WasmTargets() return nil } @@ -274,7 +277,7 @@ func (s *ExecutionEngine) Reorg(count arbutil.MessageIndex, newMessages []arbost return nil, nil } - tag := s.bc.StateCache().WasmCacheTag() + tag := core.NewMessageCommitContext(nil).WasmCacheTag() // we don't pass any targets, we just want the tag // reorg Rust-side VM state C.stylus_reorg_vm(C.uint64_t(blockNum), C.uint32_t(tag)) @@ -524,7 +527,7 @@ func (s *ExecutionEngine) sequenceTransactionsWithBlockMutex(header *arbostypes. 
s.bc.Config(), hooks, false, - core.MessageCommitMode, + core.NewMessageCommitContext(s.wasmTargets), ) if err != nil { return nil, err @@ -683,9 +686,11 @@ func (s *ExecutionEngine) createBlockFromNextMessage(msg *arbostypes.MessageWith statedb.StartPrefetcher("TransactionStreamer") defer statedb.StopPrefetcher() - runMode := core.MessageCommitMode + var runCtx *core.MessageRunContext if isMsgForPrefetch { - runMode = core.MessageReplayMode + runCtx = core.NewMessagePrefetchContext(s.wasmTargets) + } else { + runCtx = core.NewMessageCommitContext(s.wasmTargets) } block, receipts, err := arbos.ProduceBlock( msg.Message, @@ -695,7 +700,7 @@ func (s *ExecutionEngine) createBlockFromNextMessage(msg *arbostypes.MessageWith s.bc, s.bc.Config(), isMsgForPrefetch, - runMode, + runCtx, ) return block, statedb, receipts, err diff --git a/execution/gethexec/node.go b/execution/gethexec/node.go index 5030de0cfa..d36c8c81e5 100644 --- a/execution/gethexec/node.go +++ b/execution/gethexec/node.go @@ -39,24 +39,24 @@ type StylusTargetConfig struct { Host string `koanf:"host"` ExtraArchs []string `koanf:"extra-archs"` - wasmTargets []ethdb.WasmTarget + wasmTargets []rawdb.WasmTarget } -func (c *StylusTargetConfig) WasmTargets() []ethdb.WasmTarget { +func (c *StylusTargetConfig) WasmTargets() []rawdb.WasmTarget { return c.wasmTargets } func (c *StylusTargetConfig) Validate() error { - targetsSet := make(map[ethdb.WasmTarget]bool, len(c.ExtraArchs)) + targetsSet := make(map[rawdb.WasmTarget]bool, len(c.ExtraArchs)) for _, arch := range c.ExtraArchs { - target := ethdb.WasmTarget(arch) + target := rawdb.WasmTarget(arch) if !rawdb.IsSupportedWasmTarget(target) { return fmt.Errorf("unsupported architecture: %v, possible values: %s, %s, %s, %s", arch, rawdb.TargetWavm, rawdb.TargetArm64, rawdb.TargetAmd64, rawdb.TargetHost) } targetsSet[target] = true } targetsSet[rawdb.LocalTarget()] = true - targets := make([]ethdb.WasmTarget, 0, len(c.ExtraArchs)+1) + targets := make([]rawdb.WasmTarget, 0, len(c.ExtraArchs)+1) for target := range targetsSet { targets = append(targets, target) } diff --git a/execution/gethexec/wasmstorerebuilder.go b/execution/gethexec/wasmstorerebuilder.go index b40a7cd128..e16778be85 100644 --- a/execution/gethexec/wasmstorerebuilder.go +++ b/execution/gethexec/wasmstorerebuilder.go @@ -67,6 +67,7 @@ func RebuildWasmStore(ctx context.Context, wasmStore ethdb.KeyValueStore, chainD if err := PopulateStylusTargetCache(targetConfig); err != nil { return fmt.Errorf("error populating stylus target cache: %w", err) } + targets := targetConfig.WasmTargets() latestHeader := l2Blockchain.CurrentBlock() // Attempt to get state at the start block when rebuilding commenced, if not available (in case of non-archival nodes) use latest state @@ -93,7 +94,7 @@ func RebuildWasmStore(ctx context.Context, wasmStore ethdb.KeyValueStore, chainD codeHash := common.BytesToHash(codeHashBytes) code := iter.Value() if state.IsStylusProgram(code) { - if err := programs.SaveActiveProgramToWasmStore(stateDb, codeHash, code, latestHeader.Time, l2Blockchain.Config().DebugMode(), rebuildingStartHeader.Time); err != nil { + if err := programs.SaveActiveProgramToWasmStore(stateDb, codeHash, code, latestHeader.Time, l2Blockchain.Config().DebugMode(), rebuildingStartHeader.Time, targets); err != nil { return fmt.Errorf("error while rebuilding of wasm store, aborting rebuilding: %w", err) } } diff --git a/execution/nodeInterface/NodeInterface.go b/execution/nodeInterface/NodeInterface.go index 189640d5d1..232080c992 100644 --- 
a/execution/nodeInterface/NodeInterface.go +++ b/execution/nodeInterface/NodeInterface.go @@ -215,7 +215,7 @@ func (n NodeInterface) EstimateRetryableTicket( } // ArbitrumSubmitRetryableTx is unsigned so the following won't panic - msg, err := core.TransactionToMessage(types.NewTx(submitTx), types.NewArbitrumSigner(nil), nil, core.MessageGasEstimationMode) + msg, err := core.TransactionToMessage(types.NewTx(submitTx), types.NewArbitrumSigner(nil), nil, core.NewMessageGasEstimationContext()) if err != nil { return err } @@ -530,7 +530,7 @@ func (n NodeInterface) GasEstimateL1Component( if !ok { return 0, nil, nil, errors.New("failed to cast to stateDB") } - msg := args.ToMessage(evm.Context.BaseFee, randomGas, n.header, sdb, core.MessageEthcallMode) + msg := args.ToMessage(evm.Context.BaseFee, randomGas, n.header, sdb, core.NewMessageEthcallContext()) pricing := c.State.L1PricingState() l1BaseFeeEstimate, err := pricing.PricePerUnit() @@ -590,7 +590,7 @@ func (n NodeInterface) GasEstimateComponents( if !ok { return 0, 0, nil, nil, errors.New("failed to cast to stateDB") } - msg := args.ToMessage(evm.Context.BaseFee, gasCap, n.header, sdb, core.MessageGasEstimationMode) + msg := args.ToMessage(evm.Context.BaseFee, gasCap, n.header, sdb, core.NewMessageGasEstimationContext()) brotliCompressionLevel, err := c.State.BrotliCompressionLevel() if err != nil { return 0, 0, nil, nil, fmt.Errorf("failed to get brotli compression level: %w", err) @@ -607,7 +607,7 @@ func (n NodeInterface) GasEstimateComponents( } // Compute the fee paid for L1 in L2 terms - gasForL1 := arbos.GetPosterGas(c.State, baseFee, core.MessageGasEstimationMode, feeForL1) + gasForL1 := arbos.GetPosterGas(c.State, baseFee, core.NewMessageGasEstimationContext(), feeForL1) return total, gasForL1, baseFee, l1BaseFeeEstimate, nil } diff --git a/execution/nodeInterface/virtual-contracts.go b/execution/nodeInterface/virtual-contracts.go index 5b9f4b3474..e5e5a1b27c 100644 --- a/execution/nodeInterface/virtual-contracts.go +++ b/execution/nodeInterface/virtual-contracts.go @@ -136,7 +136,7 @@ func init() { } posterCost, _ := state.L1PricingState().PosterDataCost(msg, l1pricing.BatchPosterAddress, brotliCompressionLevel) // Use estimate mode because this is used to raise the gas cap, so we don't want to underestimate. 
- return arbos.GetPosterGas(state, header.BaseFee, core.MessageGasEstimationMode, posterCost), nil + return arbos.GetPosterGas(state, header.BaseFee, core.NewMessageGasEstimationContext(), posterCost), nil } core.GetArbOSSpeedLimitPerSecond = func(statedb *state.StateDB) (uint64, error) { diff --git a/go-ethereum b/go-ethereum index 779b669ac0..74029ae159 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit 779b669ac0d0020099a67a1c39fbaf66b901c1a5 +Subproject commit 74029ae159ab6526a9a14be8d4b8ccdc99501647 diff --git a/precompiles/ArbAddressTable_test.go b/precompiles/ArbAddressTable_test.go index 3feaca6279..002892fad9 100644 --- a/precompiles/ArbAddressTable_test.go +++ b/precompiles/ArbAddressTable_test.go @@ -167,9 +167,9 @@ func newMockEVMForTesting() *vm.EVM { return newMockEVMForTestingWithVersion(nil) } -func newMockEVMForTestingWithVersionAndRunMode(version *uint64, runMode core.MessageRunMode) *vm.EVM { +func newMockEVMForTestingWithVersionAndRunMode(version *uint64, runCtx *core.MessageRunContext) *vm.EVM { evm := newMockEVMForTestingWithVersion(version) - evm.ProcessingHook = arbos.NewTxProcessor(evm, &core.Message{TxRunMode: runMode}) + evm.ProcessingHook = arbos.NewTxProcessor(evm, &core.Message{TxRunContext: runCtx}) return evm } diff --git a/precompiles/ArbOwner_test.go b/precompiles/ArbOwner_test.go index 74b29a79b5..8c3df6576f 100644 --- a/precompiles/ArbOwner_test.go +++ b/precompiles/ArbOwner_test.go @@ -174,7 +174,7 @@ func TestArbOwner(t *testing.T) { } func TestArbOwnerSetChainConfig(t *testing.T) { - evm := newMockEVMForTestingWithVersionAndRunMode(nil, core.MessageGasEstimationMode) + evm := newMockEVMForTestingWithVersionAndRunMode(nil, core.NewMessageGasEstimationContext()) caller := common.BytesToAddress(crypto.Keccak256([]byte{})[:20]) tracer := util.NewTracingInfo(evm, testhelpers.RandomAddress(), types.ArbosAddress, util.TracingDuringEVM) state, err := arbosState.OpenArbosState(evm.StateDB, burn.NewSystemBurner(tracer, false)) diff --git a/precompiles/ArbWasm.go b/precompiles/ArbWasm.go index eecca35ce6..94fb8d3fa3 100644 --- a/precompiles/ArbWasm.go +++ b/precompiles/ArbWasm.go @@ -33,7 +33,7 @@ type ArbWasm struct { // Compile a wasm program with the latest instrumentation func (con ArbWasm) ActivateProgram(c ctx, evm mech, value huge, program addr) (uint16, huge, error) { debug := evm.ChainConfig().DebugMode() - runMode := c.txProcessor.RunMode() + runCtx := c.txProcessor.RunContext() programs := c.State.Programs() arbosVersion := c.State.ArbOSVersion() @@ -41,7 +41,7 @@ func (con ArbWasm) ActivateProgram(c ctx, evm mech, value huge, program addr) (u if err := c.Burn(1659168); err != nil { return 0, nil, err } - version, codeHash, moduleHash, dataFee, takeAllGas, err := programs.ActivateProgram(evm, program, arbosVersion, runMode, debug) + version, codeHash, moduleHash, dataFee, takeAllGas, err := programs.ActivateProgram(evm, program, arbosVersion, runCtx, debug) if takeAllGas { _ = c.BurnOut() } diff --git a/precompiles/ArbWasmCache.go b/precompiles/ArbWasmCache.go index 3cada9dd70..8da784b152 100644 --- a/precompiles/ArbWasmCache.go +++ b/precompiles/ArbWasmCache.go @@ -57,12 +57,12 @@ func (con ArbWasmCache) setProgramCached(c ctx, evm mech, address addr, codehash return err } debugMode := evm.ChainConfig().DebugMode() - txRunMode := c.txProcessor.RunMode() + runCtx := c.txProcessor.RunContext() emitEvent := func() error { return con.UpdateProgramCache(c, evm, c.caller, codehash, cached) } return programs.SetProgramCached( - 
emitEvent, evm.StateDB, codehash, address, cached, evm.Context.Time, params, txRunMode, debugMode, + emitEvent, evm.StateDB, codehash, address, cached, evm.Context.Time, params, runCtx, debugMode, ) } diff --git a/staker/block_validator.go b/staker/block_validator.go index 43e5c7d28f..64ac2f7c74 100644 --- a/staker/block_validator.go +++ b/staker/block_validator.go @@ -529,7 +529,7 @@ func (v *BlockValidator) sendRecord(s *validationStatus) error { //nolint:gosec func (v *BlockValidator) writeToFile(validationEntry *validationEntry) error { - input, err := validationEntry.ToInput([]ethdb.WasmTarget{rawdb.TargetWavm}) + input, err := validationEntry.ToInput([]rawdb.WasmTarget{rawdb.TargetWavm}) if err != nil { return err } diff --git a/staker/bold/bold_state_provider.go b/staker/bold/bold_state_provider.go index 48b7cbd91e..d7ac4f7095 100644 --- a/staker/bold/bold_state_provider.go +++ b/staker/bold/bold_state_provider.go @@ -14,7 +14,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" @@ -365,7 +364,7 @@ func (s *BOLDStateProvider) CollectMachineHashes( if err != nil { return nil, err } - input, err := entry.ToInput([]ethdb.WasmTarget{rawdb.TargetWavm}) + input, err := entry.ToInput([]rawdb.WasmTarget{rawdb.TargetWavm}) if err != nil { return nil, err } @@ -516,7 +515,7 @@ func (s *BOLDStateProvider) CollectProof( if err != nil { return nil, err } - input, err := entry.ToInput([]ethdb.WasmTarget{rawdb.TargetWavm}) + input, err := entry.ToInput([]rawdb.WasmTarget{rawdb.TargetWavm}) if err != nil { return nil, err } diff --git a/staker/legacy/challenge_manager.go b/staker/legacy/challenge_manager.go index 1aa13a9e05..e7010ad413 100644 --- a/staker/legacy/challenge_manager.go +++ b/staker/legacy/challenge_manager.go @@ -16,7 +16,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rpc" @@ -471,7 +470,7 @@ func (m *ChallengeManager) createExecutionBackend(ctx context.Context, step uint if err != nil { return fmt.Errorf("error creating validation entry for challenge %v msg %v for execution challenge: %w", m.challengeIndex, initialCount, err) } - input, err := entry.ToInput([]ethdb.WasmTarget{rawdb.TargetWavm}) + input, err := entry.ToInput([]rawdb.WasmTarget{rawdb.TargetWavm}) if err != nil { return fmt.Errorf("error getting validation entry input of challenge %v msg %v: %w", m.challengeIndex, initialCount, err) } diff --git a/staker/stateless_block_validator.go b/staker/stateless_block_validator.go index 62e772d5f8..3aef069d97 100644 --- a/staker/stateless_block_validator.go +++ b/staker/stateless_block_validator.go @@ -10,6 +10,7 @@ import ( "testing" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" @@ -142,7 +143,7 @@ type validationEntry struct { DelayedMsg []byte } -func (e *validationEntry) ToInput(stylusArchs []ethdb.WasmTarget) (*validator.ValidationInput, error) { +func (e *validationEntry) ToInput(stylusArchs []rawdb.WasmTarget) (*validator.ValidationInput, error) { if e.Stage != Ready { return nil, errors.New("cannot 
create input from non-ready entry") } @@ -151,7 +152,7 @@ func (e *validationEntry) ToInput(stylusArchs []ethdb.WasmTarget) (*validator.Va HasDelayedMsg: e.HasDelayedMsg, DelayedMsgNr: e.DelayedMsgNr, Preimages: e.Preimages, - UserWasms: make(map[ethdb.WasmTarget]map[common.Hash][]byte, len(e.UserWasms)), + UserWasms: make(map[rawdb.WasmTarget]map[common.Hash][]byte, len(e.UserWasms)), BatchInfo: e.BatchInfo, DelayedMsg: e.DelayedMsg, StartState: e.Start, @@ -527,7 +528,7 @@ func (v *StatelessBlockValidator) ValidateResult( return true, &entry.End, nil } -func (v *StatelessBlockValidator) ValidationInputsAt(ctx context.Context, pos arbutil.MessageIndex, targets ...ethdb.WasmTarget) (server_api.InputJSON, error) { +func (v *StatelessBlockValidator) ValidationInputsAt(ctx context.Context, pos arbutil.MessageIndex, targets ...rawdb.WasmTarget) (server_api.InputJSON, error) { entry, err := v.CreateReadyValidationEntry(ctx, pos) if err != nil { return server_api.InputJSON{}, err diff --git a/system_tests/common_test.go b/system_tests/common_test.go index d3d4b33ab9..7467efaa4b 100644 --- a/system_tests/common_test.go +++ b/system_tests/common_test.go @@ -92,13 +92,12 @@ import ( type info = *BlockchainTestInfo type SecondNodeParams struct { - nodeConfig *arbnode.Config - execConfig *gethexec.Config - stackConfig *node.Config - dasConfig *das.DataAvailabilityConfig - initData *statetransfer.ArbosInitializationInfo - addresses *chaininfo.RollupAddresses - wasmCacheTag uint32 + nodeConfig *arbnode.Config + execConfig *gethexec.Config + stackConfig *node.Config + dasConfig *das.DataAvailabilityConfig + initData *statetransfer.ArbosInitializationInfo + addresses *chaininfo.RollupAddresses } type TestClient struct { @@ -257,7 +256,6 @@ type NodeBuilder struct { initMessage *arbostypes.ParsedInitMessage l3InitMessage *arbostypes.ParsedInitMessage withProdConfirmPeriodBlocks bool - wasmCacheTag uint32 delayBufferThreshold uint64 // Created nodes @@ -367,15 +365,6 @@ func (b *NodeBuilder) WithExtraArchs(targets []string) *NodeBuilder { return b } -func (b *NodeBuilder) WithStylusLongTermCache(enabled bool) *NodeBuilder { - if enabled { - b.wasmCacheTag = 1 - } else { - b.wasmCacheTag = 0 - } - return b -} - // WithDelayBuffer sets the delay-buffer threshold, which is the number of blocks the batch-poster // is allowed to delay a batch with a delayed message. // Setting the threshold to zero disabled the delay buffer (default behaviour). 
@@ -459,8 +448,6 @@ func buildOnParentChain( initMessage *arbostypes.ParsedInitMessage, addresses *chaininfo.RollupAddresses, - - wasmCacheTag uint32, ) *TestClient { if parentChainTestClient == nil { t.Fatal("must build parent chain before building chain") @@ -472,7 +459,7 @@ func buildOnParentChain( var arbDb ethdb.Database var blockchain *core.BlockChain _, chainTestClient.Stack, chainDb, arbDb, blockchain = createNonL1BlockChainWithStackConfig( - t, chainInfo, dataDir, chainConfig, initMessage, stackConfig, execConfig, wasmCacheTag) + t, chainInfo, dataDir, chainConfig, initMessage, stackConfig, execConfig) var sequencerTxOptsPtr *bind.TransactOpts var dataSigner signature.DataSignerFunc @@ -562,8 +549,6 @@ func (b *NodeBuilder) BuildL3OnL2(t *testing.T) func() { b.l3InitMessage, b.l3Addresses, - - b.wasmCacheTag, ) return func() { @@ -592,8 +577,6 @@ func (b *NodeBuilder) BuildL2OnL1(t *testing.T) func() { b.initMessage, b.addresses, - - b.wasmCacheTag, ) return func() { @@ -615,7 +598,7 @@ func (b *NodeBuilder) BuildL2(t *testing.T) func() { var arbDb ethdb.Database var blockchain *core.BlockChain b.L2Info, b.L2.Stack, chainDb, arbDb, blockchain = createL2BlockChain( - t, b.L2Info, b.dataDir, b.chainConfig, b.execConfig, b.wasmCacheTag) + t, b.L2Info, b.dataDir, b.chainConfig, b.execConfig) Require(t, b.execConfig.Validate()) execConfig := b.execConfig @@ -666,7 +649,7 @@ func (b *NodeBuilder) RestartL2Node(t *testing.T) { } b.L2.cleanup() - l2info, stack, chainDb, arbDb, blockchain := createNonL1BlockChainWithStackConfig(t, b.L2Info, b.dataDir, b.chainConfig, b.initMessage, b.l2StackConfig, b.execConfig, b.wasmCacheTag) + l2info, stack, chainDb, arbDb, blockchain := createNonL1BlockChainWithStackConfig(t, b.L2Info, b.dataDir, b.chainConfig, b.initMessage, b.l2StackConfig, b.execConfig) execConfigFetcher := func() *gethexec.Config { return b.execConfig } execNode, err := gethexec.CreateExecutionNode(b.ctx, stack, chainDb, blockchain, nil, execConfigFetcher) @@ -743,7 +726,7 @@ func build2ndNode( testClient := NewTestClient(ctx) testClient.Client, testClient.ConsensusNode = - Create2ndNodeWithConfig(t, ctx, firstNodeTestClient.ConsensusNode, parentChainTestClient.Stack, parentChainInfo, params.initData, params.nodeConfig, params.execConfig, params.stackConfig, valnodeConfig, params.addresses, initMessage, params.wasmCacheTag) + Create2ndNodeWithConfig(t, ctx, firstNodeTestClient.ConsensusNode, parentChainTestClient.Stack, parentChainInfo, params.initData, params.nodeConfig, params.execConfig, params.stackConfig, valnodeConfig, params.addresses, initMessage) testClient.ExecNode = getExecNode(t, testClient.ConsensusNode) testClient.cleanup = func() { testClient.ConsensusNode.StopAndWait() } return testClient, func() { testClient.cleanup() } @@ -1411,13 +1394,13 @@ func deployOnParentChain( } func createL2BlockChain( - t *testing.T, l2info *BlockchainTestInfo, dataDir string, chainConfig *params.ChainConfig, execConfig *gethexec.Config, wasmCacheTag uint32, + t *testing.T, l2info *BlockchainTestInfo, dataDir string, chainConfig *params.ChainConfig, execConfig *gethexec.Config, ) (*BlockchainTestInfo, *node.Node, ethdb.Database, ethdb.Database, *core.BlockChain) { - return createNonL1BlockChainWithStackConfig(t, l2info, dataDir, chainConfig, nil, nil, execConfig, wasmCacheTag) + return createNonL1BlockChainWithStackConfig(t, l2info, dataDir, chainConfig, nil, nil, execConfig) } func createNonL1BlockChainWithStackConfig( - t *testing.T, info *BlockchainTestInfo, dataDir string, chainConfig 
*params.ChainConfig, initMessage *arbostypes.ParsedInitMessage, stackConfig *node.Config, execConfig *gethexec.Config, wasmCacheTag uint32, + t *testing.T, info *BlockchainTestInfo, dataDir string, chainConfig *params.ChainConfig, initMessage *arbostypes.ParsedInitMessage, stackConfig *node.Config, execConfig *gethexec.Config, ) (*BlockchainTestInfo, *node.Node, ethdb.Database, ethdb.Database, *core.BlockChain) { if info == nil { info = NewArbTestInfo(t, chainConfig.ChainID) @@ -1437,7 +1420,7 @@ func createNonL1BlockChainWithStackConfig( Require(t, err) wasmData, err := stack.OpenDatabaseWithExtraOptions("wasm", 0, 0, "wasm/", false, conf.PersistentConfigDefault.Pebble.ExtraOptions("wasm")) Require(t, err) - chainDb := rawdb.WrapDatabaseWithWasm(chainData, wasmData, wasmCacheTag, execConfig.StylusTarget.WasmTargets()) + chainDb := rawdb.WrapDatabaseWithWasm(chainData, wasmData) arbDb, err := stack.OpenDatabaseWithExtraOptions("arbitrumdata", 0, 0, "arbitrumdata/", false, conf.PersistentConfigDefault.Pebble.ExtraOptions("arbitrumdata")) Require(t, err) @@ -1508,7 +1491,6 @@ func Create2ndNodeWithConfig( valnodeConfig *valnode.Config, addresses *chaininfo.RollupAddresses, initMessage *arbostypes.ParsedInitMessage, - wasmCacheTag uint32, ) (*ethclient.Client, *arbnode.Node) { if nodeConfig == nil { nodeConfig = arbnode.ConfigDefaultL1NonSequencerTest() @@ -1532,7 +1514,7 @@ func Create2ndNodeWithConfig( Require(t, err) wasmData, err := chainStack.OpenDatabaseWithExtraOptions("wasm", 0, 0, "wasm/", false, conf.PersistentConfigDefault.Pebble.ExtraOptions("wasm")) Require(t, err) - chainDb := rawdb.WrapDatabaseWithWasm(chainData, wasmData, wasmCacheTag, execConfig.StylusTarget.WasmTargets()) + chainDb := rawdb.WrapDatabaseWithWasm(chainData, wasmData) arbDb, err := chainStack.OpenDatabaseWithExtraOptions("arbitrumdata", 0, 0, "arbitrumdata/", false, conf.PersistentConfigDefault.Pebble.ExtraOptions("arbitrumdata")) Require(t, err) @@ -1836,7 +1818,7 @@ var ( // recordBlock writes a json file with all of the data needed to validate a block. // // This can be used as an input to the arbitrator prover to validate a block. 
-func recordBlock(t *testing.T, block uint64, builder *NodeBuilder, targets ...ethdb.WasmTarget) { +func recordBlock(t *testing.T, block uint64, builder *NodeBuilder, targets ...rawdb.WasmTarget) { t.Helper() flag.Parse() if !*recordBlockInputsEnable { diff --git a/system_tests/program_test.go b/system_tests/program_test.go index 053cfe859d..5b45d8ef6d 100644 --- a/system_tests/program_test.go +++ b/system_tests/program_test.go @@ -2178,14 +2178,14 @@ func readModuleHashes(t *testing.T, wasmDb ethdb.KeyValueStore) []common.Hash { return modules } -func checkWasmStoreContent(t *testing.T, wasmDb ethdb.KeyValueStore, expectedTargets []ethdb.WasmTarget, numModules int) { +func checkWasmStoreContent(t *testing.T, wasmDb ethdb.KeyValueStore, expectedTargets []rawdb.WasmTarget, numModules int) { t.Helper() modules := readModuleHashes(t, wasmDb) if len(modules) != numModules { t.Fatalf("Unexpected number of module hashes found in wasm store, want: %d, have: %d", numModules, len(modules)) } readAsm := func(module common.Hash, target string) []byte { - wasmTarget := ethdb.WasmTarget(target) + wasmTarget := rawdb.WasmTarget(target) if !rawdb.IsSupportedWasmTarget(wasmTarget) { t.Fatalf("internal test error - unsupported target passed to checkWasmStoreContent: %v", target) } @@ -2203,7 +2203,7 @@ func checkWasmStoreContent(t *testing.T, wasmDb ethdb.KeyValueStore, expectedTar for _, target := range allWasmTargets { var expected bool for _, expectedTarget := range expectedTargets { - if ethdb.WasmTarget(target) == expectedTarget { + if rawdb.WasmTarget(target) == expectedTarget { expected = true break } @@ -2355,9 +2355,7 @@ func checkLruCacheMetrics(t *testing.T, expected programs.WasmLruCacheMetrics) { } func TestWasmLongTermCache(t *testing.T) { - builder, ownerAuth, cleanup := setupProgramTest(t, true, func(builder *NodeBuilder) { - builder.WithStylusLongTermCache(true) - }) + builder, ownerAuth, cleanup := setupProgramTest(t, true) ctx := builder.ctx l2info := builder.L2Info l2client := builder.L2.Client @@ -2493,9 +2491,7 @@ func TestWasmLongTermCache(t *testing.T) { } func TestRepopulateWasmLongTermCacheFromLru(t *testing.T) { - builder, ownerAuth, cleanup := setupProgramTest(t, true, func(builder *NodeBuilder) { - builder.WithStylusLongTermCache(true) - }) + builder, ownerAuth, cleanup := setupProgramTest(t, true) ctx := builder.ctx l2info := builder.L2Info l2client := builder.L2.Client diff --git a/system_tests/state_fuzz_test.go b/system_tests/state_fuzz_test.go index 8388e8417c..ee8780c798 100644 --- a/system_tests/state_fuzz_test.go +++ b/system_tests/state_fuzz_test.go @@ -40,7 +40,7 @@ func BuildBlock( chainConfig *params.ChainConfig, inbox arbstate.InboxBackend, seqBatch []byte, - runMode core.MessageRunMode, + runCtx *core.MessageRunContext, ) (*types.Block, error) { var delayedMessagesRead uint64 if lastBlockHeader != nil { @@ -68,7 +68,7 @@ func BuildBlock( } block, _, err := arbos.ProduceBlock( - l1Message, delayedMessagesRead, lastBlockHeader, statedb, chainContext, chainConfig, false, runMode, + l1Message, delayedMessagesRead, lastBlockHeader, statedb, chainContext, chainConfig, false, runCtx, ) return block, err } @@ -132,7 +132,7 @@ func (c noopChainContext) GetHeader(common.Hash, uint64) *types.Header { } func FuzzStateTransition(f *testing.F) { - f.Fuzz(func(t *testing.T, compressSeqMsg bool, seqMsg []byte, delayedMsg []byte, runModeSeed uint8) { + f.Fuzz(func(t *testing.T, compressSeqMsg bool, seqMsg []byte, delayedMsg []byte, runCtxSeed uint8) { if len(seqMsg) > 0 && 
daprovider.IsL1AuthenticatedMessageHeaderByte(seqMsg[0]) { return } @@ -206,9 +206,21 @@ func FuzzStateTransition(f *testing.F) { positionWithinMessage: 0, delayedMessages: delayedMessages, } - numberOfMessageRunModes := uint8(core.MessageReplayMode) + 1 // TODO update number of run modes when new mode is added - runMode := core.MessageRunMode(runModeSeed % numberOfMessageRunModes) - _, err = BuildBlock(statedb, genesis, noopChainContext{}, chaininfo.ArbitrumOneChainConfig(), inbox, seqBatch, runMode) + runCtxNumber := runCtxSeed % 5 + var runCtx *core.MessageRunContext + switch runCtxNumber { + case 0: + runCtx = core.NewMessageCommitContext(nil) + case 1: + runCtx = core.NewMessageReplayContext(nil) + case 2: + runCtx = core.NewMessagePrefetchContext(nil) + case 3: + runCtx = core.NewMessageEthcallContext() + case 4: + runCtx = core.NewMessageGasEstimationContext() + } + _, err = BuildBlock(statedb, genesis, noopChainContext{}, chaininfo.ArbitrumOneChainConfig(), inbox, seqBatch, runCtx) if err != nil { // With the fixed header it shouldn't be possible to read a delayed message, // and no other type of error should be possible. diff --git a/system_tests/validation_mock_test.go b/system_tests/validation_mock_test.go index 98dab7ad39..5e58cc59f7 100644 --- a/system_tests/validation_mock_test.go +++ b/system_tests/validation_mock_test.go @@ -8,9 +8,9 @@ import ( "time" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/rpc" @@ -61,8 +61,8 @@ func (s *mockSpawner) WasmModuleRoots() ([]common.Hash, error) { return mockWasmModuleRoots, nil } -func (s *mockSpawner) StylusArchs() []ethdb.WasmTarget { - return []ethdb.WasmTarget{"mock"} +func (s *mockSpawner) StylusArchs() []rawdb.WasmTarget { + return []rawdb.WasmTarget{"mock"} } func (s *mockSpawner) Launch(entry *validator.ValidationInput, moduleRoot common.Hash) validator.ValidationRun { diff --git a/validator/client/redis/producer.go b/validator/client/redis/producer.go index 4bfb721f59..26f1c59d7f 100644 --- a/validator/client/redis/producer.go +++ b/validator/client/redis/producer.go @@ -10,7 +10,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/pubsub" @@ -38,7 +37,7 @@ func (c ValidationClientConfig) Enabled() bool { func (c ValidationClientConfig) Validate() error { for _, arch := range c.StylusArchs { - if !rawdb.IsSupportedWasmTarget(ethdb.WasmTarget(arch)) { + if !rawdb.IsSupportedWasmTarget(rawdb.WasmTarget(arch)) { return fmt.Errorf("Invalid stylus arch: %v", arch) } } @@ -165,10 +164,10 @@ func (c *ValidationClient) Name() string { return c.config.Name } -func (c *ValidationClient) StylusArchs() []ethdb.WasmTarget { - stylusArchs := make([]ethdb.WasmTarget, 0, len(c.config.StylusArchs)) +func (c *ValidationClient) StylusArchs() []rawdb.WasmTarget { + stylusArchs := make([]rawdb.WasmTarget, 0, len(c.config.StylusArchs)) for _, arch := range c.config.StylusArchs { - stylusArchs = append(stylusArchs, ethdb.WasmTarget(arch)) + stylusArchs = append(stylusArchs, rawdb.WasmTarget(arch)) } return stylusArchs } diff --git a/validator/client/validation_client.go b/validator/client/validation_client.go index c04817d654..b393f1243c 100644 --- 
a/validator/client/validation_client.go +++ b/validator/client/validation_client.go @@ -13,7 +13,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/rpc" @@ -30,7 +29,7 @@ type ValidationClient struct { stopwaiter.StopWaiter client *rpcclient.RpcClient name string - stylusArchs []ethdb.WasmTarget + stylusArchs []rawdb.WasmTarget room atomic.Int32 wasmModuleRoots []common.Hash } @@ -39,7 +38,7 @@ func NewValidationClient(config rpcclient.ClientConfigFetcher, stack *node.Node) return &ValidationClient{ client: rpcclient.NewRpcClient(config, stack), name: "not started", - stylusArchs: []ethdb.WasmTarget{"not started"}, + stylusArchs: []rawdb.WasmTarget{"not started"}, } } @@ -66,20 +65,20 @@ func (c *ValidationClient) Start(ctx context.Context) error { if len(name) == 0 { return errors.New("couldn't read name from server") } - var stylusArchs []ethdb.WasmTarget + var stylusArchs []rawdb.WasmTarget if err := c.client.CallContext(ctx, &stylusArchs, server_api.Namespace+"_stylusArchs"); err != nil { var rpcError rpc.Error ok := errors.As(err, &rpcError) if !ok || rpcError.ErrorCode() != -32601 { return fmt.Errorf("could not read stylus arch from server: %w", err) } - stylusArchs = []ethdb.WasmTarget{ethdb.WasmTarget("pre-stylus")} // invalid, will fail if trying to validate block with stylus + stylusArchs = []rawdb.WasmTarget{rawdb.WasmTarget("pre-stylus")} // invalid, will fail if trying to validate block with stylus } else { if len(stylusArchs) == 0 { return fmt.Errorf("could not read stylus archs from validation server") } for _, stylusArch := range stylusArchs { - if !rawdb.IsSupportedWasmTarget(ethdb.WasmTarget(stylusArch)) && stylusArch != "mock" { + if !rawdb.IsSupportedWasmTarget(rawdb.WasmTarget(stylusArch)) && stylusArch != "mock" { return fmt.Errorf("unsupported stylus architecture: %v", stylusArch) } } @@ -117,11 +116,11 @@ func (c *ValidationClient) WasmModuleRoots() ([]common.Hash, error) { return nil, errors.New("not started") } -func (c *ValidationClient) StylusArchs() []ethdb.WasmTarget { +func (c *ValidationClient) StylusArchs() []rawdb.WasmTarget { if c.Started() { return c.stylusArchs } - return []ethdb.WasmTarget{"not started"} + return []rawdb.WasmTarget{"not started"} } func (c *ValidationClient) Stop() { diff --git a/validator/interface.go b/validator/interface.go index 249cf1b1c3..048a78c236 100644 --- a/validator/interface.go +++ b/validator/interface.go @@ -4,7 +4,7 @@ import ( "context" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/offchainlabs/nitro/util/containers" ) @@ -15,7 +15,7 @@ type ValidationSpawner interface { Start(context.Context) error Stop() Name() string - StylusArchs() []ethdb.WasmTarget + StylusArchs() []rawdb.WasmTarget Room() int } diff --git a/validator/server_api/json.go b/validator/server_api/json.go index f56493cd92..ba812e8830 100644 --- a/validator/server_api/json.go +++ b/validator/server_api/json.go @@ -10,7 +10,7 @@ import ( "fmt" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/offchainlabs/nitro/arbcompress" "github.com/offchainlabs/nitro/arbutil" @@ -63,7 +63,7 @@ type InputJSON struct { BatchInfo []BatchInfoJson DelayedMsgB64 string StartState 
validator.GoGlobalState - UserWasms map[ethdb.WasmTarget]map[common.Hash]string + UserWasms map[rawdb.WasmTarget]map[common.Hash]string DebugChain bool } @@ -89,7 +89,7 @@ func ValidationInputToJson(entry *validator.ValidationInput) *InputJSON { DelayedMsgB64: base64.StdEncoding.EncodeToString(entry.DelayedMsg), StartState: entry.StartState, PreimagesB64: jsonPreimagesMap, - UserWasms: make(map[ethdb.WasmTarget]map[common.Hash]string), + UserWasms: make(map[rawdb.WasmTarget]map[common.Hash]string), DebugChain: entry.DebugChain, } for _, binfo := range entry.BatchInfo { @@ -121,7 +121,7 @@ func ValidationInputFromJson(entry *InputJSON) (*validator.ValidationInput, erro DelayedMsgNr: entry.DelayedMsgNr, StartState: entry.StartState, Preimages: preimages, - UserWasms: make(map[ethdb.WasmTarget]map[common.Hash][]byte), + UserWasms: make(map[rawdb.WasmTarget]map[common.Hash][]byte), DebugChain: entry.DebugChain, } delayed, err := base64.StdEncoding.DecodeString(entry.DelayedMsgB64) diff --git a/validator/server_arb/validator_spawner.go b/validator/server_arb/validator_spawner.go index 4c74bca695..00e491d73e 100644 --- a/validator/server_arb/validator_spawner.go +++ b/validator/server_arb/validator_spawner.go @@ -12,7 +12,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" @@ -109,8 +108,8 @@ func (s *ArbitratorSpawner) WasmModuleRoots() ([]common.Hash, error) { return s.locator.ModuleRoots(), nil } -func (s *ArbitratorSpawner) StylusArchs() []ethdb.WasmTarget { - return []ethdb.WasmTarget{rawdb.TargetWavm} +func (s *ArbitratorSpawner) StylusArchs() []rawdb.WasmTarget { + return []rawdb.WasmTarget{rawdb.TargetWavm} } func (s *ArbitratorSpawner) Name() string { diff --git a/validator/server_jit/spawner.go b/validator/server_jit/spawner.go index 476a0170a5..7bee92aae2 100644 --- a/validator/server_jit/spawner.go +++ b/validator/server_jit/spawner.go @@ -11,7 +11,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/ethdb" "github.com/offchainlabs/nitro/util/stopwaiter" "github.com/offchainlabs/nitro/validator" @@ -78,8 +77,8 @@ func (v *JitSpawner) WasmModuleRoots() ([]common.Hash, error) { return v.locator.ModuleRoots(), nil } -func (v *JitSpawner) StylusArchs() []ethdb.WasmTarget { - return []ethdb.WasmTarget{rawdb.LocalTarget()} +func (v *JitSpawner) StylusArchs() []rawdb.WasmTarget { + return []rawdb.WasmTarget{rawdb.LocalTarget()} } func (v *JitSpawner) execute( diff --git a/validator/validation_entry.go b/validator/validation_entry.go index 555a4c76c7..06f59c6798 100644 --- a/validator/validation_entry.go +++ b/validator/validation_entry.go @@ -2,7 +2,7 @@ package validator import ( "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/offchainlabs/nitro/arbutil" ) @@ -17,7 +17,7 @@ type ValidationInput struct { HasDelayedMsg bool DelayedMsgNr uint64 Preimages map[arbutil.PreimageType]map[common.Hash][]byte - UserWasms map[ethdb.WasmTarget]map[common.Hash][]byte + UserWasms map[rawdb.WasmTarget]map[common.Hash][]byte BatchInfo []BatchInfo DelayedMsg []byte StartState GoGlobalState diff --git a/validator/valnode/validation_api.go b/validator/valnode/validation_api.go index dab74f6e29..866ea7d686 100644 --- a/validator/valnode/validation_api.go +++ 
b/validator/valnode/validation_api.go @@ -12,7 +12,7 @@ import ( "time" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/offchainlabs/nitro/util/stopwaiter" "github.com/offchainlabs/nitro/validator" @@ -45,7 +45,7 @@ func (a *ValidationServerAPI) WasmModuleRoots() ([]common.Hash, error) { return a.spawner.WasmModuleRoots() } -func (a *ValidationServerAPI) StylusArchs() ([]ethdb.WasmTarget, error) { +func (a *ValidationServerAPI) StylusArchs() ([]rawdb.WasmTarget, error) { return a.spawner.StylusArchs(), nil }
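
For orientation, below is a minimal, illustrative sketch of how call sites choose a run context after this refactor. It uses only the constructors and accessors that appear in the patch (NewMessageCommitContext, NewMessagePrefetchContext, NewMessageReplayContext, NewMessageEthcallContext, NewMessageGasEstimationContext, WasmTargets, WasmCacheTag); the MessageRunContext type itself is defined in the go-ethereum submodule and is not part of this diff, so treat this as a hedged example of the calling pattern, not the definitive API.

// Illustrative sketch only. Constructor and method names are taken from the
// call sites in this diff; the concrete MessageRunContext type lives in the
// go-ethereum submodule referenced above.
package sketch

import (
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/rawdb"
)

// runContextFor mirrors createBlockFromNextMessage in executionengine.go:
// prefetch executions get a prefetch context, canonical block production gets
// a commit context, both carrying the node's configured wasm targets.
func runContextFor(isMsgForPrefetch bool, targets []rawdb.WasmTarget) *core.MessageRunContext {
	if isMsgForPrefetch {
		return core.NewMessagePrefetchContext(targets)
	}
	return core.NewMessageCommitContext(targets)
}

// replayContextFor mirrors BlockRecorder.RecordBlockCreation and cmd/replay,
// which validate against a fixed target set (e.g. rawdb.TargetWavm or the
// local target) rather than the node's configured targets.
func replayContextFor(targets []rawdb.WasmTarget) *core.MessageRunContext {
	return core.NewMessageReplayContext(targets)
}

// Read-only RPC paths construct their contexts without wasm targets,
// as in NodeInterface.GasEstimateComponents and GasEstimateL1Component.
var (
	estimationCtx = core.NewMessageGasEstimationContext()
	ethcallCtx    = core.NewMessageEthcallContext()
)

Note that in this scheme only the commit context reaches the long-term Stylus cache: cacheProgram and evictProgram now gate on runCtx.IsCommitMode(), and the cache tag and wasm targets are read from the context (runCtx.WasmCacheTag(), runCtx.WasmTargets()) instead of from db.Database(), which is what allows WrapDatabaseWithWasm to drop its tag and target parameters elsewhere in the patch.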