diff --git a/Cargo.lock b/Cargo.lock
index 67eda682304..e9b84e2a181 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -372,6 +372,7 @@ dependencies = [
  "boa_string",
  "bytemuck",
  "cfg-if",
+ "cow-utils",
  "criterion",
  "dashmap",
  "either",
@@ -438,6 +439,7 @@ dependencies = [
  "boa_parser",
  "boa_runtime",
  "futures-concurrency",
+ "futures-lite 2.5.0",
  "isahc",
  "smol",
  "time",
@@ -921,6 +923,12 @@ dependencies = [
  "libm",
 ]
 
+[[package]]
+name = "cow-utils"
+version = "0.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "417bef24afe1460300965a25ff4a24b8b45ad011948302ec221e8a0a81eb2c79"
+
 [[package]]
 name = "crc32fast"
 version = "1.4.2"
diff --git a/Cargo.toml b/Cargo.toml
index 951f1f0f9a6..fd34e166fd5 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -50,6 +50,7 @@ arbitrary = "1"
 bitflags = "2.5.0"
 clap = "4.5.23"
 colored = "2.2.0"
+cow-utils = "0.1.3"
 fast-float2 = "0.2.3"
 hashbrown = "0.15.2"
 indexmap = { version = "2.7.0", default-features = false }
diff --git a/cli/src/main.rs b/cli/src/main.rs
index 45ece5bf58f..a02527343aa 100644
--- a/cli/src/main.rs
+++ b/cli/src/main.rs
@@ -13,12 +13,12 @@ mod helper;
 use boa_engine::{
     builtins::promise::PromiseState,
     context::ContextBuilder,
-    job::{FutureJob, JobQueue, NativeJob},
+    job::{Job, JobExecutor, NativeAsyncJob, PromiseJob},
     module::{Module, SimpleModuleLoader},
     optimizer::OptimizerOptions,
     script::Script,
     vm::flowgraph::{Direction, Graph},
-    Context, JsError, Source,
+    Context, JsError, JsResult, Source,
 };
 use boa_parser::source::ReadChar;
 use clap::{Parser, ValueEnum, ValueHint};
@@ -292,7 +292,7 @@ fn evaluate_file(
         );
 
         let promise = module.load_link_evaluate(context);
-        context.run_jobs();
+        context.run_jobs().map_err(|err| err.into_erased(context))?;
         let result = promise.state();
 
         return match result {
@@ -308,9 +308,9 @@ fn evaluate_file(
         Ok(v) => println!("{}", v.display()),
         Err(v) => eprintln!("Uncaught {v}"),
     }
-    context.run_jobs();
-
-    Ok(())
+    context
+        .run_jobs()
+        .map_err(|err| err.into_erased(context).into())
 }
 
 fn evaluate_files(args: &Opt, context: &mut Context, loader: &SimpleModuleLoader) {
@@ -336,10 +336,10 @@ fn main() -> Result<()> {
 
     let args = Opt::parse();
 
-    let queue = Rc::new(Jobs::default());
+    let executor = Rc::new(Executor::default());
     let loader = Rc::new(SimpleModuleLoader::new(&args.root).map_err(|e| eyre!(e.to_string()))?);
     let mut context = ContextBuilder::new()
-        .job_queue(queue)
+        .job_executor(executor)
         .module_loader(loader.clone())
         .build()
         .map_err(|e| eyre!(e.to_string()))?;
@@ -425,7 +425,9 @@ fn main() -> Result<()> {
                             eprintln!("{}: {}", "Uncaught".red(), v.to_string().red());
                         }
                     }
-                    context.run_jobs();
+                    if let Err(err) = context.run_jobs() {
+                        eprintln!("{err}");
+                    };
                 }
             }
 
@@ -453,29 +455,45 @@ fn add_runtime(context: &mut Context) {
 }
 
 #[derive(Default)]
-struct Jobs(RefCell<VecDeque<NativeJob>>);
+struct Executor {
+    promise_jobs: RefCell<VecDeque<PromiseJob>>,
+    async_jobs: RefCell<VecDeque<NativeAsyncJob>>,
+}
 
-impl JobQueue for Jobs {
-    fn enqueue_promise_job(&self, job: NativeJob, _: &mut Context) {
-        self.0.borrow_mut().push_back(job);
+impl JobExecutor for Executor {
+    fn enqueue_job(&self, job: Job, _: &mut Context) {
+        match job {
+            Job::PromiseJob(job) => self.promise_jobs.borrow_mut().push_back(job),
+            Job::AsyncJob(job) => self.async_jobs.borrow_mut().push_back(job),
+            job => eprintln!("unsupported job type {job:?}"),
+        }
     }
 
-    fn run_jobs(&self, context: &mut Context) {
+    fn run_jobs(&self, context: &mut Context) -> JsResult<()> {
         loop {
-            let jobs = std::mem::take(&mut *self.0.borrow_mut());
-            if jobs.is_empty() {
-                return;
+            if self.promise_jobs.borrow().is_empty() && self.async_jobs.borrow().is_empty() {
+                return Ok(());
             }
+
+            let jobs = std::mem::take(&mut *self.promise_jobs.borrow_mut());
             for job in jobs {
                 if let Err(e) = job.call(context) {
                     eprintln!("Uncaught {e}");
                 }
             }
-        }
-    }
 
-    fn enqueue_future_job(&self, future: FutureJob, _: &mut Context) {
-        let job = pollster::block_on(future);
-        self.0.borrow_mut().push_back(job);
+            let async_jobs = std::mem::take(&mut *self.async_jobs.borrow_mut());
+            for async_job in async_jobs {
+                if let Err(err) = pollster::block_on(async_job.call(&RefCell::new(&mut *context))) {
+                    eprintln!("Uncaught {err}");
+                }
+                let jobs = std::mem::take(&mut *self.promise_jobs.borrow_mut());
+                for job in jobs {
+                    if let Err(e) = job.call(context) {
+                        eprintln!("Uncaught {e}");
+                    }
+                }
+            }
+        }
     }
 }
diff --git a/clippy.toml b/clippy.toml
index 272425dad89..59ef99e1fe1 100644
--- a/clippy.toml
+++ b/clippy.toml
@@ -1,2 +1,5 @@
 doc-valid-idents = ['ECMAScript', 'JavaScript', 'SpiderMonkey', 'GitHub']
 allow-print-in-tests = true
+disallowed-methods = [
+  { path = "str::to_ascii_uppercase", reason = "To avoid memory allocation, use `cow_utils::CowUtils::cow_to_ascii_uppercase` instead." },
+]
diff --git a/core/engine/Cargo.toml b/core/engine/Cargo.toml
index 9a0d603e427..e6b225ebadf 100644
--- a/core/engine/Cargo.toml
+++ b/core/engine/Cargo.toml
@@ -75,6 +75,7 @@ boa_macros.workspace = true
 boa_ast.workspace = true
 boa_parser.workspace = true
 boa_string.workspace = true
+cow-utils.workspace = true
 serde = { workspace = true, features = ["derive", "rc"] }
 serde_json.workspace = true
 rand.workspace = true
diff --git a/core/engine/src/builtins/promise/mod.rs b/core/engine/src/builtins/promise/mod.rs
index 5a580b87a18..d5795f294c7 100644
--- a/core/engine/src/builtins/promise/mod.rs
+++ b/core/engine/src/builtins/promise/mod.rs
@@ -11,7 +11,7 @@ use crate::{
     builtins::{Array, BuiltInObject},
     context::intrinsics::{Intrinsics, StandardConstructor, StandardConstructors},
     error::JsNativeError,
-    job::{JobCallback, NativeJob},
+    job::{JobCallback, PromiseJob},
     js_string,
     native_function::NativeFunction,
     object::{
@@ -1888,8 +1888,8 @@ impl Promise {
 
                 //   c. Perform HostEnqueuePromiseJob(fulfillJob.[[Job]], fulfillJob.[[Realm]]).
                 context
-                    .job_queue()
-                    .enqueue_promise_job(fulfill_job, context);
+                    .job_executor()
+                    .enqueue_job(fulfill_job.into(), context);
             }
 
             // 11. Else,
@@ -1909,7 +1909,9 @@ impl Promise {
                 let reject_job = new_promise_reaction_job(reject_reaction, reason.clone(), context);
 
                 //   e. Perform HostEnqueuePromiseJob(rejectJob.[[Job]], rejectJob.[[Realm]]).
-                context.job_queue().enqueue_promise_job(reject_job, context);
+                context
+                    .job_executor()
+                    .enqueue_job(reject_job.into(), context);
 
                 // 12. Set promise.[[PromiseIsHandled]] to true.
                 promise
@@ -1985,7 +1987,7 @@ impl Promise {
                 let job = new_promise_reaction_job(reaction, argument.clone(), context);
 
                 // b. Perform HostEnqueuePromiseJob(job.[[Job]], job.[[Realm]]).
-                context.job_queue().enqueue_promise_job(job, context);
+                context.job_executor().enqueue_job(job.into(), context);
             }
             // 2. Return unused.
         }
@@ -2178,7 +2180,7 @@ impl Promise {
                     );
 
                     // 15. Perform HostEnqueuePromiseJob(job.[[Job]], job.[[Realm]]).
-                    context.job_queue().enqueue_promise_job(job, context);
+                    context.job_executor().enqueue_job(job.into(), context);
 
                     // 16. Return undefined.
                     Ok(JsValue::undefined())
@@ -2239,7 +2241,7 @@ fn new_promise_reaction_job(
     mut reaction: ReactionRecord,
     argument: JsValue,
     context: &mut Context,
-) -> NativeJob {
+) -> PromiseJob {
     // Inverting order since `job` captures `reaction` by value.
 
     // 2. Let handlerRealm be null.
@@ -2320,7 +2322,7 @@ fn new_promise_reaction_job(
     };
 
     // 4. Return the Record { [[Job]]: job, [[Realm]]: handlerRealm }.
-    NativeJob::with_realm(job, realm, context)
+    PromiseJob::with_realm(job, realm, context)
 }
 
 /// More information:
@@ -2332,7 +2334,7 @@ fn new_promise_resolve_thenable_job(
     thenable: JsValue,
     then: JobCallback,
     context: &mut Context,
-) -> NativeJob {
+) -> PromiseJob {
     // Inverting order since `job` captures variables by value.
 
     // 2. Let getThenRealmResult be Completion(GetFunctionRealm(then.[[Callback]])).
@@ -2374,5 +2376,5 @@ fn new_promise_resolve_thenable_job(
     };
 
     // 6. Return the Record { [[Job]]: job, [[Realm]]: thenRealm }.
-    NativeJob::with_realm(job, realm, context)
+    PromiseJob::with_realm(job, realm, context)
 }
diff --git a/core/engine/src/builtins/promise/tests.rs b/core/engine/src/builtins/promise/tests.rs
index 7f6c316fb93..e63fc131073 100644
--- a/core/engine/src/builtins/promise/tests.rs
+++ b/core/engine/src/builtins/promise/tests.rs
@@ -13,8 +13,7 @@ fn promise() {
                     count += 1;
                 "#}),
         TestAction::assert_eq("count", 2),
-        #[allow(clippy::redundant_closure_for_method_calls)]
-        TestAction::inspect_context(|ctx| ctx.run_jobs()),
+        TestAction::inspect_context(|ctx| ctx.run_jobs().unwrap()),
         TestAction::assert_eq("count", 3),
     ]);
 }
diff --git a/core/engine/src/builtins/temporal/time_zone/mod.rs b/core/engine/src/builtins/temporal/time_zone/mod.rs
index a1d499e5829..2e73f0a2e0c 100644
--- a/core/engine/src/builtins/temporal/time_zone/mod.rs
+++ b/core/engine/src/builtins/temporal/time_zone/mod.rs
@@ -2,7 +2,7 @@
 #![allow(dead_code)]
 
 use crate::{builtins::temporal::to_zero_padded_decimal_string, Context};
-
+use cow_utils::CowUtils;
 // -- TimeZone Abstract Operations --
 
 /// Abstract operation `DefaultTimeZone ( )`
@@ -96,7 +96,7 @@ fn canonicalize_time_zone_name(time_zone: &str) -> String {
     // do not include local political rules for any time zones performs the following steps when
     // called:
     // 1. Assert: timeZone is an ASCII-case-insensitive match for "UTC".
-    assert_eq!(time_zone.to_ascii_uppercase(), "UTC");
+    assert_eq!(time_zone.cow_to_ascii_uppercase(), "UTC");
     // 2. Return "UTC".
     "UTC".to_owned()
 }
diff --git a/core/engine/src/context/mod.rs b/core/engine/src/context/mod.rs
index a5dfd173070..ccd57b4065c 100644
--- a/core/engine/src/context/mod.rs
+++ b/core/engine/src/context/mod.rs
@@ -1,5 +1,6 @@
 //! The ECMAScript context.
 
+use std::cell::RefCell;
 use std::{cell::Cell, path::Path, rc::Rc};
 
 use boa_ast::StatementList;
@@ -13,11 +14,12 @@ use intrinsics::Intrinsics;
 #[cfg(feature = "temporal")]
 use temporal_rs::tzdb::FsTzdbProvider;
 
+use crate::job::Job;
 use crate::vm::RuntimeLimits;
 use crate::{
     builtins,
     class::{Class, ClassBuilder},
-    job::{JobQueue, NativeJob, SimpleJobQueue},
+    job::{JobExecutor, SimpleJobExecutor},
     js_string,
     module::{IdleModuleLoader, ModuleLoader, SimpleModuleLoader},
     native_function::NativeFunction,
@@ -111,7 +113,7 @@ pub struct Context {
 
     host_hooks: &'static dyn HostHooks,
 
-    job_queue: Rc<dyn JobQueue>,
+    job_executor: Rc<dyn JobExecutor>,
 
     module_loader: Rc<dyn ModuleLoader>,
 
@@ -133,7 +135,7 @@ impl std::fmt::Debug for Context {
             .field("interner", &self.interner)
             .field("vm", &self.vm)
             .field("strict", &self.strict)
-            .field("promise_job_queue", &"JobQueue")
+            .field("job_executor", &"JobExecutor")
             .field("hooks", &"HostHooks")
             .field("module_loader", &"ModuleLoader")
             .field("optimizer_options", &self.optimizer_options);
@@ -186,7 +188,7 @@ impl Context {
     /// ```
     ///
     /// Note that this won't run any scheduled promise jobs; you need to call [`Context::run_jobs`]
-    /// on the context or [`JobQueue::run_jobs`] on the provided queue to run them.
+    /// on the context or [`JobExecutor::run_jobs`] on the provided executor to run them.
     #[allow(clippy::unit_arg, dropping_copy_types)]
     pub fn eval<R: ReadChar>(&mut self, src: Source<'_, R>) -> JsResult<JsValue> {
         let main_timer = Profiler::global().start_event("Script evaluation", "Main");
@@ -467,30 +469,35 @@ impl Context {
         self.strict = strict;
     }
 
-    /// Enqueues a [`NativeJob`] on the [`JobQueue`].
+    /// Enqueues a [`Job`] on the [`JobExecutor`].
     #[inline]
-    pub fn enqueue_job(&mut self, job: NativeJob) {
-        self.job_queue().enqueue_promise_job(job, self);
+    pub fn enqueue_job(&mut self, job: Job) {
+        self.job_executor().enqueue_job(job, self);
     }
 
-    /// Runs all the jobs in the job queue.
+    /// Runs all the jobs with the provided job executor.
     #[inline]
-    pub fn run_jobs(&mut self) {
-        self.job_queue().run_jobs(self);
+    pub fn run_jobs(&mut self) -> JsResult<()> {
+        let result = self.job_executor().run_jobs(self);
         self.clear_kept_objects();
+        result
     }
 
-    /// Asynchronously runs all the jobs in the job queue.
+    /// Asynchronously runs all the jobs with the provided job executor.
     ///
     /// # Note
     ///
     /// Concurrent job execution cannot be guaranteed by the engine, since this depends on the
-    /// specific handling of each [`JobQueue`]. If you want to execute jobs concurrently, you must
-    /// provide a custom implementor of `JobQueue` to the context.
+    /// specific handling of each [`JobExecutor`]. If you want to execute jobs concurrently, you must
+    /// provide a custom implementation of `JobExecutor` to the context.
     #[allow(clippy::future_not_send)]
-    pub async fn run_jobs_async(&mut self) {
-        self.job_queue().run_jobs_async(self).await;
+    pub async fn run_jobs_async(&mut self) -> JsResult<()> {
+        let result = self
+            .job_executor()
+            .run_jobs_async(&RefCell::new(&mut *self))
+            .await;
         self.clear_kept_objects();
+        result
     }
 
     /// Abstract operation [`ClearKeptObjects`][clear].
@@ -546,11 +553,11 @@ impl Context {
         self.host_hooks
     }
 
-    /// Gets the job queue.
+    /// Gets the job executor.
     #[inline]
     #[must_use]
-    pub fn job_queue(&self) -> Rc<dyn JobQueue> {
-        self.job_queue.clone()
+    pub fn job_executor(&self) -> Rc<dyn JobExecutor> {
+        self.job_executor.clone()
     }
 
     /// Gets the module loader.
@@ -881,7 +888,7 @@ impl Context {
 pub struct ContextBuilder {
     interner: Option<Interner>,
     host_hooks: Option<&'static dyn HostHooks>,
-    job_queue: Option<Rc<dyn JobQueue>>,
+    job_executor: Option<Rc<dyn JobExecutor>>,
     module_loader: Option<Rc<dyn ModuleLoader>>,
     can_block: bool,
     #[cfg(feature = "intl")]
@@ -893,7 +900,7 @@ pub struct ContextBuilder {
 impl std::fmt::Debug for ContextBuilder {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         #[derive(Clone, Copy, Debug)]
-        struct JobQueue;
+        struct JobExecutor;
         #[derive(Clone, Copy, Debug)]
         struct HostHooks;
         #[derive(Clone, Copy, Debug)]
@@ -903,7 +910,10 @@ impl std::fmt::Debug for ContextBuilder {
 
         out.field("interner", &self.interner)
             .field("host_hooks", &self.host_hooks.as_ref().map(|_| HostHooks))
-            .field("job_queue", &self.job_queue.as_ref().map(|_| JobQueue))
+            .field(
+                "job_executor",
+                &self.job_executor.as_ref().map(|_| JobExecutor),
+            )
             .field(
                 "module_loader",
                 &self.module_loader.as_ref().map(|_| ModuleLoader),
@@ -1016,10 +1026,10 @@ impl ContextBuilder {
         self
     }
 
-    /// Initializes the [`JobQueue`] for the context.
+    /// Initializes the [`JobExecutor`] for the context.
     #[must_use]
-    pub fn job_queue<Q: JobQueue + 'static>(mut self, job_queue: Rc<Q>) -> Self {
-        self.job_queue = Some(job_queue);
+    pub fn job_executor<Q: JobExecutor + 'static>(mut self, job_executor: Rc<Q>) -> Self {
+        self.job_executor = Some(job_executor);
         self
     }
 
@@ -1090,9 +1100,9 @@ impl ContextBuilder {
             Rc::new(IdleModuleLoader)
         };
 
-        let job_queue = self
-            .job_queue
-            .unwrap_or_else(|| Rc::new(SimpleJobQueue::new()));
+        let job_executor = self
+            .job_executor
+            .unwrap_or_else(|| Rc::new(SimpleJobExecutor::new()));
 
         let mut context = Context {
             interner: self.interner.unwrap_or_default(),
@@ -1119,7 +1129,7 @@ impl ContextBuilder {
             instructions_remaining: self.instructions_remaining,
             kept_alive: Vec::new(),
             host_hooks,
-            job_queue,
+            job_executor,
             module_loader,
             optimizer_options: OptimizerOptions::OPTIMIZE_ALL,
             root_shape,
diff --git a/core/engine/src/job.rs b/core/engine/src/job.rs
index 64f9b1722ce..8838356b688 100644
--- a/core/engine/src/job.rs
+++ b/core/engine/src/job.rs
@@ -1,21 +1,33 @@
 //! Boa's API to create and customize `ECMAScript` jobs and job queues.
 //!
-//! [`NativeJob`] is an ECMAScript [Job], or a closure that runs an `ECMAScript` computation when
-//! there's no other computation running.
+//! [`Job`] is an ECMAScript [Job], or a closure that runs an `ECMAScript` computation when
+//! there's no other computation running. The module defines several types of jobs:
+//! - [`PromiseJob`] for `Promise`-related jobs.
+//! - [`NativeAsyncJob`] for jobs that support [`Future`].
+//! - [`NativeJob`] for generic jobs that aren't related to Promises.
 //!
 //! [`JobCallback`] is an ECMAScript [`JobCallback`] record, containing an `ECMAScript` function
 //! that is executed when a promise is either fulfilled or rejected.
 //!
-//! [`JobQueue`] is a trait encompassing the required functionality for a job queue; this allows
+//! [`JobExecutor`] is a trait encompassing the required functionality for a job executor; this allows
 //! implementing custom event loops, custom handling of Jobs or other fun things.
 //! This trait is also accompanied by two implementors of the trait:
-//! - [`IdleJobQueue`], which is a queue that does nothing, and the default queue if no queue is
+//! - [`IdleJobExecutor`], which is an executor that does nothing, and the default executor if no executor is
 //!   provided. Useful for hosts that want to disable promises.
-//! - [`SimpleJobQueue`], which is a simple FIFO queue that runs all jobs to completion, bailing
+//! - [`SimpleJobExecutor`], which is a simple FIFO queue that runs all jobs to completion, bailing
 //!   on the first error encountered.
 //!
+//! ## [`Trace`]?
+//!
+//! Most of the types defined in this module don't implement `Trace`. This is because most jobs can only
+//! be run once, and putting a `JobExecutor` on a garbage-collected object is not allowed.
+//!
+//! In addition, since jobs cannot be stored within a [`Gc`], the garbage collector can treat any
+//! variables captured by a job as roots, which lets jobs freely capture garbage-collected types.
+//!
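+//! ## Example
+//!
+//! A minimal sketch of the end-to-end flow, assuming the default [`SimpleJobExecutor`]: jobs are
+//! converted into a [`Job`] and enqueued on the executor, and nothing runs until the host drives
+//! the executor through [`Context::run_jobs`][crate::Context::run_jobs].
+//!
+//! ```
+//! use boa_engine::{job::PromiseJob, Context, Source};
+//!
+//! let context = &mut Context::default();
+//!
+//! // Any job type converts into a `Job` with `From`/`Into` before being enqueued.
+//! context.enqueue_job(
+//!     PromiseJob::new(|context| context.eval(Source::from_bytes("1 + 1"))).into(),
+//! );
+//!
+//! // The executor decides when and how to run the queued jobs.
+//! context.run_jobs().unwrap();
+//! ```
+//!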
 //! [Job]: https://tc39.es/ecma262/#sec-jobs
 //! [JobCallback]: https://tc39.es/ecma262/#sec-jobcallback-records
+//! [`Gc`]: boa_gc::Gc
 
 use std::{cell::RefCell, collections::VecDeque, fmt::Debug, future::Future, pin::Pin};
 
@@ -26,44 +38,14 @@ use crate::{
 };
 use boa_gc::{Finalize, Trace};
 
-/// The [`Future`] job passed to the [`JobQueue::enqueue_future_job`] operation.
-pub type FutureJob = Pin<Box<dyn Future<Output = NativeJob> + 'static>>;
-
-/// An ECMAScript [Job] closure.
-///
-/// The specification allows scheduling any [`NativeJob`] closure by the host into the job queue.
-/// However, host-defined jobs must abide to a set of requirements.
-///
-/// ### Requirements
-///
-/// - At some future point in time, when there is no running execution context and the execution
-///   context stack is empty, the implementation must:
-///     - Perform any host-defined preparation steps.
-///     - Invoke the Job Abstract Closure.
-///     - Perform any host-defined cleanup steps, after which the execution context stack must be empty.
-/// - Only one Job may be actively undergoing evaluation at any point in time.
-/// - Once evaluation of a Job starts, it must run to completion before evaluation of any other Job starts.
-/// - The Abstract Closure must return a normal completion, implementing its own handling of errors.
-///
-/// `NativeJob`s API differs slightly on the last requirement, since it allows closures returning
-/// [`JsResult`], but it's okay because `NativeJob`s are handled by the host anyways; a host could
-/// pass a closure returning `Err` and handle the error on [`JobQueue::run_jobs`], making the closure
-/// effectively run as if it never returned `Err`.
-///
-/// ## [`Trace`]?
-///
-/// `NativeJob` doesn't implement `Trace` because it doesn't need to; all jobs can only be run once
-/// and putting a [`JobQueue`] on a garbage collected object is not allowed.
+/// An ECMAScript [Job] Abstract Closure.
 ///
-/// On the other hand, it seems like this type breaks all the safety checks of the
-/// [`NativeFunction`] API, since you can capture any `Trace` variable into the closure... but it
-/// doesn't!
-/// The garbage collector doesn't need to trace the captured variables because the closures
-/// are always stored on the [`JobQueue`], which is always rooted, which means the captured variables
-/// are also rooted, allowing us to capture any variable in the closure for free!
+/// This is essentially a synchronous task that must be run to make progress on [`Promise`] objects
+/// or to unblock threads waiting on [`Atomics.waitAsync`].
 ///
 /// [Job]: https://tc39.es/ecma262/#sec-jobs
-/// [`NativeFunction`]: crate::native_function::NativeFunction
+/// [`Promise`]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Promise
+/// [`Atomics.waitAsync`]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Atomics/waitAsync
 pub struct NativeJob {
     #[allow(clippy::type_complexity)]
     f: Box<dyn FnOnce(&mut Context) -> JsResult<JsValue>>,
@@ -72,7 +54,7 @@ pub struct NativeJob {
 
 impl Debug for NativeJob {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        f.debug_struct("NativeJob").field("f", &"Closure").finish()
+        f.debug_struct("NativeJob").finish_non_exhaustive()
     }
 }
 
@@ -133,6 +115,168 @@ impl NativeJob {
     }
 }
 
+/// The [`Future`] job returned by a [`NativeAsyncJob`] operation.
+pub type BoxedFuture<'a> = Pin<Box<dyn Future<Output = JsResult<JsValue>> + 'a>>;
+
+/// An ECMAScript [Job] that can be run asynchronously.
+///
+/// This is an additional type of job that is not defined by the specification; it makes it easier to
+/// run [`Future`] tasks created by ECMAScript code.
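+///
+/// A minimal sketch of scheduling an async job, assuming the default [`SimpleJobExecutor`], which
+/// drives async jobs to completion on [`Context::run_jobs`]. The closure receives the [`Context`]
+/// wrapped in a [`RefCell`] so it can be borrowed across `await` points:
+///
+/// ```
+/// use boa_engine::{job::NativeAsyncJob, Context, Source};
+///
+/// let context = &mut Context::default();
+///
+/// context.enqueue_job(
+///     NativeAsyncJob::new(|context| {
+///         Box::pin(async move {
+///             // An async job can await other futures, and only needs to borrow the
+///             // context when it actually runs ECMAScript code.
+///             context.borrow_mut().eval(Source::from_bytes("1 + 1"))
+///         })
+///     })
+///     .into(),
+/// );
+///
+/// context.run_jobs().unwrap();
+/// ```
+///
+/// [Job]: https://tc39.es/ecma262/#sec-jobs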
+#[allow(clippy::type_complexity)]
+pub struct NativeAsyncJob {
+    f: Box<dyn for<'a> FnOnce(&'a RefCell<&mut Context>) -> BoxedFuture<'a>>,
+    realm: Option<Realm>,
+}
+
+impl Debug for NativeAsyncJob {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("NativeAsyncJob")
+            .field("f", &"Closure")
+            .finish()
+    }
+}
+
+impl NativeAsyncJob {
+    /// Creates a new `NativeAsyncJob` from a closure.
+    pub fn new<F>(f: F) -> Self
+    where
+        F: for<'a> FnOnce(&'a RefCell<&mut Context>) -> BoxedFuture<'a> + 'static,
+    {
+        Self {
+            f: Box::new(f),
+            realm: None,
+        }
+    }
+
+    /// Creates a new `NativeAsyncJob` from a closure and an execution realm.
+    pub fn with_realm<F>(f: F, realm: Realm) -> Self
+    where
+        F: for<'a> FnOnce(&'a RefCell<&mut Context>) -> BoxedFuture<'a> + 'static,
+    {
+        Self {
+            f: Box::new(f),
+            realm: Some(realm),
+        }
+    }
+
+    /// Gets a reference to the execution realm of the job.
+    #[must_use]
+    pub const fn realm(&self) -> Option<&Realm> {
+        self.realm.as_ref()
+    }
+
+    /// Calls the native async job with the specified [`Context`].
+    ///
+    /// # Note
+    ///
+    /// If the native async job has an execution realm defined, this sets the running execution
+    /// context to the realm's before calling the inner closure, and resets it after execution.
+    pub fn call<'a, 'b>(
+        self,
+        context: &'a RefCell<&'b mut Context>,
+        // We can promise `Unpin` to callers because `self.f` already returns a boxed (and therefore
+        // pinned) future, so the wrapper itself never needs to be pinned.
+    ) -> impl Future<Output = JsResult<JsValue>> + Unpin + use<'a, 'b> {
+        // If realm is not null, each time job is invoked the implementation must perform
+        // implementation-defined steps such that execution is prepared to evaluate ECMAScript
+        // code at the time of job's invocation.
+        let realm = self.realm;
+
+        let mut future = if let Some(realm) = &realm {
+            let old_realm = context.borrow_mut().enter_realm(realm.clone());
+
+            // Let scriptOrModule be GetActiveScriptOrModule() at the time HostEnqueuePromiseJob is
+            // invoked. If realm is not null, each time job is invoked the implementation must
+            // perform implementation-defined steps such that scriptOrModule is the active script or
+            // module at the time of job's invocation.
+            let result = (self.f)(context);
+
+            context.borrow_mut().enter_realm(old_realm);
+            result
+        } else {
+            (self.f)(context)
+        };
+
+        std::future::poll_fn(move |cx| {
+            // We need to do the same dance again since the inner code could assume we're still
+            // on the same realm.
+            if let Some(realm) = &realm {
+                let old_realm = context.borrow_mut().enter_realm(realm.clone());
+
+                let poll_result = future.as_mut().poll(cx);
+
+                context.borrow_mut().enter_realm(old_realm);
+                poll_result
+            } else {
+                future.as_mut().poll(cx)
+            }
+        })
+    }
+}
+
+/// An ECMAScript [Job Abstract Closure] executing code related to [`Promise`] objects.
+///
+/// This is the kind of job scheduled by the [`HostEnqueuePromiseJob`] operation from the specification.
+///
+/// ### [Requirements]
+///
+/// - If realm is not null, each time job is invoked the implementation must perform implementation-defined
+///   steps such that execution is prepared to evaluate ECMAScript code at the time of job's invocation.
+/// - Let `scriptOrModule` be [`GetActiveScriptOrModule()`] at the time `HostEnqueuePromiseJob` is invoked.
+///   If realm is not null, each time job is invoked the implementation must perform implementation-defined steps
+///   such that `scriptOrModule` is the active script or module at the time of job's invocation.
+/// - Jobs must run in the same order as the `HostEnqueuePromiseJob` invocations that scheduled them.
+///
+/// Of all the requirements, Boa guarantees the first two by its internal implementation of `NativeJob`, meaning
+/// implementations of [`JobExecutor`] only need to guarantee that jobs run in the same order as they're enqueued.
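+///
+/// A minimal sketch of creating and scheduling a promise job by hand; the promise built-ins normally
+/// create these themselves, but the flow is the same (assuming the default [`SimpleJobExecutor`]):
+///
+/// ```
+/// use boa_engine::{job::PromiseJob, Context, Source};
+///
+/// let context = &mut Context::default();
+///
+/// context.enqueue_job(
+///     PromiseJob::new(|context| {
+///         // Runs synchronously once the executor drains its queue.
+///         context.eval(Source::from_bytes("40 + 2"))
+///     })
+///     .into(),
+/// );
+///
+/// context.run_jobs().unwrap();
+/// ```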
+///
+/// [`Promise`]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Promise
+/// [`HostEnqueuePromiseJob`]: https://tc39.es/ecma262/#sec-hostenqueuepromisejob
+/// [Job Abstract Closure]: https://tc39.es/ecma262/#sec-jobs
+/// [Requirements]: https://tc39.es/ecma262/multipage/executable-code-and-execution-contexts.html#sec-hostenqueuepromisejob
+/// [`GetActiveScriptOrModule()`]: https://tc39.es/ecma262/multipage/executable-code-and-execution-contexts.html#sec-getactivescriptormodule
+pub struct PromiseJob(NativeJob);
+
+impl Debug for PromiseJob {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("PromiseJob").finish_non_exhaustive()
+    }
+}
+
+impl PromiseJob {
+    /// Creates a new `PromiseJob` from a closure.
+    pub fn new<F>(f: F) -> Self
+    where
+        F: FnOnce(&mut Context) -> JsResult<JsValue> + 'static,
+    {
+        Self(NativeJob::new(f))
+    }
+
+    /// Creates a new `PromiseJob` from a closure and an execution realm.
+    pub fn with_realm<F>(f: F, realm: Realm, context: &mut Context) -> Self
+    where
+        F: FnOnce(&mut Context) -> JsResult<JsValue> + 'static,
+    {
+        Self(NativeJob::with_realm(f, realm, context))
+    }
+
+    /// Gets a reference to the execution realm of the `PromiseJob`.
+    #[must_use]
+    pub const fn realm(&self) -> Option<&Realm> {
+        self.0.realm()
+    }
+
+    /// Calls the `PromiseJob` with the specified [`Context`].
+    ///
+    /// # Note
+    ///
+    /// If the job has an execution realm defined, this sets the running execution
+    /// context to the realm's before calling the inner closure, and resets it after execution.
+    pub fn call(self, context: &mut Context) -> JsResult<JsValue> {
+        self.0.call(context)
+    }
+}
+
 /// [`JobCallback`][spec] records.
 ///
 /// [spec]: https://tc39.es/ecma262/#sec-jobcallback-records
@@ -182,140 +326,174 @@ impl JobCallback {
     }
 }
 
-/// A queue of `ECMAscript` [Jobs].
+/// A job that needs to be handled by a [`JobExecutor`].
 ///
-/// This is the main API that allows creating custom event loops with custom job queues.
+/// # Requirements
 ///
-/// [Jobs]: https://tc39.es/ecma262/#sec-jobs
-pub trait JobQueue {
-    /// [`HostEnqueuePromiseJob ( job, realm )`][spec].
-    ///
-    /// Enqueues a [`NativeJob`] on the job queue.
-    ///
-    /// # Requirements
-    ///
-    /// Per the [spec]:
-    /// > An implementation of `HostEnqueuePromiseJob` must conform to the requirements in [9.5][Jobs] as well as the
-    /// > following:
-    /// > - If `realm` is not null, each time `job` is invoked the implementation must perform implementation-defined steps
-    ///   > such that execution is prepared to evaluate ECMAScript code at the time of job's invocation.
-    /// > - Let `scriptOrModule` be `GetActiveScriptOrModule()` at the time `HostEnqueuePromiseJob` is invoked. If realm
-    ///   > is not null, each time job is invoked the implementation must perform implementation-defined steps such that
-    ///   > `scriptOrModule` is the active script or module at the time of job's invocation.
-    /// > - Jobs must run in the same order as the `HostEnqueuePromiseJob` invocations that scheduled them.
+/// The specification defines many types of jobs, but all of them must adhere to a set of requirements:
+///
+/// - At some future point in time, when there is no running execution context and the execution
+///   context stack is empty, the implementation must:
+///     - Perform any host-defined preparation steps.
+///     - Invoke the Job Abstract Closure.
+///     - Perform any host-defined cleanup steps, after which the execution context stack must be empty.
+/// - Only one Job may be actively undergoing evaluation at any point in time.
+/// - Once evaluation of a Job starts, it must run to completion before evaluation of any other Job starts.
+/// - The Abstract Closure must return a normal completion, implementing its own handling of errors.
+///
+/// Boa is a little more flexible on the last requirement, since it allows jobs to return either
+/// values or errors, but the rest of the requirements must be followed by all conformant implementations.
+///
+/// Each job type can also impose further requirements on top of the ones listed above.
+#[non_exhaustive]
+#[derive(Debug)]
+pub enum Job {
+    /// A `Promise`-related job.
     ///
-    /// Of all the requirements, Boa guarantees the first two by its internal implementation of `NativeJob`, meaning
-    /// the implementer must only guarantee that jobs are run in the same order as they're enqueued.
+    /// See [`PromiseJob`] for more information.
+    PromiseJob(PromiseJob),
+    /// A [`Future`]-related job.
     ///
-    /// [spec]: https://tc39.es/ecma262/#sec-hostenqueuepromisejob
-    /// [Jobs]: https://tc39.es/ecma262/#sec-jobs
-    fn enqueue_promise_job(&self, job: NativeJob, context: &mut Context);
+    /// See [`NativeAsyncJob`] for more information.
+    AsyncJob(NativeAsyncJob),
+}
 
-    /// Runs all jobs in the queue.
-    ///
-    /// Running a job could enqueue more jobs in the queue. The implementor of the trait
-    /// determines if the method should loop until there are no more queued jobs or if
-    /// it should only run one iteration of the queue.
-    fn run_jobs(&self, context: &mut Context);
+impl From<NativeAsyncJob> for Job {
+    fn from(native_async_job: NativeAsyncJob) -> Self {
+        Job::AsyncJob(native_async_job)
+    }
+}
 
-    /// Enqueues a new [`Future`] job on the job queue.
-    ///
-    /// On completion, `future` returns a new [`NativeJob`] that needs to be enqueued into the
-    /// job queue to update the state of the inner `Promise`, which is what ECMAScript sees. Failing
-    /// to do this will leave the inner `Promise` in the `pending` state, which won't call any `then`
-    /// or `catch` handlers, even if `future` was already completed.
-    fn enqueue_future_job(&self, future: FutureJob, context: &mut Context);
+impl From<PromiseJob> for Job {
+    fn from(promise_job: PromiseJob) -> Self {
+        Job::PromiseJob(promise_job)
+    }
+}
 
-    /// Asynchronously runs all jobs in the queue.
+/// An executor of `ECMAScript` [Jobs].
+///
+/// This is the main API that allows creating custom event loops.
+///
+/// [Jobs]: https://tc39.es/ecma262/#sec-jobs
+pub trait JobExecutor {
+    /// Enqueues a `Job` on the executor.
     ///
-    /// Running a job could enqueue more jobs in the queue. The implementor of the trait
-    /// determines if the method should loop until there are no more queued jobs or if
-    /// it should only run one iteration of the queue.
+    /// This combines all the host-defined job enqueueing operations into a single method.
+    /// See the [spec] for more information on the requirements that each operation must follow.
     ///
-    /// By default forwards to [`JobQueue::run_jobs`]. Implementors using async should override this
+    /// [spec]: https://tc39.es/ecma262/#sec-jobs
+    fn enqueue_job(&self, job: Job, context: &mut Context);
+
+    /// Runs all jobs in the executor.
+    fn run_jobs(&self, context: &mut Context) -> JsResult<()>;
+
+    /// Asynchronously runs all jobs in the executor.
+    ///
+    /// By default, this forwards to [`JobExecutor::run_jobs`]. Implementors using async should override this
     /// with a proper algorithm to run jobs asynchronously.
-    fn run_jobs_async<'a, 'ctx, 'fut>(
+    fn run_jobs_async<'a, 'b, 'fut>(
         &'a self,
-        context: &'ctx mut Context,
-    ) -> Pin<Box<dyn Future<Output = ()> + 'fut>>
+        context: &'b RefCell<&mut Context>,
+    ) -> Pin<Box<dyn Future<Output = JsResult<()>> + 'fut>>
     where
         'a: 'fut,
-        'ctx: 'fut,
+        'b: 'fut,
     {
-        Box::pin(async { self.run_jobs(context) })
+        Box::pin(async { self.run_jobs(&mut context.borrow_mut()) })
     }
 }
 
-/// A job queue that does nothing.
+/// A job executor that does nothing.
 ///
-/// This queue is mostly useful if you want to disable the promise capabilities of the engine. This
+/// This executor is mostly useful if you want to disable the promise capabilities of the engine. This
 /// can be done by passing it to the [`ContextBuilder`]:
 ///
 /// ```
 /// use boa_engine::{
 ///     context::ContextBuilder,
-///     job::{IdleJobQueue, JobQueue},
+///     job::{IdleJobExecutor, JobExecutor},
 /// };
 /// use std::rc::Rc;
 ///
-/// let queue = Rc::new(IdleJobQueue);
-/// let context = ContextBuilder::new().job_queue(queue).build();
+/// let executor = Rc::new(IdleJobExecutor);
+/// let context = ContextBuilder::new().job_executor(executor).build();
 /// ```
 ///
 /// [`ContextBuilder`]: crate::context::ContextBuilder
 #[derive(Debug, Clone, Copy)]
-pub struct IdleJobQueue;
+pub struct IdleJobExecutor;
 
-impl JobQueue for IdleJobQueue {
-    fn enqueue_promise_job(&self, _: NativeJob, _: &mut Context) {}
+impl JobExecutor for IdleJobExecutor {
+    fn enqueue_job(&self, _: Job, _: &mut Context) {}
 
-    fn run_jobs(&self, _: &mut Context) {}
-
-    fn enqueue_future_job(&self, _: FutureJob, _: &mut Context) {}
+    fn run_jobs(&self, _: &mut Context) -> JsResult<()> {
+        Ok(())
+    }
 }
 
-/// A simple FIFO job queue that bails on the first error.
+/// A simple FIFO executor that bails on the first error.
 ///
-/// This is the default job queue for the [`Context`], but it is mostly pretty limited for
-/// custom event queues.
+/// This is the default job executor for the [`Context`], but it is quite limited for
+/// custom event loops.
 ///
-/// To disable running promise jobs on the engine, see [`IdleJobQueue`].
+/// To disable running promise jobs on the engine, see [`IdleJobExecutor`].
 #[derive(Default)]
-pub struct SimpleJobQueue(RefCell<VecDeque<NativeJob>>);
+pub struct SimpleJobExecutor {
+    promise_jobs: RefCell<VecDeque<PromiseJob>>,
+    async_jobs: RefCell<VecDeque<NativeAsyncJob>>,
+}
 
-impl Debug for SimpleJobQueue {
+impl Debug for SimpleJobExecutor {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        f.debug_tuple("SimpleQueue").field(&"..").finish()
+        f.debug_struct("SimpleJobExecutor").finish_non_exhaustive()
     }
 }
 
-impl SimpleJobQueue {
-    /// Creates an empty `SimpleJobQueue`.
+impl SimpleJobExecutor {
+    /// Creates a new `SimpleJobExecutor`.
     #[must_use]
     pub fn new() -> Self {
         Self::default()
     }
 }
 
-impl JobQueue for SimpleJobQueue {
-    fn enqueue_promise_job(&self, job: NativeJob, _: &mut Context) {
-        self.0.borrow_mut().push_back(job);
-    }
-
-    fn run_jobs(&self, context: &mut Context) {
-        // Make sure we take all jobs out of the queue before running them.
-        // If a job enqueues another job we'll wait for the next run to pick it up.
-        let jobs_to_run = self.0.borrow_mut().drain(..).collect::<Vec<_>>();
-        for job in jobs_to_run {
-            if job.call(context).is_err() {
-                self.0.borrow_mut().clear();
-                return;
-            };
+impl JobExecutor for SimpleJobExecutor {
+    fn enqueue_job(&self, job: Job, _: &mut Context) {
+        match job {
+            Job::PromiseJob(p) => self.promise_jobs.borrow_mut().push_back(p),
+            Job::AsyncJob(a) => self.async_jobs.borrow_mut().push_back(a),
         }
     }
 
-    fn enqueue_future_job(&self, future: FutureJob, context: &mut Context) {
-        let job = pollster::block_on(future);
-        self.enqueue_promise_job(job, context);
+    fn run_jobs(&self, context: &mut Context) -> JsResult<()> {
+        let context = RefCell::new(context);
+        loop {
+            let mut next_job = self.async_jobs.borrow_mut().pop_front();
+            while let Some(job) = next_job {
+                if let Err(err) = pollster::block_on(job.call(&context)) {
+                    self.async_jobs.borrow_mut().clear();
+                    self.promise_jobs.borrow_mut().clear();
+                    return Err(err);
+                };
+                next_job = self.async_jobs.borrow_mut().pop_front();
+            }
+
+            // A `while let Some(job) = self.promise_jobs.borrow_mut().pop_front()` loop would keep
+            // the `RefMut` guard alive for the whole loop body, panicking if a running job enqueues
+            // more jobs, so we pop into a separate binding to release the borrow first.
+            let mut next_job = self.promise_jobs.borrow_mut().pop_front();
+            while let Some(job) = next_job {
+                if let Err(err) = job.call(&mut context.borrow_mut()) {
+                    self.async_jobs.borrow_mut().clear();
+                    self.promise_jobs.borrow_mut().clear();
+                    return Err(err);
+                };
+                next_job = self.promise_jobs.borrow_mut().pop_front();
+            }
+
+            if self.async_jobs.borrow().is_empty() && self.promise_jobs.borrow().is_empty() {
+                return Ok(());
+            }
+        }
     }
 }
diff --git a/core/engine/src/object/builtins/jspromise.rs b/core/engine/src/object/builtins/jspromise.rs
index 777113b8cec..a42370033cc 100644
--- a/core/engine/src/object/builtins/jspromise.rs
+++ b/core/engine/src/object/builtins/jspromise.rs
@@ -8,7 +8,7 @@ use crate::{
         promise::{PromiseState, ResolvingFunctions},
         Promise,
     },
-    job::NativeJob,
+    job::NativeAsyncJob,
     object::JsObject,
     value::TryFromJs,
     Context, JsArgs, JsError, JsNativeError, JsResult, JsValue, NativeFunction,
@@ -292,21 +292,23 @@ impl JsPromise {
     {
         let (promise, resolvers) = Self::new_pending(context);
 
-        let future = async move {
-            let result = future.await;
-
-            NativeJob::new(move |context| match result {
-                Ok(v) => resolvers.resolve.call(&JsValue::undefined(), &[v], context),
-                Err(e) => {
-                    let e = e.to_opaque(context);
-                    resolvers.reject.call(&JsValue::undefined(), &[e], context)
-                }
+        context.enqueue_job(
+            NativeAsyncJob::new(move |context| {
+                Box::pin(async move {
+                    let result = future.await;
+
+                    let context = &mut context.borrow_mut();
+                    match result {
+                        Ok(v) => resolvers.resolve.call(&JsValue::undefined(), &[v], context),
+                        Err(e) => {
+                            let e = e.to_opaque(context);
+                            resolvers.reject.call(&JsValue::undefined(), &[e], context)
+                        }
+                    }
+                })
             })
-        };
-
-        context
-            .job_queue()
-            .enqueue_future_job(Box::pin(future), context);
+            .into(),
+        );
 
         promise
     }
@@ -1083,7 +1085,7 @@ impl JsPromise {
 
     /// Run jobs until this promise is resolved or rejected. This could
     /// result in an infinite loop if the promise is never resolved or
-    /// rejected (e.g. with a [`boa_engine::job::JobQueue`] that does
+    /// rejected (e.g. with a [`boa_engine::job::JobExecutor`] that does
     /// not prioritize properly). If you need more control over how
     /// the promise handles timing out, consider using
     /// [`Context::run_jobs`] directly.
@@ -1141,14 +1143,14 @@ impl JsPromise {
     /// // Uncommenting the following line would panic.
     /// // context.run_jobs();
     /// ```
-    pub fn await_blocking(&self, context: &mut Context) -> Result<JsValue, JsValue> {
+    pub fn await_blocking(&self, context: &mut Context) -> Result<JsValue, JsError> {
         loop {
             match self.state() {
                 PromiseState::Pending => {
-                    context.run_jobs();
+                    context.run_jobs()?;
                 }
                 PromiseState::Fulfilled(f) => break Ok(f),
-                PromiseState::Rejected(r) => break Err(r),
+                PromiseState::Rejected(r) => break Err(JsError::from_opaque(r)),
             }
         }
     }
diff --git a/core/engine/src/script.rs b/core/engine/src/script.rs
index 52c59110e22..4adf0118d91 100644
--- a/core/engine/src/script.rs
+++ b/core/engine/src/script.rs
@@ -160,9 +160,9 @@ impl Script {
     /// Evaluates this script and returns its result.
     ///
     /// Note that this won't run any scheduled promise jobs; you need to call [`Context::run_jobs`]
-    /// on the context or [`JobQueue::run_jobs`] on the provided queue to run them.
+    /// on the context or [`JobExecutor::run_jobs`] on the provided executor to run them.
     ///
-    /// [`JobQueue::run_jobs`]: crate::job::JobQueue::run_jobs
+    /// [`JobExecutor::run_jobs`]: crate::job::JobExecutor::run_jobs
     pub fn evaluate(&self, context: &mut Context) -> JsResult<JsValue> {
         let _timer = Profiler::global().start_event("Execution", "Main");
 
diff --git a/core/engine/src/tests/async_generator.rs b/core/engine/src/tests/async_generator.rs
index 82889b1e264..54898589d13 100644
--- a/core/engine/src/tests/async_generator.rs
+++ b/core/engine/src/tests/async_generator.rs
@@ -47,7 +47,7 @@ fn return_on_then_infinite_loop() {
             });
             g.return();
         "#}),
-        TestAction::inspect_context(Context::run_jobs),
+        TestAction::inspect_context(|ctx| ctx.run_jobs().unwrap()),
         TestAction::assert_eq("count", 100),
     ]);
 }
@@ -71,7 +71,7 @@ fn return_on_then_single() {
             });
             let ret = g.return()
         "#}),
-        TestAction::inspect_context(Context::run_jobs),
+        TestAction::inspect_context(|ctx| ctx.run_jobs().unwrap()),
         TestAction::assert_eq("first", false),
         TestAction::assert_with_op("ret", |ret, context| {
             assert_promise_iter_value(&ret, &JsValue::undefined(), true, context);
@@ -104,7 +104,7 @@ fn return_on_then_queue() {
             let second = g.next();
             let ret = g.return();
         "#}),
-        TestAction::inspect_context(Context::run_jobs),
+        TestAction::inspect_context(|ctx| ctx.run_jobs().unwrap()),
         TestAction::assert_with_op("first", |first, context| {
             assert_promise_iter_value(&first, &JsValue::from(1), false, context);
             true
diff --git a/core/engine/src/tests/iterators.rs b/core/engine/src/tests/iterators.rs
index eb341f1f986..2ddddd2fc50 100644
--- a/core/engine/src/tests/iterators.rs
+++ b/core/engine/src/tests/iterators.rs
@@ -46,8 +46,7 @@ fn iterator_close_in_continue_before_jobs() {
                 actual.push("async fn end");
             }();
         "#}),
-        #[allow(clippy::redundant_closure_for_method_calls)]
-        TestAction::inspect_context(|ctx| ctx.run_jobs()),
+        TestAction::inspect_context(|ctx| ctx.run_jobs().unwrap()),
         TestAction::assert(indoc! {r#"
             arrayEquals(
                 actual,
@@ -110,8 +109,7 @@ fn async_iterator_close_in_continue_is_awaited() {
                 actual.push("async fn end");
             }();
         "#}),
-        #[allow(clippy::redundant_closure_for_method_calls)]
-        TestAction::inspect_context(|ctx| ctx.run_jobs()),
+        TestAction::inspect_context(|ctx| ctx.run_jobs().unwrap()),
         TestAction::assert(indoc! {r#"
             arrayEquals(
                 actual,
@@ -198,8 +196,7 @@ fn mixed_iterators_close_in_continue() {
                 actual.push("async fn end");
             }();
         "#}),
-        #[allow(clippy::redundant_closure_for_method_calls)]
-        TestAction::inspect_context(|ctx| ctx.run_jobs()),
+        TestAction::inspect_context(|ctx| ctx.run_jobs().unwrap()),
         TestAction::assert(indoc! {r#"
             arrayEquals(
                 actual,
diff --git a/core/engine/src/tests/promise.rs b/core/engine/src/tests/promise.rs
index c8d64b8809e..a5f48f51521 100644
--- a/core/engine/src/tests/promise.rs
+++ b/core/engine/src/tests/promise.rs
@@ -31,7 +31,7 @@ fn issue_2658() {
                     genTwo.next().then(v => { result2 = v; });
                 "#
         }),
-        TestAction::inspect_context(|ctx| ctx.run_jobs()),
+        TestAction::inspect_context(|ctx| ctx.run_jobs().unwrap()),
         TestAction::assert("!result1.done"),
         TestAction::assert_eq("result1.value", 5),
         TestAction::assert("!result2.done"),
diff --git a/core/engine/tests/imports.rs b/core/engine/tests/imports.rs
index 36def275947..a4feca921d4 100644
--- a/core/engine/tests/imports.rs
+++ b/core/engine/tests/imports.rs
@@ -23,7 +23,7 @@ fn subdirectories() {
     let module = boa_engine::Module::parse(source, None, &mut context).unwrap();
     let result = module.load_link_evaluate(&mut context);
 
-    context.run_jobs();
+    context.run_jobs().unwrap();
     match result.state() {
         PromiseState::Pending => {}
         PromiseState::Fulfilled(v) => {
diff --git a/core/engine/tests/module.rs b/core/engine/tests/module.rs
index 09ec3522588..93db4609812 100644
--- a/core/engine/tests/module.rs
+++ b/core/engine/tests/module.rs
@@ -41,7 +41,7 @@ fn test_json_module_from_str() {
 
     let module = Module::parse(source, None, &mut context).unwrap();
     let promise = module.load_link_evaluate(&mut context);
-    context.run_jobs();
+    context.run_jobs().unwrap();
 
     match promise.state() {
         PromiseState::Pending => {}
diff --git a/core/interop/src/lib.rs b/core/interop/src/lib.rs
index 436680d29db..e3b4bff3745 100644
--- a/core/interop/src/lib.rs
+++ b/core/interop/src/lib.rs
@@ -565,7 +565,7 @@ fn into_js_module() {
     let root_module = Module::parse(source, None, &mut context).unwrap();
 
     let promise_result = root_module.load_link_evaluate(&mut context);
-    context.run_jobs();
+    context.run_jobs().unwrap();
 
     // Checking if the final promise didn't return an error.
     assert!(
@@ -617,7 +617,7 @@ fn can_throw_exception() {
     let root_module = Module::parse(source, None, &mut context).unwrap();
 
     let promise_result = root_module.load_link_evaluate(&mut context);
-    context.run_jobs();
+    context.run_jobs().unwrap();
 
     // Checking if the final promise didn't return an error.
     assert_eq!(
@@ -721,7 +721,7 @@ fn class() {
     let root_module = Module::parse(source, None, &mut context).unwrap();
 
     let promise_result = root_module.load_link_evaluate(&mut context);
-    context.run_jobs();
+    context.run_jobs().unwrap();
 
     // Checking if the final promise didn't return an error.
     assert!(
diff --git a/core/interop/src/macros.rs b/core/interop/src/macros.rs
index 9be07ef765d..5fe2d585052 100644
--- a/core/interop/src/macros.rs
+++ b/core/interop/src/macros.rs
@@ -525,7 +525,7 @@ fn js_class_test() {
     let root_module = Module::parse(source, None, &mut context).unwrap();
 
     let promise_result = root_module.load_link_evaluate(&mut context);
-    context.run_jobs();
+    context.run_jobs().unwrap();
 
     // Checking if the final promise didn't return an error.
     assert!(
diff --git a/core/interop/tests/embedded.rs b/core/interop/tests/embedded.rs
index 04b35a9f6ad..d0a8501d65e 100644
--- a/core/interop/tests/embedded.rs
+++ b/core/interop/tests/embedded.rs
@@ -35,7 +35,7 @@ fn simple() {
     )
     .expect("failed to parse module");
     let promise = module.load_link_evaluate(&mut context);
-    context.run_jobs();
+    context.run_jobs().unwrap();
 
     match promise.state() {
         PromiseState::Fulfilled(value) => {
diff --git a/examples/Cargo.toml b/examples/Cargo.toml
index f9a3990ce17..de0b103ecea 100644
--- a/examples/Cargo.toml
+++ b/examples/Cargo.toml
@@ -19,6 +19,7 @@ boa_runtime.workspace = true
 time.workspace = true
 smol.workspace = true
 futures-concurrency.workspace = true
+futures-lite.workspace = true
 isahc.workspace = true
 tokio = { workspace = true, features = ["rt", "rt-multi-thread", "time", "macros"] }
 
diff --git a/examples/src/bin/module_fetch_async.rs b/examples/src/bin/module_fetch_async.rs
index 6f9b2a7b09d..abc7c90e982 100644
--- a/examples/src/bin/module_fetch_async.rs
+++ b/examples/src/bin/module_fetch_async.rs
@@ -2,7 +2,7 @@ use std::{cell::RefCell, collections::VecDeque, future::Future, pin::Pin, rc::Rc
 
 use boa_engine::{
     builtins::promise::PromiseState,
-    job::{FutureJob, JobQueue, NativeJob},
+    job::{Job, JobExecutor, NativeAsyncJob, PromiseJob},
     js_string,
     module::ModuleLoader,
     Context, JsNativeError, JsResult, JsString, JsValue, Module,
@@ -28,60 +28,62 @@ impl ModuleLoader for HttpModuleLoader {
     ) {
         let url = specifier.to_std_string_escaped();
 
-        let fetch = async move {
-            // Adding some prints to show the non-deterministic nature of the async fetches.
-            // Try to run the example several times to see how sometimes the fetches start in order
-            // but finish in disorder.
-            println!("Fetching `{url}`...");
-            // This could also retry fetching in case there's an error while requesting the module.
-            let body: Result<_, isahc::Error> = async {
-                let mut response = Request::get(&url)
-                    .redirect_policy(RedirectPolicy::Limit(5))
-                    .body(())?
-                    .send_async()
-                    .await?;
-
-                Ok(response.text().await?)
-            }
-            .await;
-            println!("Finished fetching `{url}`");
-
-            // Since the async context cannot take the `context` by ref, we have to continue
-            // parsing inside a new `NativeJob` that will be enqueued into the promise job queue.
-            NativeJob::new(move |context| -> JsResult<JsValue> {
-                let body = match body {
-                    Ok(body) => body,
-                    Err(err) => {
-                        // On error we always call `finish_load` to notify the load promise about the
-                        // error.
-                        finish_load(
-                            Err(JsNativeError::typ().with_message(err.to_string()).into()),
-                            context,
-                        );
-
-                        // Just returns anything to comply with `NativeJob::new`'s signature.
-                        return Ok(JsValue::undefined());
-                    }
-                };
-
-                // Could also add a path if needed.
-                let source = Source::from_bytes(body.as_bytes());
-
-                let module = Module::parse(source, None, context);
-
-                // We don't do any error handling, `finish_load` takes care of that for us.
-                finish_load(module, context);
-
-                // Also needed to match `NativeJob::new`.
-                Ok(JsValue::undefined())
-            })
-        };
-
         // Just enqueue the future for now. We'll advance all the enqueued futures inside our custom
-        // `JobQueue`.
-        context
-            .job_queue()
-            .enqueue_future_job(Box::pin(fetch), context)
+        // `JobExecutor`.
+        context.enqueue_job(
+            NativeAsyncJob::with_realm(
+                move |context| {
+                    Box::pin(async move {
+                        // Adding some prints to show the non-deterministic nature of the async fetches.
+                        // Try to run the example several times to see how sometimes the fetches start in order
+                        // but finish in disorder.
+                        println!("Fetching `{url}`...");
+
+                        // This could also retry fetching in case there's an error while requesting the module.
+                        let body: Result<_, isahc::Error> = async {
+                            let mut response = Request::get(&url)
+                                .redirect_policy(RedirectPolicy::Limit(5))
+                                .body(())?
+                                .send_async()
+                                .await?;
+
+                            Ok(response.text().await?)
+                        }
+                        .await;
+
+                        println!("Finished fetching `{url}`");
+
+                        let body = match body {
+                            Ok(body) => body,
+                            Err(err) => {
+                                // On error we always call `finish_load` to notify the load promise about the
+                                // error.
+                                finish_load(
+                                    Err(JsNativeError::typ().with_message(err.to_string()).into()),
+                                    &mut context.borrow_mut(),
+                                );
+
+                                // Just return anything to satisfy the async job's `JsResult<JsValue>` return type.
+                                return Ok(JsValue::undefined());
+                            }
+                        };
+
+                        // Could also add a path if needed.
+                        let source = Source::from_bytes(body.as_bytes());
+
+                        let module = Module::parse(source, None, &mut context.borrow_mut());
+
+                        // We don't do any error handling; `finish_load` takes care of that for us.
+                        finish_load(module, &mut context.borrow_mut());
+
+                        // Also needed to satisfy the async job's `JsResult<JsValue>` return type.
+                        Ok(JsValue::undefined())
+                    })
+                },
+                context.realm().clone(),
+            )
+            .into(),
+        );
     }
 }
 
@@ -109,7 +111,7 @@ fn main() -> JsResult<()> {
     "#;
 
     let context = &mut Context::builder()
-        .job_queue(Rc::new(Queue::new()))
+        .job_executor(Rc::new(Queue::new()))
         // NEW: sets the context module loader to our custom loader
         .module_loader(Rc::new(HttpModuleLoader))
         .build()?;
@@ -122,7 +124,7 @@ fn main() -> JsResult<()> {
 
     // Important to call `Context::run_jobs`, or else all the futures and promises won't be
     // pushed forward by the job queue.
-    context.run_jobs();
+    context.run_jobs()?;
 
     match promise.state() {
         // Our job queue guarantees that all promises and futures are finished after returning
@@ -170,20 +172,20 @@ fn main() -> JsResult<()> {
 // Taken from the `smol_event_loop.rs` example.
 /// An event queue using smol to drive futures to completion.
 struct Queue {
-    futures: RefCell<Vec<FutureJob>>,
-    jobs: RefCell<VecDeque<NativeJob>>,
+    async_jobs: RefCell<VecDeque<NativeAsyncJob>>,
+    promise_jobs: RefCell<VecDeque<PromiseJob>>,
 }
 
 impl Queue {
     fn new() -> Self {
         Self {
-            futures: RefCell::default(),
-            jobs: RefCell::default(),
+            async_jobs: RefCell::default(),
+            promise_jobs: RefCell::default(),
         }
     }
 
     fn drain_jobs(&self, context: &mut Context) {
-        let jobs = std::mem::take(&mut *self.jobs.borrow_mut());
+        let jobs = std::mem::take(&mut *self.promise_jobs.borrow_mut());
         for job in jobs {
             if let Err(e) = job.call(context) {
                 eprintln!("Uncaught {e}");
@@ -192,67 +194,67 @@ impl Queue {
     }
 }
 
-impl JobQueue for Queue {
-    fn enqueue_promise_job(&self, job: NativeJob, _context: &mut Context) {
-        self.jobs.borrow_mut().push_back(job);
-    }
-
-    fn enqueue_future_job(&self, future: FutureJob, _context: &mut Context) {
-        self.futures.borrow_mut().push(future);
+impl JobExecutor for Queue {
+    fn enqueue_job(&self, job: Job, _context: &mut Context) {
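+        // Promise jobs are microtasks that run on the next queue drain, while native async jobs
+        // are futures that the event loop below drives to completion; any other job kind is not
+        // supported by this executor.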
+        match job {
+            Job::PromiseJob(job) => self.promise_jobs.borrow_mut().push_back(job),
+            Job::AsyncJob(job) => self.async_jobs.borrow_mut().push_back(job),
+            _ => panic!("unsupported job type"),
+        }
     }
 
     // While the sync flavor of `run_jobs` will block the current thread until all the jobs have finished...
-    fn run_jobs(&self, context: &mut Context) {
-        smol::block_on(smol::LocalExecutor::new().run(self.run_jobs_async(context)));
+    fn run_jobs(&self, context: &mut Context) -> JsResult<()> {
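+        // The context is wrapped in a `RefCell` so that the async jobs polled by
+        // `run_jobs_async` can reborrow it between `.await` points.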
+        smol::block_on(smol::LocalExecutor::new().run(self.run_jobs_async(&RefCell::new(context))))
     }
 
     // ...the async flavor won't, which allows concurrent execution with external async tasks.
-    fn run_jobs_async<'a, 'ctx, 'fut>(
+    fn run_jobs_async<'a, 'b, 'fut>(
         &'a self,
-        context: &'ctx mut Context,
-    ) -> Pin<Box<dyn Future<Output = ()> + 'fut>>
+        context: &'b RefCell<&mut Context>,
+    ) -> Pin<Box<dyn Future<Output = JsResult<()>> + 'fut>>
     where
         'a: 'fut,
-        'ctx: 'fut,
+        'b: 'fut,
     {
         Box::pin(async move {
             // Early return in case there were no jobs scheduled.
-            if self.jobs.borrow().is_empty() && self.futures.borrow().is_empty() {
-                return;
+            if self.promise_jobs.borrow().is_empty() && self.async_jobs.borrow().is_empty() {
+                return Ok(());
             }
             let mut group = FutureGroup::new();
             loop {
-                group.extend(std::mem::take(&mut *self.futures.borrow_mut()));
+                for job in std::mem::take(&mut *self.async_jobs.borrow_mut()) {
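+                    // Calling an async job returns its future; `FutureGroup` polls all of the
+                    // inserted futures concurrently.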
+                    group.insert(job.call(context));
+                }
 
-                if self.jobs.borrow().is_empty() {
-                    let Some(job) = group.next().await else {
+                if self.promise_jobs.borrow().is_empty() {
+                    let Some(result) = group.next().await else {
                         // Both queues are empty. We can exit.
-                        return;
+                        return Ok(());
                     };
 
-                    // Important to schedule the returned `job` into the job queue, since that's
-                    // what allows updating the `Promise` seen by ECMAScript for when the future
-                    // completes.
-                    self.enqueue_promise_job(job, context);
+                    if let Err(err) = result {
+                        eprintln!("Uncaught {err}");
+                    }
                     continue;
                 }
 
                 // We have some jobs pending on the microtask queue. Try to poll the pending
                 // tasks once to see if any of them finished, and run the pending microtasks
                 // otherwise.
-                let Some(job) = future::poll_once(group.next()).await.flatten() else {
+                let Some(result) = future::poll_once(group.next()).await.flatten() else {
                     // No completed jobs. Run the microtask queue once.
-                    self.drain_jobs(context);
+                    self.drain_jobs(&mut context.borrow_mut());
                     continue;
                 };
 
-                // Important to schedule the returned `job` into the job queue, since that's
-                // what allows updating the `Promise` seen by ECMAScript for when the future
-                // completes.
-                self.enqueue_promise_job(job, context);
+                if let Err(err) = result {
+                    eprintln!("Uncaught {err}");
+                }
 
                 // Only one macrotask can be executed before the next drain of the microtask queue.
-                self.drain_jobs(context);
+                self.drain_jobs(&mut context.borrow_mut());
             }
         })
     }
diff --git a/examples/src/bin/modules.rs b/examples/src/bin/modules.rs
index 68037590f3e..eec64471c1d 100644
--- a/examples/src/bin/modules.rs
+++ b/examples/src/bin/modules.rs
@@ -87,7 +87,7 @@ fn main() -> Result<(), Box<dyn Error>> {
         );
 
     // Very important to push forward the job queue after queueing promises.
-    context.run_jobs();
+    context.run_jobs()?;
 
     // Checking if the final promise didn't return an error.
     match promise_result.state() {
diff --git a/examples/src/bin/smol_event_loop.rs b/examples/src/bin/smol_event_loop.rs
index 45b9ade8914..749be95444c 100644
--- a/examples/src/bin/smol_event_loop.rs
+++ b/examples/src/bin/smol_event_loop.rs
@@ -2,17 +2,18 @@ use std::{
     cell::RefCell,
     collections::VecDeque,
     future::Future,
+    pin::Pin,
     rc::Rc,
     time::{Duration, Instant},
 };
 
 use boa_engine::{
     context::ContextBuilder,
-    job::{FutureJob, JobQueue, NativeJob},
+    job::{Job, JobExecutor, NativeAsyncJob, PromiseJob},
     js_string,
     native_function::NativeFunction,
     property::Attribute,
-    Context, JsArgs, JsResult, JsValue, Script, Source,
+    Context, JsArgs, JsNativeError, JsResult, JsValue, Script, Source,
 };
 use boa_runtime::Console;
 use futures_concurrency::future::FutureGroup;
@@ -20,32 +21,33 @@ use smol::{future, stream::StreamExt};
 
 // This example shows how to create an event loop using the smol runtime.
 // The example contains two "flavors" of event loops:
-fn main() {
+fn main() -> JsResult<()> {
     // An internally async event loop. This event loop blocks the execution of the thread
     // while executing tasks, but internally uses async to run its tasks.
-    internally_async_event_loop();
+    internally_async_event_loop()?;
 
     // An externally async event loop. This event loop can yield to the runtime to concurrently
     // run tasks with it.
-    externally_async_event_loop();
+    externally_async_event_loop()
 }
 
 /// An event queue using smol to drive futures to completion.
 struct Queue {
-    futures: RefCell<Vec<FutureJob>>,
-    jobs: RefCell<VecDeque<NativeJob>>,
+    async_jobs: RefCell<VecDeque<NativeAsyncJob>>,
+    promise_jobs: RefCell<VecDeque<PromiseJob>>,
 }
 
 impl Queue {
     fn new() -> Self {
         Self {
-            futures: RefCell::default(),
-            jobs: RefCell::default(),
+            async_jobs: RefCell::default(),
+            promise_jobs: RefCell::default(),
         }
     }
 
     fn drain_jobs(&self, context: &mut Context) {
-        let jobs = std::mem::take(&mut *self.jobs.borrow_mut());
+        let jobs = std::mem::take(&mut *self.promise_jobs.borrow_mut());
         for job in jobs {
             if let Err(e) = job.call(context) {
                 eprintln!("Uncaught {e}");
@@ -54,67 +56,67 @@ impl Queue {
     }
 }
 
-impl JobQueue for Queue {
-    fn enqueue_promise_job(&self, job: NativeJob, _context: &mut Context) {
-        self.jobs.borrow_mut().push_back(job);
-    }
-
-    fn enqueue_future_job(&self, future: FutureJob, _context: &mut Context) {
-        self.futures.borrow_mut().push(future);
+impl JobExecutor for Queue {
+    fn enqueue_job(&self, job: Job, _context: &mut Context) {
+        match job {
+            Job::PromiseJob(job) => self.promise_jobs.borrow_mut().push_back(job),
+            Job::AsyncJob(job) => self.async_jobs.borrow_mut().push_back(job),
+            _ => panic!("unsupported job type"),
+        }
     }
 
     // While the sync flavor of `run_jobs` will block the current thread until all the jobs have finished...
-    fn run_jobs(&self, context: &mut Context) {
-        smol::block_on(smol::LocalExecutor::new().run(self.run_jobs_async(context)));
+    fn run_jobs(&self, context: &mut Context) -> JsResult<()> {
+        smol::block_on(smol::LocalExecutor::new().run(self.run_jobs_async(&RefCell::new(context))))
     }
 
     // ...the async flavor won't, which allows concurrent execution with external async tasks.
-    fn run_jobs_async<'a, 'ctx, 'fut>(
+    fn run_jobs_async<'a, 'b, 'fut>(
         &'a self,
-        context: &'ctx mut Context,
-    ) -> std::pin::Pin<Box<dyn Future<Output = ()> + 'fut>>
+        context: &'b RefCell<&mut Context>,
+    ) -> Pin<Box<dyn Future<Output = JsResult<()>> + 'fut>>
     where
         'a: 'fut,
-        'ctx: 'fut,
+        'b: 'fut,
     {
         Box::pin(async move {
             // Early return in case there were no jobs scheduled.
-            if self.jobs.borrow().is_empty() && self.futures.borrow().is_empty() {
-                return;
+            if self.promise_jobs.borrow().is_empty() && self.async_jobs.borrow().is_empty() {
+                return Ok(());
             }
             let mut group = FutureGroup::new();
             loop {
-                group.extend(std::mem::take(&mut *self.futures.borrow_mut()));
+                for job in std::mem::take(&mut *self.async_jobs.borrow_mut()) {
+                    group.insert(job.call(context));
+                }
 
-                if self.jobs.borrow().is_empty() {
-                    let Some(job) = group.next().await else {
+                if self.promise_jobs.borrow().is_empty() {
+                    let Some(result) = group.next().await else {
                         // Both queues are empty. We can exit.
-                        return;
+                        return Ok(());
                     };
 
-                    // Important to schedule the returned `job` into the job queue, since that's
-                    // what allows updating the `Promise` seen by ECMAScript for when the future
-                    // completes.
-                    self.enqueue_promise_job(job, context);
+                    if let Err(err) = result {
+                        eprintln!("Uncaught {err}");
+                    }
                     continue;
                 }
 
                 // We have some jobs pending on the microtask queue. Try to poll the pending
                 // tasks once to see if any of them finished, and run the pending microtasks
                 // otherwise.
-                let Some(job) = future::poll_once(group.next()).await.flatten() else {
+                let Some(result) = future::poll_once(group.next()).await.flatten() else {
                     // No completed jobs. Run the microtask queue once.
-                    self.drain_jobs(context);
+                    self.drain_jobs(&mut context.borrow_mut());
                     continue;
                 };
 
-                // Important to schedule the returned `job` into the job queue, since that's
-                // what allows updating the `Promise` seen by ECMAScript for when the future
-                // completes.
-                self.enqueue_promise_job(job, context);
+                if let Err(err) = result {
+                    eprintln!("Uncaught {err}");
+                }
 
                 // Only one macrotask can be executed before the next drain of the microtask queue.
-                self.drain_jobs(context);
+                self.drain_jobs(&mut context.borrow_mut());
             }
         })
     }
@@ -138,6 +140,41 @@ fn delay(
     }
 }
 
+// Example interval function. We cannot use a function returning an async block here, since it
+// would borrow the context for too long; a `NativeAsyncJob` lets us reborrow it only when needed.
+fn interval(this: &JsValue, args: &[JsValue], context: &mut Context) -> JsResult<JsValue> {
+    let Some(function) = args.get_or_undefined(0).as_callable().cloned() else {
+        return Err(JsNativeError::typ()
+            .with_message("arg must be a callable")
+            .into());
+    };
+
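+    // Capture `this`, the delay in milliseconds, and any extra arguments so the async job can
+    // call the function with them on every tick.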
+    let this = this.clone();
+    let delay = args.get_or_undefined(1).to_u32(context)?;
+    let args = args.get(2..).unwrap_or_default().to_vec();
+
+    context.enqueue_job(
+        NativeAsyncJob::with_realm(
+            move |context| {
+                Box::pin(async move {
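+                    // `smol::Timer::interval` is a `Stream` of ticks, so we can await each tick
+                    // and only reborrow the context while calling the JS callback.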
+                    let mut timer = smol::Timer::interval(Duration::from_millis(u64::from(delay)));
+                    for _ in 0..10 {
+                        timer.next().await;
+                        if let Err(err) = function.call(&this, &args, &mut context.borrow_mut()) {
+                            eprintln!("Uncaught {err}");
+                        }
+                    }
+                    Ok(JsValue::undefined())
+                })
+            },
+            context.realm().clone(),
+        )
+        .into(),
+    );
+
+    Ok(JsValue::undefined())
+}
+
 /// Adds the custom runtime to the context.
 fn add_runtime(context: &mut Context) {
     // First add the `console` object, to be able to call `console.log()`.
@@ -154,19 +191,37 @@ fn add_runtime(context: &mut Context) {
             NativeFunction::from_async_fn(delay),
         )
         .expect("the delay builtin shouldn't exist");
+
+    // Finally, bind the native function defined above to the ECMAScript global "interval".
+    context
+        .register_global_builtin_callable(
+            js_string!("interval"),
+            1,
+            NativeFunction::from_fn_ptr(interval),
+        )
+        .expect("the interval builtin shouldn't exist");
 }
 
 // Script that does multiple calls to multiple async timers.
 const SCRIPT: &str = r"
     function print(elapsed) {
-        console.log(`Finished delay. Elapsed time: ${elapsed * 1000} ms`)
+        console.log(`Finished delay. Elapsed time: ${elapsed * 1000} ms`);
     }
+
     delay(1000).then(print);
     delay(500).then(print);
     delay(200).then(print);
     delay(600).then(print);
     delay(30).then(print);
 
+    let i = 0;
+    function counter() {
+        console.log(`Iteration number ${i} for JS interval`);
+        i += 1;
+    }
+
+    interval(counter, 100);
+
     for(let i = 0; i <= 100000; i++) {
         // Emulate a long-running evaluation of a script.
     }
@@ -175,13 +230,13 @@ const SCRIPT: &str = r"
 // This flavor is most recommended when you have an application that:
 //  - Needs to wait until the engine finishes executing; depends on the execution result to continue.
 //  - Delegates the execution of the application to the engine's event loop.
-fn internally_async_event_loop() {
+fn internally_async_event_loop() -> JsResult<()> {
     println!("====== Internally async event loop. ======");
 
     // Initialize the queue and the context
     let queue = Queue::new();
     let context = &mut ContextBuilder::new()
-        .job_queue(Rc::new(queue))
+        .job_executor(Rc::new(queue))
         .build()
         .unwrap();
 
@@ -194,15 +249,16 @@ fn internally_async_event_loop() {
 
     // Important to run this after evaluating, since this is what triggers to run the enqueued jobs.
     println!("Running jobs...");
-    context.run_jobs();
+    context.run_jobs()?;
 
     println!("Total elapsed time: {:?}\n", now.elapsed());
+    Ok(())
 }
 
 // This flavor is most recommended when you have an application that:
 //  - Cannot afford to block until the engine finishes executing.
 //  - Needs to process IO requests between executions that will be consumed by the engine.
-fn externally_async_event_loop() {
+fn externally_async_event_loop() -> JsResult<()> {
     println!("====== Externally async event loop. ======");
     let executor = smol::Executor::new();
 
@@ -210,7 +266,7 @@ fn externally_async_event_loop() {
         // Initialize the queue and the context
         let queue = Queue::new();
         let context = &mut ContextBuilder::new()
-            .job_queue(Rc::new(queue))
+            .job_executor(Rc::new(queue))
             .build()
             .unwrap();
 
@@ -227,7 +283,7 @@ fn externally_async_event_loop() {
                 interval.next().await;
                 println!("Executed interval tick {i}");
             }
-            println!("Finished smol interval job...")
+            println!("Finished smol interval job...");
         });
 
         let engine = async {
@@ -240,11 +296,13 @@ fn externally_async_event_loop() {
 
             // Run the jobs asynchronously, which avoids blocking the main thread.
             println!("Running jobs...");
-            context.run_jobs_async().await;
+            context.run_jobs_async().await
         };
 
-        future::zip(counter, engine).await;
+        future::zip(counter, engine).await.1?;
 
         println!("Total elapsed time: {:?}\n", now.elapsed());
-    }));
+
+        Ok(())
+    }))
 }
diff --git a/examples/src/bin/synthetic.rs b/examples/src/bin/synthetic.rs
index e38562cde4b..43d4955259c 100644
--- a/examples/src/bin/synthetic.rs
+++ b/examples/src/bin/synthetic.rs
@@ -62,7 +62,7 @@ fn main() -> Result<(), Box<dyn Error>> {
     let promise_result = module.load_link_evaluate(context);
 
     // Very important to push forward the job queue after queueing promises.
-    context.run_jobs();
+    context.run_jobs()?;
 
     // Checking if the final promise didn't return an error.
     match promise_result.state() {
diff --git a/examples/src/bin/tokio_event_loop.rs b/examples/src/bin/tokio_event_loop.rs
index c6e0cd248d2..0c83887c229 100644
--- a/examples/src/bin/tokio_event_loop.rs
+++ b/examples/src/bin/tokio_event_loop.rs
@@ -9,43 +9,45 @@ use std::{
 
 use boa_engine::{
     context::ContextBuilder,
-    job::{FutureJob, JobQueue, NativeJob},
+    job::{Job, JobExecutor, NativeAsyncJob, PromiseJob},
     js_string,
     native_function::NativeFunction,
     property::Attribute,
-    Context, JsArgs, JsResult, JsValue, Script, Source,
+    Context, JsArgs, JsNativeError, JsResult, JsValue, Script, Source,
 };
 use boa_runtime::Console;
+use futures_concurrency::future::FutureGroup;
+use futures_lite::{future, StreamExt};
 use tokio::{task, time};
 
 // This example shows how to create an event loop using the tokio runtime.
 // The example contains two "flavors" of event loops:
-fn main() {
+fn main() -> JsResult<()> {
     // An internally async event loop. This event loop blocks the execution of the thread
     // while executing tasks, but internally uses async to run its tasks.
-    internally_async_event_loop();
+    internally_async_event_loop()?;
 
     // An externally async event loop. This event loop can yield to the runtime to concurrently
     // run tasks with it.
-    externally_async_event_loop();
+    externally_async_event_loop()
 }
 
 /// An event queue using tokio to drive futures to completion.
 struct Queue {
-    futures: RefCell<Vec<FutureJob>>,
-    jobs: RefCell<VecDeque<NativeJob>>,
+    async_jobs: RefCell<VecDeque<NativeAsyncJob>>,
+    promise_jobs: RefCell<VecDeque<PromiseJob>>,
 }
 
 impl Queue {
     fn new() -> Self {
         Self {
-            futures: RefCell::default(),
-            jobs: RefCell::default(),
+            async_jobs: RefCell::default(),
+            promise_jobs: RefCell::default(),
         }
     }
 
     fn drain_jobs(&self, context: &mut Context) {
-        let jobs = std::mem::take(&mut *self.jobs.borrow_mut());
+        let jobs = std::mem::take(&mut *self.promise_jobs.borrow_mut());
         for job in jobs {
             if let Err(e) = job.call(context) {
                 eprintln!("Uncaught {e}");
@@ -54,57 +56,53 @@ impl Queue {
     }
 }
 
-impl JobQueue for Queue {
-    fn enqueue_promise_job(&self, job: NativeJob, _context: &mut Context) {
-        self.jobs.borrow_mut().push_back(job);
-    }
-
-    fn enqueue_future_job(&self, future: FutureJob, _context: &mut Context) {
-        self.futures.borrow_mut().push(future);
+impl JobExecutor for Queue {
+    fn enqueue_job(&self, job: Job, _context: &mut Context) {
+        match job {
+            Job::PromiseJob(job) => self.promise_jobs.borrow_mut().push_back(job),
+            Job::AsyncJob(job) => self.async_jobs.borrow_mut().push_back(job),
+            _ => panic!("unsupported job type"),
+        }
     }
 
     // While the sync flavor of `run_jobs` will block the current thread until all the jobs have finished...
-    fn run_jobs(&self, context: &mut Context) {
+    fn run_jobs(&self, context: &mut Context) -> JsResult<()> {
         let runtime = tokio::runtime::Builder::new_current_thread()
             .enable_time()
             .build()
             .unwrap();
 
-        task::LocalSet::default().block_on(&runtime, self.run_jobs_async(context));
+        task::LocalSet::default().block_on(&runtime, self.run_jobs_async(&RefCell::new(context)))
     }
 
     // ...the async flavor won't, which allows concurrent execution with external async tasks.
-    fn run_jobs_async<'a, 'ctx, 'fut>(
+    fn run_jobs_async<'a, 'b, 'fut>(
         &'a self,
-        context: &'ctx mut Context,
-    ) -> Pin<Box<dyn Future<Output = ()> + 'fut>>
+        context: &'b RefCell<&mut Context>,
+    ) -> Pin<Box<dyn Future<Output = JsResult<()>> + 'fut>>
     where
         'a: 'fut,
-        'ctx: 'fut,
+        'b: 'fut,
     {
         Box::pin(async move {
             // Early return in case there were no jobs scheduled.
-            if self.jobs.borrow().is_empty() && self.futures.borrow().is_empty() {
-                return;
+            if self.promise_jobs.borrow().is_empty() && self.async_jobs.borrow().is_empty() {
+                return Ok(());
             }
-            let mut join_set = task::JoinSet::new();
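+            // `FutureGroup` polls the job futures in place instead of spawning them; unlike
+            // `JoinSet::spawn_local`, it doesn't require `'static` futures, which matters
+            // because each job's future borrows the shared context.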
+            let mut group = FutureGroup::new();
             loop {
-                for future in std::mem::take(&mut *self.futures.borrow_mut()) {
-                    join_set.spawn_local(future);
+                for job in std::mem::take(&mut *self.async_jobs.borrow_mut()) {
+                    group.insert(job.call(context));
                 }
 
-                if self.jobs.borrow().is_empty() {
-                    let Some(job) = join_set.join_next().await else {
+                if self.promise_jobs.borrow().is_empty() {
+                    let Some(result) = group.next().await else {
                         // Both queues are empty. We can exit.
-                        return;
+                        return Ok(());
                     };
 
-                    // Important to schedule the returned `job` into the job queue, since that's
-                    // what allows updating the `Promise` seen by ECMAScript for when the future
-                    // completes.
-                    match job {
-                        Ok(job) => self.enqueue_promise_job(job, context),
-                        Err(e) => eprintln!("{e}"),
+                    if let Err(err) = result {
+                        eprintln!("Uncaught {err}");
                     }
 
                     continue;
@@ -113,24 +111,20 @@ impl JobQueue for Queue {
                 // We have some jobs pending on the microtask queue. Try to poll the pending
                 // tasks once to see if any of them finished, and run the pending microtasks
                 // otherwise.
-                let Some(job) = join_set.try_join_next() else {
+                let Some(result) = future::poll_once(group.next()).await.flatten() else {
                     // No completed jobs. Run the microtask queue once.
-                    self.drain_jobs(context);
+                    self.drain_jobs(&mut context.borrow_mut());
 
                     task::yield_now().await;
                     continue;
                 };
 
-                // Important to schedule the returned `job` into the job queue, since that's
-                // what allows updating the `Promise` seen by ECMAScript for when the future
-                // completes.
-                match job {
-                    Ok(job) => self.enqueue_promise_job(job, context),
-                    Err(e) => eprintln!("{e}"),
+                if let Err(err) = result {
+                    eprintln!("Uncaught {err}");
                 }
 
                 // Only one macrotask can be executed before the next drain of the microtask queue.
-                self.drain_jobs(context);
+                self.drain_jobs(&mut context.borrow_mut());
             }
         })
     }
@@ -154,6 +148,41 @@ fn delay(
     }
 }
 
+// Example interval function. We cannot use a function returning an async block here, since it
+// would borrow the context for too long; a `NativeAsyncJob` lets us reborrow it only when needed.
+fn interval(this: &JsValue, args: &[JsValue], context: &mut Context) -> JsResult<JsValue> {
+    let Some(function) = args.get_or_undefined(0).as_callable().cloned() else {
+        return Err(JsNativeError::typ()
+            .with_message("arg must be a callable")
+            .into());
+    };
+
+    let this = this.clone();
+    let delay = args.get_or_undefined(1).to_u32(context)?;
+    let args = args.get(2..).unwrap_or_default().to_vec();
+
+    context.enqueue_job(
+        NativeAsyncJob::with_realm(
+            move |context| {
+                Box::pin(async move {
+                    let mut timer = time::interval(Duration::from_millis(u64::from(delay)));
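+                    // Note that tokio intervals complete their first tick immediately, so the
+                    // first callback invocation happens right away.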
+                    for _ in 0..10 {
+                        timer.tick().await;
+                        if let Err(err) = function.call(&this, &args, &mut context.borrow_mut()) {
+                            eprintln!("Uncaught {err}");
+                        }
+                    }
+                    Ok(JsValue::undefined())
+                })
+            },
+            context.realm().clone(),
+        )
+        .into(),
+    );
+
+    Ok(JsValue::undefined())
+}
+
 /// Adds the custom runtime to the context.
 fn add_runtime(context: &mut Context) {
     // First add the `console` object, to be able to call `console.log()`.
@@ -170,19 +199,37 @@ fn add_runtime(context: &mut Context) {
             NativeFunction::from_async_fn(delay),
         )
         .expect("the delay builtin shouldn't exist");
+
+    // Finally, bind the native function defined above to the ECMAScript global "interval".
+    context
+        .register_global_builtin_callable(
+            js_string!("interval"),
+            1,
+            NativeFunction::from_fn_ptr(interval),
+        )
+        .expect("the interval builtin shouldn't exist");
 }
 
 // Script that does multiple calls to multiple async timers.
 const SCRIPT: &str = r"
     function print(elapsed) {
-        console.log(`Finished delay. Elapsed time: ${elapsed * 1000} ms`)
+        console.log(`Finished delay. Elapsed time: ${elapsed * 1000} ms`);
     }
+
     delay(1000).then(print);
     delay(500).then(print);
     delay(200).then(print);
     delay(600).then(print);
     delay(30).then(print);
 
+    let i = 0;
+    function counter() {
+        console.log(`Iteration number ${i} for JS interval`);
+        i += 1;
+    }
+
+    interval(counter, 100);
+
     for(let i = 0; i <= 100000; i++) {
         // Emulate a long-running evaluation of a script.
     }
@@ -191,13 +238,13 @@ const SCRIPT: &str = r"
 // This flavor is most recommended when you have an application that:
 //  - Needs to wait until the engine finishes executing; depends on the execution result to continue.
 //  - Delegates the execution of the application to the engine's event loop.
-fn internally_async_event_loop() {
+fn internally_async_event_loop() -> JsResult<()> {
     println!("====== Internally async event loop. ======");
 
     // Initialize the queue and the context
     let queue = Queue::new();
     let context = &mut ContextBuilder::new()
-        .job_queue(Rc::new(queue))
+        .job_executor(Rc::new(queue))
         .build()
         .unwrap();
 
@@ -210,21 +257,23 @@ fn internally_async_event_loop() {
 
     // Important to run this after evaluating, since this is what triggers to run the enqueued jobs.
     println!("Running jobs...");
-    context.run_jobs();
+    context.run_jobs()?;
 
     println!("Total elapsed time: {:?}\n", now.elapsed());
+
+    Ok(())
 }
 
 // This flavor is most recommended when you have an application that:
 //  - Cannot afford to block until the engine finishes executing.
 //  - Needs to process IO requests between executions that will be consumed by the engine.
 #[tokio::main]
-async fn externally_async_event_loop() {
+async fn externally_async_event_loop() -> JsResult<()> {
     println!("====== Externally async event loop. ======");
     // Initialize the queue and the context
     let queue = Queue::new();
     let context = &mut ContextBuilder::new()
-        .job_queue(Rc::new(queue))
+        .job_executor(Rc::new(queue))
         .build()
         .unwrap();
 
@@ -234,15 +283,19 @@ async fn externally_async_event_loop() {
     let now = Instant::now();
 
     // Example of an asynchronous workload that must be run alongside the engine.
-    let counter = tokio::spawn(async {
-        let mut interval = time::interval(Duration::from_millis(100));
-        println!("Starting tokio interval job...");
-        for i in 0..10 {
-            interval.tick().await;
-            println!("Executed interval tick {i}");
-        }
-        println!("Finished tokio interval job...")
-    });
+    let counter = async {
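+        // Awaiting the join handle inside an `async` block lets us convert its `JoinError` into
+        // a `JsError`, so `counter` can be joined with the engine future via `try_join!` below.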
+        tokio::spawn(async {
+            let mut interval = time::interval(Duration::from_millis(100));
+            println!("Starting tokio interval job...");
+            for i in 0..10 {
+                interval.tick().await;
+                println!("Executed interval tick {i}");
+            }
+            println!("Finished tokio interval job...")
+        })
+        .await
+        .map_err(|err| JsNativeError::typ().with_message(err.to_string()).into())
+    };
 
     let local_set = &mut task::LocalSet::default();
     let engine = local_set.run_until(async {
@@ -255,11 +308,12 @@ async fn externally_async_event_loop() {
 
         // Run the jobs asynchronously, which avoids blocking the main thread.
         println!("Running jobs...");
-        context.run_jobs_async().await;
-        Ok(())
+        context.run_jobs_async().await
     });
 
-    tokio::try_join!(counter, engine).unwrap();
+    tokio::try_join!(counter, engine)?;
 
     println!("Total elapsed time: {:?}\n", now.elapsed());
+
+    Ok(())
 }
diff --git a/tests/tester/src/exec/mod.rs b/tests/tester/src/exec/mod.rs
index e5ce0553cfd..6bca5c1f163 100644
--- a/tests/tester/src/exec/mod.rs
+++ b/tests/tester/src/exec/mod.rs
@@ -289,7 +289,9 @@ impl Test {
 
                     let promise = module.load_link_evaluate(context);
 
-                    context.run_jobs();
+                    if let Err(err) = context.run_jobs() {
+                        return (false, format!("Uncaught {err}"));
+                    };
 
                     match promise.state() {
                         PromiseState::Pending => {
@@ -322,7 +324,9 @@ impl Test {
                     }
                 };
 
-                context.run_jobs();
+                if let Err(err) = context.run_jobs() {
+                    return (false, format!("Uncaught {err}"));
+                };
 
                 match *async_result.inner.borrow() {
                     UninitResult::Err(ref e) => return (false, format!("Uncaught {e}")),
@@ -388,7 +392,9 @@ impl Test {
 
                 let promise = module.load(context);
 
-                context.run_jobs();
+                if let Err(err) = context.run_jobs() {
+                    return (false, format!("Uncaught {err}"));
+                };
 
                 match promise.state() {
                     PromiseState::Pending => {
@@ -433,7 +439,9 @@ impl Test {
 
                     let promise = module.load(context);
 
-                    context.run_jobs();
+                    if let Err(err) = context.run_jobs() {
+                        return (false, format!("Uncaught {err}"));
+                    };
 
                     match promise.state() {
                         PromiseState::Pending => {
@@ -451,7 +459,9 @@ impl Test {
 
                     let promise = module.evaluate(context);
 
-                    context.run_jobs();
+                    if let Err(err) = context.run_jobs() {
+                        return (false, format!("Uncaught {err}"));
+                    };
 
                     match promise.state() {
                         PromiseState::Pending => {