From f037e8c02b660351af1ba99d24c867e20349d73d Mon Sep 17 00:00:00 2001 From: David Haim <59602013+David-Haim@users.noreply.github.com> Date: Wed, 24 Feb 2021 13:23:24 +0200 Subject: [PATCH] Version 0.1.0 (#39) --- CMakeLists.txt | 6 +- README.md | 1850 ++++++++++------- cmake/concurrencppInjectTSAN.cmake | 3 +- include/concurrencpp/concurrencpp.h | 2 + include/concurrencpp/executors/executor.h | 1 + .../concurrencpp/executors/inline_executor.h | 6 +- .../concurrencpp/executors/manual_executor.h | 4 - .../concurrencpp/executors/thread_executor.h | 2 +- include/concurrencpp/platform_defs.h | 2 +- include/concurrencpp/results/constants.h | 22 + .../results/impl/consumer_context.h | 73 +- .../results/impl/producer_context.h | 33 +- .../concurrencpp/results/impl/result_state.h | 108 +- .../results/impl/shared_result_state.h | 176 ++ include/concurrencpp/results/make_result.h | 24 +- include/concurrencpp/results/promises.h | 73 +- include/concurrencpp/results/result.h | 58 +- .../concurrencpp/results/result_awaitable.h | 86 +- .../results/result_fwd_declerations.h | 10 + include/concurrencpp/results/shared_result.h | 132 ++ .../results/shared_result_awaitable.h | 115 + include/concurrencpp/results/when_result.h | 94 +- include/concurrencpp/runtime/constants.h | 4 +- include/concurrencpp/utils/bind.h | 10 +- source/executors/manual_executor.cpp | 6 +- source/results/impl/consumer_context.cpp | 186 +- source/results/impl/result_state.cpp | 56 +- source/results/impl/shared_result_state.cpp | 75 + source/results/promises.cpp | 11 +- source/task.cpp | 4 +- source/threads/thread.cpp | 2 +- test/CMakeLists.txt | 25 +- test/include/tests/all_tests.h | 4 + .../tests/test_utils/make_result_array.h | 137 ++ .../tests/test_utils/test_ready_result.h | 88 + test/source/tests/all_tests.cpp | 5 + .../derivable_executor_tests.cpp | 339 --- .../executor_tests/inline_executor_tests.cpp | 36 +- .../executor_tests/manual_executor_tests.cpp | 29 +- .../executor_tests/thread_executor_tests.cpp | 36 +- .../thread_pool_executor_tests.cpp | 24 +- .../worker_thread_executor_tests.cpp | 31 +- .../tests/result_tests/result_await_tests.cpp | 20 +- .../result_tests/result_resolve_tests.cpp | 20 +- .../tests/result_tests/result_tests.cpp | 42 +- .../shared_result_await_tests.cpp | 769 +++++++ .../shared_result_resolve_tests.cpp | 660 ++++++ .../result_tests/shared_result_tests.cpp | 895 ++++++++ test/source/thread_sanitizer/executors.cpp | 626 +++++- .../{fibbonacci.cpp => fibonacci.cpp} | 30 +- .../matrix_multiplication.cpp | 31 +- test/source/thread_sanitizer/quick_sort.cpp | 17 +- test/source/thread_sanitizer/result.cpp | 293 ++- .../source/thread_sanitizer/shared_result.cpp | 258 +++ test/source/thread_sanitizer/when_all.cpp | 346 ++- test/source/thread_sanitizer/when_any.cpp | 498 ++++- 56 files changed, 6583 insertions(+), 1910 deletions(-) create mode 100644 include/concurrencpp/results/impl/shared_result_state.h create mode 100644 include/concurrencpp/results/shared_result.h create mode 100644 include/concurrencpp/results/shared_result_awaitable.h create mode 100644 source/results/impl/shared_result_state.cpp create mode 100644 test/include/tests/test_utils/make_result_array.h delete mode 100644 test/source/tests/executor_tests/derivable_executor_tests.cpp create mode 100644 test/source/tests/result_tests/shared_result_await_tests.cpp create mode 100644 test/source/tests/result_tests/shared_result_resolve_tests.cpp create mode 100644 test/source/tests/result_tests/shared_result_tests.cpp rename 
test/source/thread_sanitizer/{fibbonacci.cpp => fibonacci.cpp} (54%) create mode 100644 test/source/thread_sanitizer/shared_result.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index 70b7e579..5c607ff1 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,7 +1,7 @@ cmake_minimum_required(VERSION 3.16) project(concurrencpp - VERSION 0.0.9 + VERSION 0.1.0 LANGUAGES CXX) include(cmake/coroutineOptions.cmake) @@ -25,6 +25,7 @@ set(concurrencpp_sources source/executors/worker_thread_executor.cpp source/results/impl/consumer_context.cpp source/results/impl/result_state.cpp + source/results/impl/shared_result_state.cpp source/results/promises.cpp source/runtime/runtime.cpp source/threads/thread.cpp @@ -50,11 +51,14 @@ set(concurrencpp_headers include/concurrencpp/results/impl/consumer_context.h include/concurrencpp/results/impl/producer_context.h include/concurrencpp/results/impl/result_state.h + include/concurrencpp/results/impl/shared_result_state.h include/concurrencpp/results/constants.h include/concurrencpp/results/make_result.h include/concurrencpp/results/promises.h include/concurrencpp/results/result.h + include/concurrencpp/results/shared_result.h include/concurrencpp/results/result_awaitable.h + include/concurrencpp/results/shared_result_awaitable.h include/concurrencpp/results/result_fwd_declerations.h include/concurrencpp/results/when_result.h include/concurrencpp/runtime/constants.h diff --git a/README.md b/README.md index 2c3ed6c9..ca69dfd0 100644 --- a/README.md +++ b/README.md @@ -1,57 +1,63 @@ + # concurrencpp, the C++ concurrency library ![Latest Release](https://img.shields.io/github/v/release/David-Haim/concurrencpp.svg) [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) concurrencpp is a tasking library for C++ allowing developers to write highly concurrent applications easily and safely by using tasks, executors and coroutines. -By using concurrencpp applications can break down big procedures that need to be processed asynchronously into smaller tasks that run concurrently and work in a co-operative manner to achieve the wanted result. +By using concurrencpp applications can break down big procedures that need to be processed asynchronously into smaller tasks that run concurrently and work in a co-operative manner to achieve the wanted result. concurrencpp also allows applications to write parallel algorithms easily by using parallel coroutines. -concurrencpp main advantages are: +concurrencpp main advantages are: * Being able to write modern concurrency code without having to rely on low-level concurrency primitives like locks and condition variables. * Being able to write highly concurrent and parallel applications that scale automatically to use all hardware resources, as needed. * Being able to write non-blocking, synchronous-like code easily by using C++20 coroutines and the `co_await` keyword. * Reducing the possibility of race conditions, data races and deadlocks by using high-level objects with built-in synchronization. -* concurrencpp provides various types of commonly used executors with a complete coroutine integration. +* concurrencpp provides various types of commonly used executors with a complete coroutine integration. * Applications can extend the library by implementing their own provided executors. 
---- ### Table of contents * [concurrencpp overview](#concurrencpp-overview) * [Tasks](#tasks) - * [concurrencpp coroutines](#concurrencpp-coroutines ) + * [concurrencpp coroutines](#concurrencpp-coroutines ) * [Executors](#executors) - * [`executor` API](#executor-api) - * [Executor types](#executor-types) - * [Using executors](#using-executors) + * [`executor` API](#executor-api) + * [Executor types](#executor-types) + * [Using executors](#using-executors) + * [`thread_pool_executor` API](#thread_pool_executor-api) + * [`manual_executor` API](#thread_pool_executor-api) * [Result objects](#result-objects) * [`result` API](#result-api) * [Parallel coroutines](#parallel-coroutines) * [Parallel Fibonacci example](#parallel-fibonacci-example) -* [Result-promises](#result-promises) - * [`result_promise` API](#result_promise-api) - * [Result-promise example](#example-marshaling-asynchronous-result-using-result_promise) +* [Result-promises](#result-promises) + * [`result_promise` API](#result_promise-api) + * [`result_promise` example](#example-marshaling-asynchronous-result-using-result_promise) +* [Shared result objects](#shared-result-objects) + * [`shared_result` API](#shared_result-api) + * [`shared_result` example](#shared_result-example) * [Summery: using tasks and coroutines](#summery-using-tasks-and-coroutines) * [Result auxiliary functions](#result-auxiliary-functions) * [Timers and Timer queues](#timers-and-timer-queues) - * [`timer_queue` API](#timer_queue-api) - * [`timer` API](#timer-api) - * [Regular timer example](#regular-timer-example) - * [Oneshot timers](#oneshot-timers) - * [Oneshot timer example](#oneshot-timer-example) - * [Delay objects](#delay-objects) - * [Delay object example](#delay-object-example) -* [The runtime object](#the-runtime-object) - * [`runtime` API](#runtime-api) - * [Creating user-defined executors](#creating-user-defined-executors) - * [`task` API](#task-api) - * [Using a user-defined executor example](#example-using-a-user-defined-executor) + * [`timer_queue` API](#timer_queue-api) + * [`timer` API](#timer-api) + * [Regular timer example](#regular-timer-example) + * [Oneshot timers](#oneshot-timers) + * [Oneshot timer example](#oneshot-timer-example) + * [Delay objects](#delay-objects) + * [Delay object example](#delay-object-example) +* [The runtime object](#the-runtime-object) + * [`runtime` API](#runtime-api) + * [Creating user-defined executors](#creating-user-defined-executors) + * [`task` API](#task-api) + * [Using a user-defined executor example](#example-using-a-user-defined-executor) * [Supported platforms and tools](#supported-platforms-and-tools) ---- -### concurrencpp overview +### concurrencpp overview -concurrencpp is a task-centric library. A task is an asynchronous operation. Tasks offer a higher level of abstraction for concurrent code than traditional thread-centric approaches. Tasks can be chained together, meaning that tasks pass their asynchronous result from one to another, where the result of one task is used as if it were a parameter or an intermediate value of another ongoing task. Tasks allow applications to utilize available hardware resources better and scale much more than using raw threads, since tasks can be suspended, waiting for another task to produce a result, without blocking underlying OS-threads. Tasks bring much more productivity to developers by allowing them to focus more on business-logic and less on low-level concepts like thread management and inter-thread synchronization. 
+concurrencpp is a task-centric library. A task is an asynchronous operation. Tasks offer a higher level of abstraction for concurrent code than traditional thread-centric approaches. Tasks can be chained together, meaning that tasks pass their asynchronous result from one to another, where the result of one task is used as if it were a parameter or an intermediate value of another ongoing task. Tasks allow applications to utilize available hardware resources better and scale much more than using raw threads, since tasks can be suspended, waiting for another task to produce a result, without blocking underlying OS-threads. Tasks bring much more productivity to developers by allowing them to focus more on business-logic and less on low-level concepts like thread management and inter-thread synchronization. While tasks specify *what* actions have to be executed, *executors* are worker-objects that specify *where and how* to execute tasks. Executors spare applications the managing of thread pools and task queues themselves. Executors also decouple those concepts away from application code, by providing a unified API for creating and scheduling tasks. @@ -60,7 +66,7 @@ Tasks communicate with each other using *result objects*. A result object is an These 3 concepts - the task, the executor and the associated result are the building blocks of concurrencpp. Executors run tasks that communicate with each-other by sending results through result-objects. Tasks, executors and result objects work together symbiotically to produce concurrent code which is fast and clean. concurrencpp is built around the RAII concept. In order to use tasks and executors, applications create a `runtime` instance in the beginning of the `main` function. The runtime is then used to acquire existing executors and register new user-defined executors. Executors are used to create and schedule tasks to run, and they might return a `result` object that can be used to marshal the asynchronous result to another task that acts as its consumer. -When the runtime is destroyed, it iterates over every stored executor and calls its `shutdown` method. Every executor then exits gracefully. Unscheduled tasks are destroyed, and attempts to create new tasks will throw an exception. +When the runtime is destroyed, it iterates over every stored executor and calls its `shutdown` method. Every executor then exits gracefully. Unscheduled tasks are destroyed, and attempts to create new tasks will throw an exception. #### *"Hello world" program using concurrencpp:* @@ -69,18 +75,18 @@ When the runtime is destroyed, it iterates over every stored executor and calls #include int main() { - concurrencpp::runtime runtime; - auto result = runtime.thread_executor()->submit([] { - std::cout << "hello world" << std::endl; - }); + concurrencpp::runtime runtime; + auto result = runtime.thread_executor()->submit([] { + std::cout << "hello world" << std::endl; + }); - result.get(); - return 0; + result.get(); + return 0; } ``` In this basic example, we created a runtime object, then we acquired the thread executor from the runtime. We used `submit` to pass a lambda as our given callable. This lambda returns `void`, hence, the executor returns a `result` object that marshals the asynchronous result back to the caller. `main` calls `get` which blocks the main thread until the result becomes ready. If no exception was thrown, `get` returns `void`. If an exception was thrown, `get` re-throws it. 
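For completeness, here is a minimal sketch of the error path that the paragraph above mentions: if the submitted lambda throws instead of returning normally, `get` re-throws the stored exception on the calling thread (the exception type and message below are purely illustrative):

```cpp
#include "concurrencpp/concurrencpp.h"

#include <iostream>
#include <stdexcept>

int main() {
    concurrencpp::runtime runtime;
    auto result = runtime.thread_executor()->submit([] {
        // The task terminates by throwing - the exception is stored in the result object.
        throw std::runtime_error("something went wrong");
    });

    try {
        result.get();  // blocks until the task finishes, then re-throws the stored exception
    } catch (const std::runtime_error& e) {
        std::cout << "task failed: " << e.what() << std::endl;
    }

    return 0;
}
```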
Asynchronously, `thread_executor` launches a new thread of execution and runs the given lambda. It implicitly `co_return void` and the task is finished. `main` is then unblocked. - + #### *Concurrent even-number counting:* ```cpp @@ -148,21 +154,21 @@ In this example, we start the program by creating a runtime object. We create a `max_concurrency_level` returns the maximum amount of workers that the executor supports, In the threadpool executor case, the number of workers is calculated from the number of cores. We then partition the array to match the number of workers and send every chunk to be processed in its own task. Asynchronously, the workers count how many even numbers each chunk contains, and `co_return` the result. -`count_even` sums every result by pulling the count using `co_await`, the final result is then `co_return`ed. -The main thread, which was blocked by calling `get` is unblocked and the total count is returned. +`count_even` sums every result by pulling the count using `co_await`, the final result is then `co_return`ed. +The main thread, which was blocked by calling `get` is unblocked and the total count is returned. main prints the number of even numbers and the program terminates gracefully. ### Tasks Every big or complex operation can be broken down to smaller and chainable steps. Tasks are asynchronous operations implementing those computational steps. Tasks can run anywhere with the help of executors. While tasks can be created from regular callables (such as functors and lambdas), Tasks are mostly used with coroutines, which allow smooth suspension and resumption. In concurrencpp, the task concept is represented by the `concurrencpp::task` class. Although the task concept is central to concurrenpp, applications will rarely have to create and manipulate task objects themselves, as task objects are created and scheduled by the runtime with no external help. -#### concurrencpp coroutines +#### concurrencpp coroutines -concurrencpp allows applications to produce and consume coroutines as the main way of creating tasks. concurrencpp coroutines are eager and start to run the moment they are invoked (as opposed to lazy coroutines, which start to run only when `co_await`ed). concurrencpp coroutines can return any of `concurrencpp::result` or `concurrencpp::null_result`. +concurrencpp allows applications to produce and consume coroutines as the main way of creating tasks. concurrencpp coroutines are eager and start to run the moment they are invoked (as opposed to lazy coroutines, which start to run only when `co_await`ed). concurrencpp coroutines can return any of `concurrencpp::result` or `concurrencpp::null_result`. `concurrencpp::result` tells the coroutine to marshal the returned value or the thrown exception while `concurrencpp::null_result` tells the coroutine to drop and ignore any of them. -When a function returns any of `concurrencpp::result` or `concurrencpp::null_result`and contains at least one `co_await` or `co_return` in it's body, the function is a concurrencpp coroutine. Every valid concurrencpp coroutine is a valid task. In our count-even example above, `count_even` is such coroutine. We first spawned `count_even`, then inside it the threadpool executor spawned more child tasks (that are created from regular callables), that were eventually joined using `co_await`. +When a function returns any of `concurrencpp::result` or `concurrencpp::null_result`and contains at least one `co_await` or `co_return` in it's body, the function is a concurrencpp coroutine. 
Every valid concurrencpp coroutine is a valid task. In our count-even example above, `count_even` is such a coroutine. We first spawned `count_even`, then inside it the threadpool executor spawned more child tasks (that are created from regular callables), that were eventually joined using `co_await`. Coroutines can start to run synchronously, in the caller thread. This kind of coroutines is called "regular coroutines". Concurrencpp coroutines can also start to run in parallel, inside a given executor, this kind of coroutines is called "parallel coroutines". @@ -177,83 +183,83 @@ Executors provide a unified way of scheduling and executing tasks, since they al ```cpp class executor { - /* - Initializes a new executor and gives it a name. - */ - executor(std::string_view name); - - /* - Destroys this executor. - */ - virtual ~executor() noexcept = default; - - /* - The name of the executor, used for logging and debugging. - */ - const std::string name; - - /* - Schedules a task to run in this executor. - Throws concurrencpp::errors::executor_shutdown exception if shutdown was called before. - */ - virtual void enqueue(concurrencpp::task task) = 0; - - /* - Schedules a range of tasks to run in this executor. - Throws concurrencpp::errors::executor_shutdown exception if shutdown was called before. - */ - virtual void enqueue(std::span tasks) = 0; - - /* - Returns the maximum count of real OS threads this executor supports. - The actual count of threads this executor is running might be smaller than this number. - returns numeric_limits::max if the executor does not have a limit for OS threads. - */ - virtual int max_concurrency_level() const noexcept = 0; - - /* - Returns true if shutdown was called before, false otherwise. - */ - virtual bool shutdown_requested() const noexcept = 0; - - /* - Shuts down the executor: - - Tells underlying threads to exit their work loop and joins them. - - Destroyes unexecuted coroutines. - - Makes subsequent calls to enqueue, post, submit, bulk_post and - bulk_submit to throw concurrencpp::errors::executor_shutdown exception. - - Makes shutdown_requested return true. - */ - virtual void shutdown() noexcept = 0; - - /* - Turns a callable and its arguments into a task object and schedules it to run in this executor using enqueue. - Arguments are passed to the task by decaying them first. - Throws errors::executor_shutdown exception if shutdown has been called before. - */ - template - void post(callable_type&& callable, argument_types&& ... arguments); - - /* - Like post, but returns a result object that marshals the asynchronous result. - Throws errors::executor_shutdown exception if shutdown has been called before. - */ - template - result submit(callable_type&& callable, argument_types&& ... arguments); - - /* - Turns an array of callables into an array of tasks and schedules them to run in this executor using enqueue. - Throws errors::executor_shutdown exception if shutdown has been called before. - */ - template - void bulk_post(std::span callable_list); - - /* - Like bulk_post, but returns an array of result objects that marshal the asynchronous results. - Throws errors::executor_shutdown exception if shutdown has been called before. - */ - template - std::vector> bulk_submit(std::span callable_list); + /* + Initializes a new executor and gives it a name. + */ + executor(std::string_view name); + + /* + Destroys this executor. + */ + virtual ~executor() noexcept = default; + + /* + The name of the executor, used for logging and debugging. 
+ */ + const std::string name; + + /* + Schedules a task to run in this executor. + Throws concurrencpp::errors::executor_shutdown exception if shutdown was called before. + */ + virtual void enqueue(concurrencpp::task task) = 0; + + /* + Schedules a range of tasks to run in this executor. + Throws concurrencpp::errors::executor_shutdown exception if shutdown was called before. + */ + virtual void enqueue(std::span tasks) = 0; + + /* + Returns the maximum count of real OS threads this executor supports. + The actual count of threads this executor is running might be smaller than this number. + returns numeric_limits::max if the executor does not have a limit for OS threads. + */ + virtual int max_concurrency_level() const noexcept = 0; + + /* + Returns true if shutdown was called before, false otherwise. + */ + virtual bool shutdown_requested() const noexcept = 0; + + /* + Shuts down the executor: + - Tells underlying threads to exit their work loop and joins them. + - Destroys unexecuted coroutines. + - Makes subsequent calls to enqueue, post, submit, bulk_post and + bulk_submit to throw concurrencpp::errors::executor_shutdown exception. + - Makes shutdown_requested return true. + */ + virtual void shutdown() noexcept = 0; + + /* + Turns a callable and its arguments into a task object and schedules it to run in this executor using enqueue. + Arguments are passed to the task by decaying them first. + Throws errors::executor_shutdown exception if shutdown has been called before. + */ + template + void post(callable_type&& callable, argument_types&& ... arguments); + + /* + Like post, but returns a result object that marshals the asynchronous result. + Throws errors::executor_shutdown exception if shutdown has been called before. + */ + template + result submit(callable_type&& callable, argument_types&& ... arguments); + + /* + Turns an array of callables into an array of tasks and schedules them to run in this executor using enqueue. + Throws errors::executor_shutdown exception if shutdown has been called before. + */ + template + void bulk_post(std::span callable_list); + + /* + Like bulk_post, but returns an array of result objects that marshal the asynchronous results. + Throws errors::executor_shutdown exception if shutdown has been called before. + */ + template + std::vector> bulk_submit(std::span callable_list); }; ``` @@ -261,13 +267,13 @@ class executor { As mentioned above, concurrencpp provides commonly used executors. These executor types are: -* **thread pool executor** - a general purpose executor that maintains a pool of threads. -The thread pool executor is suitable for short cpu-bound tasks that don't block. Applications are encouraged to use this executor as the default executor for non-blocking tasks. +* **thread pool executor** - a general purpose executor that maintains a pool of threads. +The thread pool executor is suitable for short cpu-bound tasks that don't block. Applications are encouraged to use this executor as the default executor for non-blocking tasks. The concurrencpp thread pool provides dynamic thread injection and dynamic work balancing. -* **blocking executor** - a threadpool executor with a larger pool of threads. Suitable for launching short blocking tasks like file io and db queries. +* **background executor** - a threadpool executor with a larger pool of threads. Suitable for launching short blocking tasks like file io and db queries. -* **thread executor** - an executor that launches each enqueued task to run on a new thread of execution. 
Threads are not reused. +* **thread executor** - an executor that launches each enqueued task to run on a new thread of execution. Threads are not reused. This executor is good for long running tasks, like objects that run a work loop, or long blocking operations. * **worker thread executor** - a single thread executor that maintains a single task queue. Suitable when applications want a dedicated thread that executes many related tasks. @@ -281,24 +287,191 @@ This executor is good for long running tasks, like objects that run a work loop, #### Using executors The bare mechanism of an executor is encapsulated in its `enqueue` method. -This method enqueues a task for execution and has two overloads: +This method enqueues a task for execution and has two overloads: One overload receives a single task object as an argument, and another that receives a span of task objects. The second overload is used to enqueue a batch of tasks. This allows better scheduling heuristics and decreased contention. -Applications don't have to rely on `enqueue` alone, `concurrencpp::executor` provides an API for scheduling user callables by converting them to task objects behind the scenes. -Applications can request executors to return a result object that marshals the asynchronous result of the provided callable. This is done by calling `executor::submit` and `execuor::bulk_submit`. -`submit` gets a callable, and returns a result object. `executor::bulk_submit` gets a `span` of callables and returns a `vector`of result objects in a similar way `submit` works. -In many cases, applications are not interested in the asynchronous value or exception. In this case, applications can use `executor:::post` and `executor::bulk_post` to schedule a callable or a `span` of callables to be executed, but also tells the task to drop any returned value or thrown exception. Not marshaling the asynchronous result is faster than marshaling, but then we have no way of knowing the status or the result of the ongoing task. +Applications don't have to rely on `enqueue` alone, `concurrencpp::executor` provides an API for scheduling user callables by converting them to task objects behind the scenes. +Applications can request executors to return a result object that marshals the asynchronous result of the provided callable. This is done by calling `executor::submit` and `executor::bulk_submit`. +`submit` gets a callable, and returns a result object. `executor::bulk_submit` gets a `span` of callables and returns a `vector`of result objects in a similar way `submit` works. +In many cases, applications are not interested in the asynchronous value or exception. In this case, applications can use `executor:::post` and `executor::bulk_post` to schedule a callable or a `span` of callables to be executed, but also tells the task to drop any returned value or thrown exception. Not marshaling the asynchronous result is faster than marshaling, but then we have no way of knowing the status or the result of the ongoing task. `post`, `bulk_post`, `submit` and `bulk_submit` use `enqueue` behind the scenes for the underlying scheduling mechanism. + +#### `thread_pool_executor` API + +Aside from `post`, `submit`, `bulk_post` and `bulk_submit`, the `thread_pool_executor` provides additional methods. + +```cpp +class thread_pool_executor { + + /* + Returns the number of milliseconds each thread-pool worker remains idle (without any task to execute) before exiting. 
+ This constant can be set by passing a runtime_options object to the constructor of the runtime class. + */ + std::chrono::milliseconds max_worker_idle_time() const noexcept; + +}; +``` +#### `manual_executor` API + +Aside from `post`, `submit`, `bulk_post` and `bulk_submit`, the `manual_executor` provides additional methods. + +```cpp +class manual_executor { + + /* + Destructor. Equivalent to clear. + */ + ~manual_executor() noexcept; + + /* + Returns the number of enqueued tasks at the moment of invocation. + This number can change quickly by the time the application handles it, it should be used as a hint. + This method is thread safe. + */ + size_t size() const noexcept; + + /* + Queries whether the executor is empty from tasks at the moment of invocation. + This value can change quickly by the time the application handles it, it should be used as a hint. + This method is thread safe. + */ + bool empty() const noexcept; + + /* + Clears the executor from any enqueued but yet to-be-executed tasks, and returns the number of cleared tasks. + Tasks enqueued to this executor by (post_)submit method are resumed and errors::broken_task exception is thrown inside them. + Ongoing tasks that are being executed by loop_once(_XXX) or loop(_XXX) are uneffected. + This method is thread safe. + */ + size_t clear(); + + /* + Tries to execute a single task. If at the moment of invocation the executor is empty, the method does nothing. + Returns true if a task was executed, false otherwise. + This method is thread safe. + */ + bool loop_once(); + + /* + Tries to execute a single task. + This method returns when either a task was executed or max_waiting_time (in milliseconds) has reached. + If max_waiting_time is 0, the method is equivalent to loop_once. + If shutdown is called from another thread, this method returns and throws errors::shutdown_exception. + This method is thread safe. + */ + bool loop_once_for(std::chrono::milliseconds max_waiting_time); + + /* + Tries to execute a single task. + This method returns when either a task was executed or timeout_time has reached. + If timeout_time has already expired, this method is equivalent to loop_once. + If shutdown is called from another thread, this method returns and throws errors::shutdown_exception. + This method is thread safe. + */ + template + bool loop_once_until(std::chrono::time_point timeout_time); + + /* + Tries to execute max_count enqueued tasks and returns the number of tasks that were executed. + This method does not wait: it returns when the executor becomes empty from tasks or max_count tasks have been executed. + This method is thread safe. + */ + size_t loop(size_t max_count); + + /* + Tries to execute max_count tasks. + This method returns when either max_count tasks were executed or a total amount of max_waiting_time has passed. + If max_waiting_time is 0, the method is equivalent to loop. + Returns the actual amount of tasks that were executed. + If shutdown is called from another thread, this method returns and throws errors::shutdown_exception. + This method is thread safe. + */ + size_t loop_for(size_t max_count, std::chrono::milliseconds max_waiting_time); + + /* + Tries to execute max_count tasks. + This method returns when either max_count tasks were executed or timeout_time has reached. + If timeout_time has already expired, the method is equivalent to loop. + Returns the actual amount of tasks that were executed. + If shutdown is called from another thread, this method returns and throws errors::shutdown_exception. 
+ This method is thread safe. + */ + template + size_t loop_until(size_t max_count, std::chrono::time_point timeout_time); + + /* + Waits for at least one task to be available for execution. + This method should be used as a hint, as other threads (calling loop, for example) might empty the executor, + before this thread has a chance to do something with the newly enqueued tasks. + If shutdown is called from another thread, this method returns and throws errors::shutdown_exception. + This method is thread safe. + */ + void wait_for_task(); + + /* + This method returns when one or more tasks are available for execution or max_waiting_time has passed. + Returns true if at at least one task is available for execution, false otherwise. + This method should be used as a hint, as other threads (calling loop, for example) might empty the executor, + before this thread has a chance to do something with the newly enqueued tasks. + If shutdown is called from another thread, this method returns and throws errors::shutdown_exception. + This method is thread safe. + */ + bool wait_for_task_for(std::chrono::milliseconds max_waiting_time); + + /* + This method returns when one or more tasks are available for execution or timeout_time has reached. + Returns true if at at least one task is available for execution, false otherwise. + This method should be used as a hint, as other threads (calling loop, for example) might empty the executor, + before this thread has a chance to do something with the newly enqueued tasks. + If shutdown is called from another thread, this method returns and throws errors::shutdown_exception. + This method is thread safe. + */ + template + bool wait_for_task_until(std::chrono::time_point timeout_time); + + /* + This method returns when max_count or more tasks are available for execution. + This method should be used as a hint, as other threads (calling loop, for example) might empty the executor, + before this thread has a chance to do something with the newly enqueued tasks. + If shutdown is called from another thread, this method returns and throws errors::shutdown_exception. + This method is thread safe. + */ + void wait_for_tasks(size_t max_count); + + /* + This method returns when max_count or more tasks are available for execution or max_waiting_time (in milliseconds) has passed. + Returns the number of tasks available for execution when the method returns. + This method should be used as a hint, as other threads (calling loop, for example) might empty the executor, + before this thread has a chance to do something with the newly enqueued tasks. + If shutdown is called from another thread, this method returns and throws errors::shutdown_exception. + This method is thread safe. + */ + size_t wait_for_tasks_for(size_t count, std::chrono::milliseconds max_waiting_time); + + /* + This method returns when max_count or more tasks are available for execution or timeout_time is reached. + Returns the number of tasks available for execution when the method returns. + This method should be used as a hint, as other threads (calling loop, for example) might empty the executor, + before this thread has a chance to do something with the newly enqueued tasks. + If shutdown is called from another thread, this method returns and throws errors::shutdown_exception. + This method is thread safe. 
+ */ + template + size_t wait_for_tasks_until(size_t count, std::chrono::time_point timeout_time); + +}; +``` + ### Result objects Asynchronous values and exceptions can be consumed using the concurrencpp result objects. -A result object is a pipe for the asynchronous result, like `std::future`. -When a task finishes execution, it either returns a valid value or throws an exception. +A result object is a pipe for the asynchronous result, like `std::future`. +When a task finishes execution, it either returns a valid value or throws an exception. In either case, this asynchronous result is marshaled to the consumer of the result object. -The result status therefore, vary from `idle` (the asynchronous result or exception aren't ready yet) to `value` (the task terminated by returning a valid value) to `exception` (the task terminated by throwing an exception). +The result status therefore, varies from `idle` (the asynchronous result or exception aren't ready yet) to `value` (the task terminated by returning a valid value) to `exception` (the task terminated by throwing an exception). Result objects are a move-only type, and as such, they cannot be used after their content was moved to another result object. In this case, the result object is considered to be empty and attempts to call any method other than `operator bool` and `operator = ` will throw. After the asynchronous result has been pulled out of the result object (by calling `get`, `await` or `await_via`), the result object becomes empty. Emptiness can be tested with `operator bool`. @@ -310,158 +483,158 @@ Result objects can be polled for their status by calling `result::status`. Results can be waited by calling any of `result::wait`, `result::wait_for`, `result::wait_until` or `result::get`. Waiting a result is a blocking operation (in the case the asynchronous result is not ready), and will suspend the entire thread of execution waiting for the asynchronous result to become available. Waiting operations are generally discouraged and only allowed in root-level tasks or in contexts which allow it, like blocking the main thread waiting for the rest of the application to finish gracefully, or using `concurrencpp::blocking_executor` or `concurrencpp::thread_executor`. -Awaiting a result means to suspend the current coroutine until the asynchronous result is ready. If a valid value was returned from the associated task, it is returend from the result object. If the associated task threw an exception, it is re-thrown. -At the moment of awaiting, if the result is already ready, the current coroutine resumes immediately. Otherwise, it is resumed by the thread that sets the asynchronous result or exception. +Awaiting a result means to suspend the current coroutine until the asynchronous result is ready. If a valid value was returned from the associated task, it is returned from the result object. If the associated task throws an exception, it is re-thrown. +At the moment of awaiting, if the result is already ready, the current coroutine resumes immediately. Otherwise, it is resumed by the thread that sets the asynchronous result or exception. -The behavior of awaiting result objects can be further fine tuned by using `await_via`. +The behavior of awaiting result objects can be further fine tuned by using `await_via`. This method accepts an executor and a boolean flag (`force_rescheduling`). If, at the moment of awaiting, the result is already ready, the behavior depends on the value of `force_rescheduling`. 
If `force_rescheduling` is true, the current coroutine is forcefully suspended and resumed inside the given executor. If `force_rescheduling` is false, the current coroutine is resumed immediately in the calling thread. -If the asynchronous result is not ready at the moment of awaiting, the current coroutine resumed after the result is set, by scheduling it to run in the given exector. +If the asynchronous result is not ready at the moment of awaiting, the current coroutine resumes after the result is set, by scheduling it to run in the given executor. Resolving a result is similar to awaiting it. The different is that the `co_await` expression will return the result object itself, in a non empty form, in a ready state. The asynchronous result can then be pulled by using `get` or `co_await`. -Just like `await_via`, `resolve_via` fine tunes the control flow of the coroutine by passing an executor and a flag suggesting how to behave when the result is already ready. +Just like `await_via`, `resolve_via` fine tunes the control flow of the coroutine by passing an executor and a flag suggesting how to behave when the result is already ready. Awaiting a result object by using `co_await` (and by doing so, turning the current function/task into a coroutine as well) is the preferred way of consuming result objects, as it does not block underlying threads. #### `result` API - + ```cpp class result{ - /* - Creates an empty result that isn't associated with any task. - */ - result() noexcept = default; - - /* - Destroyes the result. Associated tasks are not cancelled. - The destructor does not block waiting for the asynchronous result to become ready. - */ - ~result() noexcept = default; - - /* - Moves the content of rhs to *this. After this call, rhs is empty. - */ - result(result&& rhs) noexcept = default; - - /* - Moves the content of rhs to *this. After this call, rhs is empty. Returns *this. - */ - result& operator = (result&& rhs) noexcept = default; - - /* - Returns true if this is a non-empty result. - Applications must not use this object if this->operator bool() is false. - */ - operator bool() const noexcept; - - /* - Queries the status of *this. - The return value is any of result_status::idle, result_status::value or result_status::exception. - Throws concurrencpp::errors::empty_result if *this is empty. - */ - result_status status() const; - - /* - Blocks the current thread of execution until this result is ready, when status() != result_status::idle. - Throws concurrencpp::errors::empty_result if *this is empty. - */ - void wait(); - - /* - Blocks until this result is ready or duration has passed. Returns the status of this result after unblocking. - Throws concurrencpp::errors::empty_result if *this is empty. - */ - template - result_status wait_for(std::chrono::duration duration); - - /* - Blocks until this result is ready or timeout_time has reached. Returns the status of this result after unblocking. - Throws concurrencpp::errors::empty_result if *this is empty. - */ - template< class clock, class duration > - result_status wait_until(std::chrono::time_point timeout_time); - - /* - Blocks the current thread of execution until this result is ready, when status() != result_status::idle. - If the result is a valid value, it is returned, otherwise, get rethrows the asynchronous exception. - Throws concurrencpp::errors::empty_result if *this is empty. - */ - type get(); - - /* - Returns an awaitable used to await this result. 
- If the result is already ready - the current coroutine resumes immediately in the calling thread of execution. - If the result is not ready yet, the current coroutine is suspended and resumed when the asynchronous result is ready, - by the thread which had set the asynchronous value or exception. - In either way, after resuming, if the result is a valid value, it is returned. - Otherwise, operator co_await rethrows the asynchronous exception. - Throws concurrencpp::errors::empty_result if *this is empty. - */ - auto operator co_await(); - - /* - Returns an awaitable used to await this result. - If the result is not ready yet, the current coroutine is suspended and resumed when the asynchronous result is ready, - by scheduling the current coroutine via executor. - If the result is already ready - the behaviour depends on the value of force_rescheduling: - If force_rescheduling = true, then the current coroutine is forcefully suspended and resumed via executor. - If force_rescheduling = false, then the current coroutine resumes immediately in the calling thread of execution. - In either way, after resuming, if the result is a valid value, it is returned. - Otherwise, operator co_await rethrows the asynchronous exception. - Throws concurrencpp::errors::empty_result if *this is empty. - Throws std::invalid_argument if executor is null. - If this result is ready and force_rescheduling=true, throws any exception that executor::enqueue may throw. - */ - auto await_via( - std::shared_ptr executor, - bool force_rescheduling = true); - - /* - Returns an awaitable used to resolve this result. - After co_await expression finishes, *this is returned in a non-empty form, in a ready state. - Throws concurrencpp::errors::empty_result if *this is empty. - */ - auto resolve(); - - /* - Returns an awaitable used to resolve this result. - If the result is not ready yet, the current coroutine is suspended and resumed when the asynchronous result is ready, - by scheduling the current coroutine via executor. - If the result is already ready - the behaviour depends on the value of force_rescheduling: - If force_rescheduling = true, then the current coroutine is forcefully suspended and resumed via executor. - If force_rescheduling = false, then the current coroutine resumes immediately in the calling thread of execution. - In either way, after resuming, *this is returned in a non-empty form and guaranteed that its status is not result_status::idle. - Throws concurrencpp::errors::empty_result if *this is empty. - Throws std::invalid_argument if executor is null. - If this result is ready and force_rescheduling=true, throws any exception that executor::enqueue may throw. - */ - auto resolve_via( - std::shared_ptr executor, - bool force_rescheduling = true); + /* + Creates an empty result that isn't associated with any task. + */ + result() noexcept = default; + + /* + Destroys the result. Associated tasks are not cancelled. + The destructor does not block waiting for the asynchronous result to become ready. + */ + ~result() noexcept = default; + + /* + Moves the content of rhs to *this. After this call, rhs is empty. + */ + result(result&& rhs) noexcept = default; + + /* + Moves the content of rhs to *this. After this call, rhs is empty. Returns *this. + */ + result& operator = (result&& rhs) noexcept = default; + + /* + Returns true if this is a non-empty result. + Applications must not use this object if this->operator bool() is false. + */ + operator bool() const noexcept; + + /* + Queries the status of *this. 
+ The return value is any of result_status::idle, result_status::value or result_status::exception. + Throws concurrencpp::errors::empty_result if *this is empty. + */ + result_status status() const; + + /* + Blocks the current thread of execution until this result is ready, when status() != result_status::idle. + Throws concurrencpp::errors::empty_result if *this is empty. + */ + void wait(); + + /* + Blocks until this result is ready or duration has passed. Returns the status of this result after unblocking. + Throws concurrencpp::errors::empty_result if *this is empty. + */ + template + result_status wait_for(std::chrono::duration duration); + + /* + Blocks until this result is ready or timeout_time has reached. Returns the status of this result after unblocking. + Throws concurrencpp::errors::empty_result if *this is empty. + */ + template< class clock, class duration > + result_status wait_until(std::chrono::time_point timeout_time); + + /* + Blocks the current thread of execution until this result is ready, when status() != result_status::idle. + If the result is a valid value, it is returned, otherwise, get rethrows the asynchronous exception. + Throws concurrencpp::errors::empty_result if *this is empty. + */ + type get(); + + /* + Returns an awaitable used to await this result. + If the result is already ready - the current coroutine resumes immediately in the calling thread of execution. + If the result is not ready yet, the current coroutine is suspended and resumed when the asynchronous result is ready, + by the thread which had set the asynchronous value or exception. + In either way, after resuming, if the result is a valid value, it is returned. + Otherwise, operator co_await rethrows the asynchronous exception. + Throws concurrencpp::errors::empty_result if *this is empty. + */ + auto operator co_await(); + + /* + Returns an awaitable used to await this result. + If the result is not ready yet, the current coroutine is suspended and resumed when the asynchronous result is ready, + by scheduling the current coroutine via executor. + If the result is already ready - the behaviour depends on the value of force_rescheduling: + If force_rescheduling = true, then the current coroutine is forcefully suspended and resumed via executor. + If force_rescheduling = false, then the current coroutine resumes immediately in the calling thread of execution. + In either way, after resuming, if the result is a valid value, it is returned. + Otherwise, operator co_await rethrows the asynchronous exception. + Throws concurrencpp::errors::empty_result if *this is empty. + Throws std::invalid_argument if executor is null. + If this result is ready and force_rescheduling=true, throws any exception that executor::enqueue may throw. + */ + auto await_via( + std::shared_ptr executor, + bool force_rescheduling = true); + + /* + Returns an awaitable used to resolve this result. + After co_await expression finishes, *this is returned in a non-empty form, in a ready state. + Throws concurrencpp::errors::empty_result if *this is empty. + */ + auto resolve(); + + /* + Returns an awaitable used to resolve this result. + If the result is not ready yet, the current coroutine is suspended and resumed when the asynchronous result is ready, + by scheduling the current coroutine via executor. + If the result is already ready - the behaviour depends on the value of force_rescheduling: + If force_rescheduling = true, then the current coroutine is forcefully suspended and resumed via executor. 
+ If force_rescheduling = false, then the current coroutine resumes immediately in the calling thread of execution. + In either way, after resuming, *this is returned in a non-empty form and guaranteed that its status is not result_status::idle. + Throws concurrencpp::errors::empty_result if *this is empty. + Throws std::invalid_argument if executor is null. + If this result is ready and force_rescheduling=true, throws any exception that executor::enqueue may throw. + */ + auto resolve_via( + std::shared_ptr executor, + bool force_rescheduling = true); }; ``` ### Parallel coroutines -Regular coroutines start to run synchronously in the calling thread of execution. Execution might shift to another thread of execution if the coroutine undergoes a rescheduling, for example by awaiting an unready result object inside it. -concurrencpp also provide parallel coroutines, which start to run inside a given executor, not in the invoking thread of execution. This style of scheduling coroutines is especially helpful when writing parallel algorithms, recursive algorithms and concurrent algorithms that use the fork-join model. +Regular coroutines start to run synchronously in the calling thread of execution. Execution might shift to another thread of execution if the coroutine undergoes a rescheduling, for example by awaiting an unready result object inside it. +concurrencpp also provides parallel coroutines, which start to run inside a given executor, not in the invoking thread of execution. This style of scheduling coroutines is especially helpful when writing parallel algorithms, recursive algorithms and concurrent algorithms that use the fork-join model. Every parallel coroutine must meet the following preconditions: -1. Returns any of `result` / `null_result` . +1. Returns any of `result` / `null_result` . 1. Gets `executor_tag` as its first argument . 1. Gets any of `type*` / `type&` / `std::shared_ptr`, where `type` is a concrete class of `executor` as its second argument. 1. Contains any of `co_await` or `co_return` in its body. -If all the above applies, the function is a parallel coroutine: -concurrencpp will start the coroutine suspended and immediately re-schedule it to run in the provided executor. +If all the above applies, the function is a parallel coroutine: +concurrencpp will start the coroutine suspended and immediately reschedule it to run in the provided executor. `concurrencpp::executor_tag` is a dummy placeholder to tell the concurrencpp runtime that this function is not a regular function, it needs to start running inside the given executor. -Applications can then consume the result of the parallel coroutine by using the returned result object. +Applications can then consume the result of the parallel coroutine by using the returned result object. 
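Before the full example, here is a minimal sketch of what a parallel coroutine looks like (the function and variable names are illustrative): the runtime starts it suspended and immediately reschedules it onto the given thread-pool executor.

```cpp
#include "concurrencpp/concurrencpp.h"

#include <iostream>

using namespace concurrencpp;

// A hypothetical parallel coroutine: executor_tag is a dummy first parameter and
// the executor the coroutine should start on is passed as the second parameter.
result<int> double_value(executor_tag, std::shared_ptr<thread_pool_executor> tpe, int value) {
    // This body already runs inside the thread-pool executor, not in the calling thread.
    // tpe is available here for spawning further child tasks if needed.
    co_return value * 2;
}

int main() {
    concurrencpp::runtime runtime;
    // The first argument is a default-constructed executor_tag.
    auto doubled = double_value({}, runtime.thread_pool_executor(), 21).get();
    std::cout << "doubled = " << doubled << std::endl;
    return 0;
}
```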
-#### *Parallel Fibonacci example:* +#### *Parallel Fibonacci example:* ```cpp #include "concurrencpp/concurrencpp.h" #include @@ -469,42 +642,42 @@ Applications can then consume the result of the parallel coroutine by using the using namespace concurrencpp; int fibonacci_sync(int i) { - if (i == 0) { - return 0; - } + if (i == 0) { + return 0; + } - if (i == 1) { - return 1; - } + if (i == 1) { + return 1; + } - return fibonacci_sync(i - 1) + fibonacci_sync(i - 2); + return fibonacci_sync(i - 1) + fibonacci_sync(i - 2); } result fibonacci(executor_tag, std::shared_ptr tpe, const int curr) { - if (curr <= 10) { - co_return fibonacci_sync(curr); - } + if (curr <= 10) { + co_return fibonacci_sync(curr); + } - auto fib_1 = fibonacci({}, tpe, curr - 1); - auto fib_2 = fibonacci({}, tpe, curr - 2); + auto fib_1 = fibonacci({}, tpe, curr - 1); + auto fib_2 = fibonacci({}, tpe, curr - 2); - co_return co_await fib_1 + co_await fib_2; + co_return co_await fib_1 + co_await fib_2; } int main() { - concurrencpp::runtime runtime; - auto fibb_30 = fibonacci({}, runtime.thread_pool_executor(), 30).get(); - std::cout << "fibonacci(30) = " << fibb_30 << std::endl; - return 0; + concurrencpp::runtime runtime; + auto fibb_30 = fibonacci({}, runtime.thread_pool_executor(), 30).get(); + std::cout << "fibonacci(30) = " << fibb_30 << std::endl; + return 0; } ``` In this example, we calculate the 30-th member of the Fibonacci sequence in a parallel manner. We start launching each Fibonacci step in its own parallel coroutine. The first argument is a dummy `executor_tag` and the second argument is the threadpool executor. Every recursive step invokes a new parallel coroutine that runs in parallel. Each result is `co_return`ed to its parent task and acquired by using `co_await`. -When we deem the input to be small enough to be calculated synchronously (when `curr <= 10`), we stop executing each recursive step in its own task and just solve the algorithm synchronously. +When we deem the input to be small enough to be calculated synchronously (when `curr <= 10`), we stop executing each recursive step in its own task and just solve the algorithm synchronously. -To compare, this is how the same code is written without using parallel coroutines, and relying on `exector::submit` alone. +To compare, this is how the same code is written without using parallel coroutines, and relying on `executor::submit` alone. Since `fibonacci` returns a `result`, submitting it recursively via `executor::submit` will result a `result>`. ```cpp @@ -533,8 +706,7 @@ result fibonacci(std::shared_ptr tpe, const int curr) auto fib_1 = tpe->submit(fibonacci, tpe, curr - 1); auto fib_2 = tpe->submit(fibonacci, tpe, curr - 2); - co_return - co_await co_await fib_1 + + co_return co_await co_await fib_1 + co_await co_await fib_2; } @@ -550,7 +722,7 @@ int main() { Result objects are the main way to pass data between tasks in concurrencpp and we've seen how executors and coroutines produce such objects. Sometimes we want to use the capabilities of a result object with non-tasks, for example when using a third-party library. In this case, we can complete a result object by using a `result_promise`. -`result_promise` resembles a `std::promise` object - applications can manually set the asynchronous result or exception and make the associated `result` object become ready. +`result_promise` resembles a `std::promise` object - applications can manually set the asynchronous result or exception and make the associated `result` object become ready. 
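As a minimal sketch of the exception path (the complete, value-setting example appears after the API listing below), setting an exception on the promise makes consumers of the associated result re-throw it:

```cpp
#include "concurrencpp/concurrencpp.h"

#include <iostream>
#include <stdexcept>

int main() {
    concurrencpp::result_promise<int> promise;
    auto result = promise.get_result();

    // Manually complete the result with an exception instead of a value.
    promise.set_exception(std::make_exception_ptr(std::runtime_error("no value produced")));

    try {
        result.get();  // re-throws the exception that was set on the promise
    } catch (const std::runtime_error& e) {
        std::cout << "result failed: " << e.what() << std::endl;
    }

    return 0;
}
```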
Just like result objects, result-promises are a move only type that becomes empty after move. Similarly, after setting a result or an exception, the result promise becomes empty as well. If a result-promise gets out of scope and no result/exception has been set, the result-promise destructor sets a `concurrencpp::errors::broken_task` exception using the `set_exception` method. @@ -562,69 +734,69 @@ Result promises can convert callback style of code into `async/await` style of c ```cpp template -class result_promise { - /* - Constructs a valid result_promise. - */ - result_promise(); - - /* - Moves the content of rhs to *this. After this call, rhs is empty. - */ - result_promise(result_promise&& rhs) noexcept; - - /* - Destroys *this, possibly setting a concurrencpp::errors::broken_task exception - by calling set_exception if *this is not empty at the time of destruction. - */ - ~result_promise() noexcept; - - /* - Moves the content of rhs to *this. After this call, rhs is empty. - */ - result_promise& operator = (result_promise&& rhs) noexcept; - - /* - Returns true if this is a non-empty result-promise. - Applications must not use this object if this->operator bool() is false. - */ - explicit operator bool() const noexcept; - - /* - Sets a value by constructing <> from arguments... in-place. - Makes the associated result object become ready - tasks waiting for it to become ready are unblocked. - Suspended tasks are resumed either inline or via the executor that was provided by calling result::await_via or result::resolve_via. - After this call, *this becomes empty. - If *this is empty, a concurrencpp::errors::empty_result_promise exception is thrown. - */ - template - void set_result(argument_types&& ... arguments); - - /* - Sets an exception. - Makes the associated result object become ready - tasks waiting for it to become ready are unblocked. - Suspended tasks are resumed either inline or via the executor that was provided by calling result::await_via or result::resolve_via. - After this call, *this becomes empty. - If *this is empty, a concurrencpp::errors::empty_result_promise exception is thrown. - If exception_ptr is null, an std::invalid_argument exception is thrown. - */ - void set_exception(std::exception_ptr exception_ptr); - - /* - A convenience method that invokes a callable with arguments... and calls set_result with the result of the invocation. - If an exception is thrown, the thrown exception is caught and set instead by calling set_exception. - After this call, *this becomes empty. - If *this is empty, a concurrencpp::errors::empty_result_promise exception is thrown. - */ - template - void set_from_function(callable_type&& callable, argument_types&& ... arguments); - - /* - Gets the associated result object. - If *this is empty, a concurrencpp::errors::empty_result_promise exception is thrown. - If this method had been called before, a concurrencpp::errors::result_already_retrieved exception is thrown. - */ - result get_result(); +class result_promise { + /* + Constructs a valid result_promise. + */ + result_promise(); + + /* + Moves the content of rhs to *this. After this call, rhs is empty. + */ + result_promise(result_promise&& rhs) noexcept; + + /* + Destroys *this, possibly setting a concurrencpp::errors::broken_task exception + by calling set_exception if *this is not empty at the time of destruction. + */ + ~result_promise() noexcept; + + /* + Moves the content of rhs to *this. After this call, rhs is empty. 
+ */ + result_promise& operator = (result_promise&& rhs) noexcept; + + /* + Returns true if this is a non-empty result-promise. + Applications must not use this object if this->operator bool() is false. + */ + explicit operator bool() const noexcept; + + /* + Sets a value by constructing <> from arguments... in-place. + Makes the associated result object become ready - tasks waiting for it to become ready are unblocked. + Suspended tasks are resumed either inline or via the executor that was provided by calling result::await_via or result::resolve_via. + After this call, *this becomes empty. + If *this is empty, a concurrencpp::errors::empty_result_promise exception is thrown. + */ + template + void set_result(argument_types&& ... arguments); + + /* + Sets an exception. + Makes the associated result object become ready - tasks waiting for it to become ready are unblocked. + Suspended tasks are resumed either inline or via the executor that was provided by calling result::await_via or result::resolve_via. + After this call, *this becomes empty. + If *this is empty, a concurrencpp::errors::empty_result_promise exception is thrown. + If exception_ptr is null, an std::invalid_argument exception is thrown. + */ + void set_exception(std::exception_ptr exception_ptr); + + /* + A convenience method that invokes a callable with arguments... and calls set_result with the result of the invocation. + If an exception is thrown, the thrown exception is caught and set instead by calling set_exception. + After this call, *this becomes empty. + If *this is empty, a concurrencpp::errors::empty_result_promise exception is thrown. + */ + template + void set_from_function(callable_type&& callable, argument_types&& ... arguments); + + /* + Gets the associated result object. + If *this is empty, a concurrencpp::errors::empty_result_promise exception is thrown. + If this method had been called before, a concurrencpp::errors::result_already_retrieved exception is thrown. + */ + result get_result(); }; ``` @@ -635,28 +807,210 @@ class result_promise { #include int main() { - concurrencpp::result_promise promise; - auto result = promise.get_result(); + concurrencpp::result_promise promise; + auto result = promise.get_result(); + + std::thread my_3_party_executor([promise = std::move(promise)] () mutable { + std::this_thread::sleep_for(std::chrono::seconds(1)); //Imitate real work + promise.set_result("hello world"); + }); + + auto asynchronous_string = result.get(); + std::cout << "result promise returned string: " << asynchronous_string << std::endl; + + my_3_party_executor.join(); +} +``` +In this example, We use `std::thread` as a third-party executor. This represents a scenario when a non-concurrencpp executor is used as part of the application life-cycle. We extract the result object before we pass the promise and block the main thread until the result becomes ready. In `my_3_party_executor`, we set a result as if we `co_return`ed it. + +### Shared result objects + +Shared results are a special kind of result objects that allow multiple consumers to access the asynchronous result, similar to `std::shared_future`. Different consumers from different threads can call functions like `await`, `get` and `resolve` in a thread safe manner. + +Shared results are built from regular result objects and unlike regular result objects, they are both copyable and movable. As such, `shared_result` behaves like an `std::shared_ptr` object. 
If the shared result was moved to another instance, the shared result is empty, and trying to access it will throw an exception. + +In order to support multiple consumers, the shared-result object will return a *reference* to the asynchronous value instead of moving it (like a regular result object). For example, a `shared_result` will return an `int&` when `get`, `await`, etc. are called. If the underlying type of the `shared_result` is `void` or a reference type (like `int&`), it is returned as usual. If the asynchronous result is a thrown exception, it is re-thrown. + +Do note that while acquiring the asynchronous result using `shared_result` from multiple threads is thread-safe, the actual value might not be. For example, multiple threads can acquire an asynchronous integer by receiving its reference (`int&`). This *does not* make the integer itself thread safe. It is alright to mutate the asynchronous value if the asynchronous value is already thread safe. Alternatively, applications are encouraged to use `const` types to begin with (like `const int`), and acquire constant-references (like `const int&`) that prevent mutation. + +#### `shared_result` API +```cpp +class shared_result { + /* + Creates an empty shared-result that isn't associated with any task. + */ + shared_result() noexcept = default; + + /* + Destroys the shared-result. Associated tasks are not cancelled. + The destructor does not block waiting for the asynchronous result to become ready. + */ + ~shared_result() noexcept = default; + + /* + Converts a regular result object to a shared-result object. + After this call, rhs is empty. + */ + shared_result(result rhs); + + /* + Copy constructor. Creates a copy of the shared result object that monitors the same task. + */ + shared_result(const shared_result&) noexcept = default; + + /* + Move constructor. Moves rhs to *this. After this call, rhs is empty. + */ + shared_result(shared_result&& rhs) noexcept = default; + + /* + Copy assignment operator. Copies rhs to *this and monitors the same task that rhs monitors. + */ + shared_result& operator=(const shared_result& rhs) noexcept; + + /* + Move assignment operator. Moves rhs to *this. After this call, rhs is empty. + */ + shared_result& operator=(shared_result&& rhs) noexcept; + + /* + Returns true if this is a non-empty shared-result. + Applications must not use this object if this->operator bool() is false. + */ + operator bool() const noexcept; + + /* + Queries the status of *this. + The return value is any of result_status::idle, result_status::value or result_status::exception. + Throws concurrencpp::errors::empty_result if *this is empty. + */ + result_status status() const; + + /* + Blocks the current thread of execution until this shared-result is ready, when status() != result_status::idle. + Throws concurrencpp::errors::empty_result if *this is empty. + */ + void wait(); + + /* + Blocks until this shared-result is ready or duration has passed. Returns the status of this shared-result after unblocking. + Throws concurrencpp::errors::empty_result if *this is empty. + */ + template + result_status wait_for(std::chrono::duration duration); + + /* + Blocks until this shared-result is ready or timeout_time has been reached. Returns the status of this result after unblocking. + Throws concurrencpp::errors::empty_result if *this is empty.
+ */ + template + result_status wait_until(std::chrono::time_point timeout_time); + + /* + Blocks the current thread of execution until this shared-result is ready, when status() != result_status::idle. + If the result is a valid value, a reference to it is returned, otherwise, get rethrows the asynchronous exception. + Throws concurrencpp::errors::empty_result if *this is empty. + */ + std::add_lvalue_reference_t get(); + + /* + Returns an awaitable used to await this shared-result. + If the shared-result is already ready - the current coroutine resumes immediately in the calling thread of execution. + If the shared-result is not ready yet, the current coroutine is suspended and resumed when the asynchronous result is ready, + by the thread which had set the asynchronous value or exception. + In either way, after resuming, if the result is a valid value, a reference to it is returned. + Otherwise, operator co_await rethrows the asynchronous exception. + Throws concurrencpp::errors::empty_result if *this is empty. + */ + auto operator co_await(); + + + /* + Returns an awaitable used to await this shared-result. + If the shared-result is not ready yet, the current coroutine is suspended and resumed when the asynchronous result is ready, + by scheduling the current coroutine via executor. + If the result is already ready - the behaviour depends on the value of force_rescheduling: + If force_rescheduling = true, then the current coroutine is forcefully suspended and resumed via executor. + If force_rescheduling = false, then the current coroutine resumes immediately in the calling thread of execution. + In either way, after resuming, if the result is a valid value, a reference to it is returned. + Otherwise, operator co_await rethrows the asynchronous exception. + Throws concurrencpp::errors::empty_result if *this is empty. + Throws std::invalid_argument if executor is null. + If this shared-result is ready and force_rescheduling=true, throws any exception that executor::enqueue may throw. + */ + auto await_via(std::shared_ptr executor, bool force_rescheduling = true); + + + /* + Returns an awaitable used to resolve this shared-result. + After co_await expression finishes, *this is returned in a non-empty form, in a ready state. + Throws concurrencpp::errors::empty_result if *this is empty. + */ + auto resolve(); + + /* + Returns an awaitable used to resolve this shared-result. + If the shared-result is not ready yet, the current coroutine is suspended and resumed when the asynchronous result is ready, + by scheduling the current coroutine via executor. + If the result is already ready - the behaviour depends on the value of force_rescheduling: + If force_rescheduling = true, then the current coroutine is forcefully suspended and resumed via executor. + If force_rescheduling = false, then the current coroutine resumes immediately in the calling thread of execution. + In either way, after resuming, *this is returned in a non-empty form and guaranteed that its status is not result_status::idle. + Throws concurrencpp::errors::empty_result if *this is empty. + Throws std::invalid_argument if executor is null. + If this shared-result is ready and force_rescheduling=true, throws any exception that executor::enqueue may throw. 
+ */ + auto resolve_via(std::shared_ptr executor, bool force_rescheduling = true); +}; +``` + +#### `shared_result` example +```cpp +#include "concurrencpp/concurrencpp.h" + +#include +#include + +concurrencpp::result consume_shared_result(concurrencpp::shared_result shared_result, + std::shared_ptr resume_executor) { + std::cout << "Awaiting shared_result to have a value" << std::endl; + + const auto& async_value = co_await shared_result.await_via(resume_executor); + + std::cout << "In thread id " << std::this_thread::get_id() << ", got: " << async_value << ", memory address: " << &async_value << std::endl; +} + +int main() { + concurrencpp::runtime runtime; + auto result = runtime.background_executor()->submit([] { + std::this_thread::sleep_for(std::chrono::seconds(1)); + return 100; + }); + + concurrencpp::shared_result shared_result(std::move(result)); + concurrencpp::result results[8]; - std::thread my_3_party_executor([promise = std::move(promise)] () mutable { - std::this_thread::sleep_for(std::chrono::seconds(1)); //Imitate real work - promise.set_result("hello world"); - }); + for (size_t i = 0; i < 8; i++) { + results[i] = consume_shared_result(shared_result, runtime.thread_pool_executor()); + } + + std::cout << "Main thread waiting for all consumers to finish" << std::endl; - auto asynchronous_string = result.get(); - std::cout << "result promise returned string: " << asynchronous_string << std::endl; + auto all_consumed = concurrencpp::when_all(std::begin(results), std::end(results)); + all_consumed.get(); - my_3_party_executor.join(); + std::cout << "All consumers are done, exiting" << std::endl; + return 0; } ``` -In this example, We use `std::thread` as a third-party executor. This represents a scenario when a non-concurrencpp executor is used as part of the application life-cycle. We extract the result object before we pass the promise and block the main thread until the result becomes ready. In `my_3_party_executor`, we set a result as if we `co_return`ed it. + ### Summery: using tasks and coroutines -A task is an asynchronous operation implementing an asynchronous computational step. Tasks are created by using one of the executor methods or by invoking a concurrencpp coroutine. Tasks might return a result object that is used to consume the asynchronous value or exception the task had produced. When used correctly, result objects don't block, this way we can chain tasks together, creating a bigger, asynchronous flow graph that never blocks. +A task is an asynchronous operation implementing an asynchronous computational step. Tasks are created by using one of the executor methods or by invoking a concurrencpp coroutine. Tasks might return a result object that is used to consume the asynchronous value or exception the task had produced. When used correctly, result objects don't block, this way we can chain tasks together, creating a bigger, asynchronous flow graph that never blocks. -A concurrencpp coroutine is a C++ suspendable function. It is eager, meaning it starts to run the moment it is invoked. -It returns one of `concurrencpp::result` / `concurrencpp::null_result` and contains any of `co_await` or `co_return` in its body. Parallel coroutines are a special kind of coroutines, that start run in another thread, by passing a `concurrencpp::executor_tag` and an instance of a valid concurrencpp executor as the first arguments. +A concurrencpp coroutine is a C++ suspendable function. It is eager, meaning it starts to run the moment it is invoked. 
+It returns one of `concurrencpp::result` / `concurrencpp::null_result` and contains any of `co_await` or `co_return` in its body. Parallel coroutines are a special kind of coroutines that start run in another thread, by passing a `concurrencpp::executor_tag` and an instance of a valid concurrencpp executor as the first arguments. ### Result auxiliary functions @@ -664,80 +1018,80 @@ For completeness, concurrencpp provides helper functions that help manipulate re ```cpp /* - Creates a ready result object by building <> from arguments&&... in-place. + Creates a ready result object by building <> from arguments&&... in-place. */ template result make_ready_result(argument_types&& ... arguments); /* - An overload for void type. + An overload for void type. */ result make_ready_result(); /* - Creates a ready result object from an exception pointer. - The returned result object will re-throw exception_ptr when calling get, await or await_via. - Throws std::invalid_argument if exception_ptr is null. + Creates a ready result object from an exception pointer. + The returned result object will re-throw exception_ptr when calling get, await or await_via. + Throws std::invalid_argument if exception_ptr is null. */ template result make_exceptional_result(std::exception_ptr exception_ptr); /* - Overload. Similar to make_exceptional_result(std::exception_ptr), - but gets an exception object directly. + Overload. Similar to make_exceptional_result(std::exception_ptr), + but gets an exception object directly. */ template result make_exceptional_result(exception_type exception); /* - Creates a result object that becomes ready when all the input results become ready. - Passed result objects are emptied and returned as a tuple. - Throws std::invalid_argument if any of the passed result objects is empty. + Creates a result object that becomes ready when all the input results become ready. + Passed result objects are emptied and returned as a tuple. + Throws std::invalid_argument if any of the passed result objects is empty. */ template result::type...>> when_all(result_types&& ... results); /* - Overload. Similar to when_all(result_types&& ...) but receives a pair of iterators referencing a range. - Passed result objects are emptied and returned as a vector. - If begin == end, the function returns immediately with an empty vector. - Throws std::invalid_argument if any of the passed result objects is empty. + Overload. Similar to when_all(result_types&& ...) but receives a pair of iterators referencing a range. + Passed result objects are emptied and returned as a vector. + If begin == end, the function returns immediately with an empty vector. + Throws std::invalid_argument if any of the passed result objects is empty. */ template -result::value_type>> +result::value_type>> when_all(iterator_type begin, iterator_type end); /* - Overload. Returns a ready result object that doesn't monitor any asynchronous result. + Overload. Returns a ready result object that doesn't monitor any asynchronous result. */ result> when_all(); /* - Helper struct returned from when_any. - index is the position of the ready result in results sequence. - results is either an std::tuple or an std::vector of the results that were passed to when_any. + Helper struct returned from when_any. + index is the position of the ready result in results sequence. + results is either an std::tuple or an std::vector of the results that were passed to when_any. 
*/ template struct when_any_result { - std::size_t index; - sequence_type results; + std::size_t index; + sequence_type results; }; /* - Creates a result object that becomes ready when at least one of the input results is ready. - Passed result objects are emptied and returned as a tuple. - Throws std::invalid_argument if any of the passed result objects is empty. + Creates a result object that becomes ready when at least one of the input results is ready. + Passed result objects are emptied and returned as a tuple. + Throws std::invalid_argument if any of the passed result objects is empty. */ template result>> when_any(result_types&& ... results); /* - Overload. Similar to when_any(result_types&& ...) but receives a pair of iterators referencing a range. - Passed result objects are emptied and returned as a vector. - Throws std::invalid_argument if begin == end. - Throws std::invalid_argument if any of the passed result objects is empty. + Overload. Similar to when_any(result_types&& ...) but receives a pair of iterators referencing a range. + Passed result objects are emptied and returned as a vector. + Throws std::invalid_argument if begin == end. + Throws std::invalid_argument if any of the passed result objects is empty. */ template result::value_type>>> @@ -746,156 +1100,156 @@ result: ### Timers and Timer queues -concurrencpp also provides timers and timer queues. -Timers are objects that define asynchronous actions running on an executor within a well-defined interval of time. +concurrencpp also provides timers and timer queues. +Timers are objects that define asynchronous actions running on an executor within a well-defined interval of time. There are three types of timers - *regular timers*, *oneshot-timers* and *delay objects*. Regular timers have four properties that define them: 1. Callable - a callable that will be scheduled to run as a task periodically. 2. Executor - an executor that schedules the callable to run periodically. -3. Due time - from the time of creation, the interval in milliseconds the timer will be scheduled to run for the first time. -4. Frequency - from the time the timer was scheduled to run for the first time, the interval in milliseconds the callable will be schedule to run periodically, until the timer is destructed or cancelled. +3. Due time - from the time of creation, the interval in milliseconds the timer will be scheduled to run for the first time. +4. Frequency - from the time the timer was scheduled to run for the first time, the interval in milliseconds the callable will be scheduled to run periodically, until the timer is destructed or cancelled. Like other objects in concurrencpp, timers are a move only type that can be empty. When a timer is destructed or `timer::cancel` is called, the timer cancels its scheduled but not yet executed tasks. Ongoing tasks are unaffected. The timer callable must be thread safe. It is recommended to set the due time and the frequency of a timer to a granularity of 50 milliseconds. A timer queue is a concurrencpp worker that manages a collection of timers and processes them in just one thread of execution. It is also the agent used to create new timers. -When a timer deadline (whether it is the timer's due-time or frequency) has reached, the timer queue "fires" the timer by scheduling its callable to run on the associated executor as a task.
+When a timer deadline (whether it is the timer's due-time or frequency) has reached, the timer queue "fires" the timer by scheduling its callable to run on the associated executor as a task. -Just like executors, timer queues also adhere to the RAII concpet. When the runtime object gets out of scope, It shuts down the timer queue, cancelling all pending timers. After a timer queue has been shut down, any subsequent call to `make_timer`, `make_onshot_timer` and `make_delay_object` will throw an `errors::timer_queue_shutdown` exception. +Just like executors, timer queues also adhere to the RAII concept. When the runtime object gets out of scope, It shuts down the timer queue, cancelling all pending timers. After a timer queue has been shut down, any subsequent call to `make_timer`, `make_onshot_timer` and `make_delay_object` will throw an `errors::timer_queue_shutdown` exception. Applications must not try to shut down timer queues by themselves. -#### `timer_queue` API: +#### `timer_queue` API: ```cpp class timer_queue { - /* - Destroyes this timer_queue. - */ - ~timer_queue() noexcept; - - /* - Shuts down this timer_queue: - Tells the underlying thread of execution to quit and joins it. - Cancells all pending timers. - After this call, invocation of any method besides shutdown and shutdown_requested will throw an errors::timer_queue_shutdown. - If shutdown had been called before, this method has no effect. - */ - void shutdown() noexcept; - - /* - Returns true if shutdown had been called before, false otherwise. - */ - bool shutdown_requested() const noexcept; - - /* - Creates a new running timer where *this is associated timer_queue. - Throws std::invalid_argument if executor is null. - Throws errors::timer_queue_shutdown if shutdown had been called before. - */ - template - timer make_timer( - std::chrono::milliseconds due_time, - std::chrono::milliseconds frequency, - std::shared_ptr executor, - callable_type&& callable, - argumet_types&& ... arguments); - - /* - Creates a new one-shot timer where *this is associated timer_queue. - Throws std::invalid_argument if executor is null. - Throws errors::timer_queue_shutdown if shutdown had been called before. - */ - template - timer make_one_shot_timer( - std::chrono::milliseconds due_time, - std::shared_ptr executor, - callable_type&& callable, - argumet_types&& ... arguments); - - /* - Creates a new delay object where *this is associated timer_queue. - Throws std::invalid_argument if executor is null. - Throws errors::timer_queue_shutdown if shutdown had been called before. - */ - result make_delay_object( - std::chrono::milliseconds due_time, - std::shared_ptr executor); + /* + Destroys this timer_queue. + */ + ~timer_queue() noexcept; + + /* + Shuts down this timer_queue: + Tells the underlying thread of execution to quit and joins it. + Cancels all pending timers. + After this call, invocation of any method besides shutdown and shutdown_requested will throw an errors::timer_queue_shutdown. + If shutdown had been called before, this method has no effect. + */ + void shutdown() noexcept; + + /* + Returns true if shutdown had been called before, false otherwise. + */ + bool shutdown_requested() const noexcept; + + /* + Creates a new running timer where *this is the associated timer_queue. + Throws std::invalid_argument if executor is null. + Throws errors::timer_queue_shutdown if shutdown had been called before. 
+ */ + template + timer make_timer( + std::chrono::milliseconds due_time, + std::chrono::milliseconds frequency, + std::shared_ptr executor, + callable_type&& callable, + argumet_types&& ... arguments); + + /* + Creates a new one-shot timer where *this is the associated timer_queue. + Throws std::invalid_argument if executor is null. + Throws errors::timer_queue_shutdown if shutdown had been called before. + */ + template + timer make_one_shot_timer( + std::chrono::milliseconds due_time, + std::shared_ptr executor, + callable_type&& callable, + argumet_types&& ... arguments); + + /* + Creates a new delay object where *this is the associated timer_queue. + Throws std::invalid_argument if executor is null. + Throws errors::timer_queue_shutdown if shutdown had been called before. + */ + result make_delay_object( + std::chrono::milliseconds due_time, + std::shared_ptr executor); }; ``` -#### `timer` API: +#### `timer` API: ```cpp class timer { - /* - Creates an empty timer. - */ - timer() noexcept = default; - - /* - Cancels the timer, if not empty. - */ - ~timer() noexcept; - - /* - Moves the content of rhs to *this. - rhs is empty after this call. - */ - timer(timer&& rhs) noexcept = default; - - /* - Moves the content of rhs to *this. - rhs is empty after this call. - Returns *this. - */ - timer& operator = (timer&& rhs) noexcept; - - /* - Cancels this timer. - After this call, the associated timer_queue will not schedule *this to run again and *this becomes empty. - Scheduled, but not yet executed tasks are cancelled. - Ongoing tasks are uneffected. - This method has no effect if *this is empty or the associated timer_queue has already expired. - */ - void cancel(); - - /* - Returns the associated executor of this timer. - Throws concurrencpp::errors::empty_timer is *this is empty. - */ - std::shared_ptr get_executor() const; - - /* - Returns the associated timer_queue of this timer. - Throws concurrencpp::errors::empty_timer is *this is empty. - */ - std::weak_ptr get_timer_queue() const; - - /* - Returns the due time of this timer. - Throws concurrencpp::errors::empty_timer is *this is empty. - */ - std::chrono::milliseconds get_due_time() const; - - /* - Returns the frequency of this timer. - Throws concurrencpp::errors::empty_timer is *this is empty. - */ - std::chrono::milliseconds get_frequency() const; - - /* - Sets new frequency for this timer. - Callables already scheduled to run at the time of invocation are not affected. - Throws concurrencpp::errors::empty_timer is *this is empty. - */ - void set_frequency(std::chrono::milliseconds new_frequency); - - /* - Returns true is *this is not an empty timer, false otherwise. - The timer should not be used if this->operator bool() is false. - */ - operator bool() const noexcept; + /* + Creates an empty timer. + */ + timer() noexcept = default; + + /* + Cancels the timer, if not empty. + */ + ~timer() noexcept; + + /* + Moves the content of rhs to *this. + rhs is empty after this call. + */ + timer(timer&& rhs) noexcept = default; + + /* + Moves the content of rhs to *this. + rhs is empty after this call. + Returns *this. + */ + timer& operator = (timer&& rhs) noexcept; + + /* + Cancels this timer. + After this call, the associated timer_queue will not schedule *this to run again and *this becomes empty. + Scheduled, but not yet executed tasks are cancelled. + Ongoing tasks are uneffected. + This method has no effect if *this is empty or the associated timer_queue has already expired. 
+ */ + void cancel(); + + /* + Returns the associated executor of this timer. + Throws concurrencpp::errors::empty_timer if *this is empty. + */ + std::shared_ptr get_executor() const; + + /* + Returns the associated timer_queue of this timer. + Throws concurrencpp::errors::empty_timer if *this is empty. + */ + std::weak_ptr get_timer_queue() const; + + /* + Returns the due time of this timer. + Throws concurrencpp::errors::empty_timer if *this is empty. + */ + std::chrono::milliseconds get_due_time() const; + + /* + Returns the frequency of this timer. + Throws concurrencpp::errors::empty_timer if *this is empty. + */ + std::chrono::milliseconds get_frequency() const; + + /* + Sets a new frequency for this timer. + Callables already scheduled to run at the time of invocation are not affected. + Throws concurrencpp::errors::empty_timer if *this is empty. + */ + void set_frequency(std::chrono::milliseconds new_frequency); + + /* + Returns true if *this is not an empty timer, false otherwise. + The timer should not be used if this->operator bool() is false. + */ + operator bool() const noexcept; }; ``` @@ -908,19 +1262,19 @@ class timer { using namespace std::chrono_literals; int main() { - concurrencpp::runtime runtime; - std::atomic_size_t counter = 1; - concurrencpp::timer timer = runtime.timer_queue()->make_timer( - 1500ms, - 2000ms, - runtime.thread_pool_executor(), - [&] { - const auto c = counter.fetch_add(1); - std::cout << "timer was invoked for the " << c << "th time" << std::endl; - }); - - std::this_thread::sleep_for(12s); - return 0; + concurrencpp::runtime runtime; + std::atomic_size_t counter = 1; + concurrencpp::timer timer = runtime.timer_queue()->make_timer( + 1500ms, + 2000ms, + runtime.thread_pool_executor(), + [&] { + const auto c = counter.fetch_add(1); + std::cout << "timer was invoked for the " << c << "th time" << std::endl; + }); + + std::this_thread::sleep_for(12s); + return 0; } ``` In this example, we create a regular timer by using the timer queue. The timer schedules its callable after 1.5 seconds, then fires its callable every 2 seconds. The given callable runs in the threadpool executor. @@ -938,23 +1292,23 @@ A oneshot timer is a one-time timer with only a due time - after it schedules it using namespace std::chrono_literals; int main() { - concurrencpp::runtime runtime; - concurrencpp::timer timer = runtime.timer_queue()->make_one_shot_timer( - 3000ms, - runtime.thread_executor(), - [&] { - std::cout << "hello and goodbye" << std::endl; - }); - - std::this_thread::sleep_for(4s); - return 0; + concurrencpp::runtime runtime; + concurrencpp::timer timer = runtime.timer_queue()->make_one_shot_timer( + 3000ms, + runtime.thread_executor(), + [&] { + std::cout << "hello and goodbye" << std::endl; + }); + + std::this_thread::sleep_for(4s); + return 0; } ``` In this example, we create a timer that runs only once - after 3 seconds from its creation, the timer will schedule its callable to run on a new thread of execution (using `concurrencpp::thread_executor`). #### Delay objects -A delay object is a result object that becomes ready when its due time is reached. Applications can `co_await` this result object to delay the current coroutine in a non-blocking way. The current coroutine is resumed by the executor that was passed to `make_delay_object`. +A delay object is a result object that becomes ready when its due time is reached. Applications can `co_await` this result object to delay the current coroutine in a non-blocking way.
The current coroutine is resumed by the executor that was passed to `make_delay_object`. #### *Delay object example:* ```cpp @@ -965,194 +1319,194 @@ A delay object is a result object that becomes ready when its due time is reache using namespace std::chrono_literals; concurrencpp::null_result delayed_task( - std::shared_ptr tq, - std::shared_ptr ex) { - size_t counter = 1; + std::shared_ptr tq, + std::shared_ptr ex) { + size_t counter = 1; - while(true) { - std::cout << "task was invoked " << counter << " times." << std::endl; - counter++; + while(true) { + std::cout << "task was invoked " << counter << " times." << std::endl; + counter++; - co_await tq->make_delay_object(1500ms, ex); - } + co_await tq->make_delay_object(1500ms, ex); + } } int main() { - concurrencpp::runtime runtime; - delayed_task(runtime.timer_queue(), runtime.thread_pool_executor()); + concurrencpp::runtime runtime; + delayed_task(runtime.timer_queue(), runtime.thread_pool_executor()); - std::this_thread::sleep_for(10s); - return 0; + std::this_thread::sleep_for(10s); + return 0; } ``` -In this example, we created a coroutine (that does not marshal any result or thrown exception), which delays itself in a loop by calling `co_await` on a delay object. +In this example, we created a coroutine (that does not marshal any result or thrown exception), which delays itself in a loop by calling `co_await` on a delay object. ### The runtime object The concurrencpp runtime object is the agent used to acquire, store and create new executors. -The runtime must be created as a value type as soon as the main function starts to run. +The runtime must be created as a value type as soon as the main function starts to run. When the concurrencpp runtime gets out of scope, it iterates over its stored executors and shuts them down one by one by calling `executor::shutdown`. Executors then exit their inner work loop and any subsequent attempt to schedule a new task will throw a `concurrencpp::executor_shutdown` exception. The runtime also contains the global timer queue used to create timers and delay objects. -Upon destruction, stored executors will destroy unexecuted tasks, and wait for ongoing tasks to finish. If an ongoing task tries to use an executor to spawn new tasks or schedule its own task continuation - an exception will be thrown. In this case, ongoing tasks need to quit as soon as possible, allowing their underlying executors to quit. The timer queue will also be shut down, cancelling all running timers. With this RAII style of code, no tasks can be processed before the creation of the runtime object, and while/after the runtime gets out of scope. +Upon destruction, stored executors will destroy unexecuted tasks, and wait for ongoing tasks to finish. If an ongoing task tries to use an executor to spawn new tasks or schedule its own task continuation - an exception will be thrown. In this case, ongoing tasks need to quit as soon as possible, allowing their underlying executors to quit. The timer queue will also be shut down, cancelling all running timers. With this RAII style of code, no tasks can be processed before the creation of the runtime object, and while/after the runtime gets out of scope. This frees concurrent applications from needing to communicate termination messages explicitly. Tasks are free use executors as long as the runtime object is alive. #### `runtime` API ```cpp class runtime { - /* - Creates a runtime object with default options. 
- */ - runtime(); - - /* - Creates a runtime object with user defined options. - */ - runtime(const concurrencpp::runtime_options& options); - - /* - Destroys this runtime object. - Calls executor::shutdown on each monitored executor. - Calls timer_queue::shutdown on the global timer queue. - */ - ~runtime() noexcept; - - /* - Returns this runtime timer queue used to create new times. - */ - std::shared_ptr timer_queue() const noexcept; - - /* - Returns this runtime concurrencpp::inline_executor - */ - std::shared_ptr inline_executor() const noexcept; - - /* - Returns this runtime concurrencpp::thread_pool_executor - */ - std::shared_ptr thread_pool_executor() const noexcept; - - /* - Returns this runtime concurrencpp::background_executor - */ - std::shared_ptr background_executor() const noexcept; - - /* - Returns this runtime concurrencpp::thread_executor - */ - std::shared_ptr thread_executor() const noexcept; - - /* - Creates a new concurrencpp::worker_thread_executor and registers it in this runtime. - Might throw std::bad_alloc or std::system_error if any underlying memory or system resource could not have been acquired. - */ - std::shared_ptr make_worker_thread_executor(); - - /* - Creates a new concurrencpp::manual_executor and registers it in this runtime. - Might throw std::bad_alloc or std::system_error if any underlying memory or system resource could not have been acquired. - */ - std::shared_ptr make_manual_executor(); - - /* - Creates a new user defined executor and registers it in this runtime. - executor_type must be a valid concrete class of concurrencpp::executor. - Might throw std::bad_alloc if no memory is available. - Might throw any exception that the constructor of <> might throw. - */ - template - std::shared_ptr make_executor(argument_types&& ... arguments); - - /* - returns the version of concurrencpp that the library was built with. - */ - static std::tuple version() noexcept; + /* + Creates a runtime object with default options. + */ + runtime(); + + /* + Creates a runtime object with user defined options. + */ + runtime(const concurrencpp::runtime_options& options); + + /* + Destroys this runtime object. + Calls executor::shutdown on each monitored executor. + Calls timer_queue::shutdown on the global timer queue. + */ + ~runtime() noexcept; + + /* + Returns this runtime timer queue used to create new times. + */ + std::shared_ptr timer_queue() const noexcept; + + /* + Returns this runtime concurrencpp::inline_executor + */ + std::shared_ptr inline_executor() const noexcept; + + /* + Returns this runtime concurrencpp::thread_pool_executor + */ + std::shared_ptr thread_pool_executor() const noexcept; + + /* + Returns this runtime concurrencpp::background_executor + */ + std::shared_ptr background_executor() const noexcept; + + /* + Returns this runtime concurrencpp::thread_executor + */ + std::shared_ptr thread_executor() const noexcept; + + /* + Creates a new concurrencpp::worker_thread_executor and registers it in this runtime. + Might throw std::bad_alloc or std::system_error if any underlying memory or system resource could not have been acquired. + */ + std::shared_ptr make_worker_thread_executor(); + + /* + Creates a new concurrencpp::manual_executor and registers it in this runtime. + Might throw std::bad_alloc or std::system_error if any underlying memory or system resource could not have been acquired. + */ + std::shared_ptr make_manual_executor(); + + /* + Creates a new user defined executor and registers it in this runtime. 
+ executor_type must be a valid concrete class of concurrencpp::executor. + Might throw std::bad_alloc if no memory is available. + Might throw any exception that the constructor of <> might throw. + */ + template + std::shared_ptr make_executor(argument_types&& ... arguments); + + /* + returns the version of concurrencpp that the library was built with. + */ + static std::tuple version() noexcept; }; ``` #### Creating user-defined executors -As mentioned before, Applications can create their own custom executor type by inheriting the `derivable_executor` class. +As mentioned before, Applications can create their own custom executor type by inheriting the `derivable_executor` class. There are a few points to consider when implementing user defined executors: -The most important thing is to remember that executors are used from multiple threads, so implemented methods must be thread-safe. +The most important thing is to remember that executors are used from multiple threads, so implemented methods must be thread-safe. New executors can be created using `runtime::make_executor`. Applications must not create new executors with plain instantiation (such as `std::make_shared` or plain `new`), only by using `runtime::make_executor`. Also, applications must not try to re-instantiate the built-in concurrencpp executors, like the `thread_pool_executor` or the `thread_executor`, those executors must only be accessed through their existing instance in the runtime object. Another important point is to handle shutdown correctly: `shutdown`, `shutdown_requested` and `enqueue` should all monitor the executor state and behave accordingly when invoked: -* `shutdown` should tell underlying threads to quit and then join them. +* `shutdown` should tell underlying threads to quit and then join them. * `shutdown` might be called multiple times, and the method must handle this scenario by ignoring any subsequent call to `shutdown` after the first invocation. -* `enqueue` must throw a `concurrencpp::errors::executor_shutdown` exception if `shutdown` had been called before. +* `enqueue` must throw a `concurrencpp::errors::executor_shutdown` exception if `shutdown` had been called before. Implementing an executor is one of the rare cases applications need to work with `concurrencpp::task` class directly. `concurrencpp::task` is a `std::function` like object, but with a few differences. -Like `std::function`, the task object stores a callable that acts as the asynchronous operation. +Like `std::function`, the task object stores a callable that acts as the asynchronous operation. Unlike `std::function`, `concurrencpp::task` is a move only type. On invocation, task objects receive no parameters and return `void`. Moreover, every task object can be invoked only once. After the first invocation, the task object becomes empty. -Invoking an empty task object is equivalent of invoking an empty lambda (`[]{}`), and will not throw any exception. +Invoking an empty task object is equivalent to invoking an empty lambda (`[]{}`), and will not throw any exception. Task objects receive their callable as a forwarding reference (`type&&` where `type` is a template parameter), and not by copy (like `std::function`). Construction of the stored callable happens in-place. This allows task objects to contain callables that are move-only type (like `std::unique_ptr` and `concurrencpp::result`). -Task objects try to use different methods to optimize the usage of the stored types. 
+Task objects try to use different methods to optimize the usage of the stored types. Task objects apply the short-buffer-optimization (sbo) for regular, small callables, and will inline calls to `std::coroutine_handle` by calling them directly without virtual dispatch. #### `task` API ```cpp class task { - /* - Creates an empty task object. - */ + /* + Creates an empty task object. + */ task() noexcept; - /* - Creates a task object by moving the stored callable of rhs to *this. - If rhs is empty, then *this will also be empty after construction. - After this call, rhs is empty. + /* + Creates a task object by moving the stored callable of rhs to *this. + If rhs is empty, then *this will also be empty after construction. + After this call, rhs is empty. */ task(task&& rhs) noexcept; - /* - Creates a task object by storing callable in *this. - <::type>> will be in-place- - constructed inside *this by perfect forwarding callable. - */ + /* + Creates a task object by storing callable in *this. + <::type>> will be in-place- + constructed inside *this by perfect forwarding callable. + */ template task(callable_type&& callable); - /* - Destroyes stored callable, does nothing if empty. - */ + /* + Destroys stored callable, does nothing if empty. + */ ~task() noexcept; task(const task& rhs) = delete; task& operator=(const task&& rhs) = delete; - /* - If *this is empty, does nothing. - Invokes stored callable, and immediately destroys it. - After this call, *this is empty. - May throw any exception that the invoked callable may throw. - */ + /* + If *this is empty, does nothing. + Invokes stored callable, and immediately destroys it. + After this call, *this is empty. + May throw any exception that the invoked callable may throw. + */ void operator()(); - /* - Moves the stored callable of rhs to *this. - If rhs is empty, then *this will also be empty after this call. - If *this already contains a stored callable, operator = destroys it first. - */ + /* + Moves the stored callable of rhs to *this. + If rhs is empty, then *this will also be empty after this call. + If *this already contains a stored callable, operator = destroys it first. + */ task& operator=(task&& rhs) noexcept; - /* - If *this is not empty, task::clear destroys the stored callable and empties *this. - If *this is empty, clear does nothing. - */ + /* + If *this is not empty, task::clear destroys the stored callable and empties *this. + If *this is empty, clear does nothing. + */ void clear() noexcept; - /* - Returns true if *this stores a callable. false otherwise. - */ + /* + Returns true if *this stores a callable. false otherwise. 
+ */ operator bool() const noexcept; - /* - Returns true if *this stores a callable, - and that stored callable has the same type as <::type>> - */ + /* + Returns true if *this stores a callable, + and that stored callable has the same type as <::type>> + */ template bool contains() const noexcept; }; @@ -1173,110 +1527,110 @@ When implementing user-defined executors, it is up to the implementation to stor class logging_executor : public concurrencpp::derivable_executor { private: - mutable std::mutex _lock; - std::queue _queue; - std::condition_variable _condition; - bool _shutdown_requested; - std::thread _thread; - const std::string _prefix; - - void work_loop() { - while (true) { - std::unique_lock lock(_lock); - if (_shutdown_requested) { - return; - } - - if (!_queue.empty()) { - auto task = std::move(_queue.front()); - _queue.pop(); - lock.unlock(); - std::cout << _prefix << " A task is being executed" << std::endl; - task(); - continue; - } - - _condition.wait(lock, [this] { - return !_queue.empty() || _shutdown_requested; - }); - } - } + mutable std::mutex _lock; + std::queue _queue; + std::condition_variable _condition; + bool _shutdown_requested; + std::thread _thread; + const std::string _prefix; + + void work_loop() { + while (true) { + std::unique_lock lock(_lock); + if (_shutdown_requested) { + return; + } + + if (!_queue.empty()) { + auto task = std::move(_queue.front()); + _queue.pop(); + lock.unlock(); + std::cout << _prefix << " A task is being executed" << std::endl; + task(); + continue; + } + + _condition.wait(lock, [this] { + return !_queue.empty() || _shutdown_requested; + }); + } + } public: - logging_executor(std::string_view prefix) : - derivable_executor("logging_executor"), - _shutdown_requested(false), - _prefix(prefix) { - _thread = std::thread([this] { - work_loop(); - }); - } - - void enqueue(concurrencpp::task task) override { - std::cout << _prefix << " A task is being enqueued!" << std::endl; - - std::unique_lock lock(_lock); - if (_shutdown_requested) { - throw concurrencpp::errors::executor_shutdown("logging executor - executor was shutdown."); - } - - _queue.emplace(std::move(task)); - _condition.notify_one(); - } - - void enqueue(std::span tasks) override { - std::cout << _prefix << tasks.size() << " tasks are being enqueued!" << std::endl; - - std::unique_lock lock(_lock); - if (_shutdown_requested) { - throw concurrencpp::errors::executor_shutdown("logging executor - executor was shutdown."); - } - - for (auto& task : tasks) { - _queue.emplace(std::move(task)); - } - - _condition.notify_one(); - } - - int max_concurrency_level() const noexcept override { - return 1; - } - - bool shutdown_requested() const noexcept override { - std::unique_lock lock(_lock); - return _shutdown_requested; - } - - void shutdown() noexcept override { - std::cout << _prefix << " shutdown requested" << std::endl; - - std::unique_lock lock(_lock); - if (_shutdown_requested) return; //nothing to do. - _shutdown_requested = true; - lock.unlock(); - - _condition.notify_one(); - _thread.join(); - } + logging_executor(std::string_view prefix) : + derivable_executor("logging_executor"), + _shutdown_requested(false), + _prefix(prefix) { + _thread = std::thread([this] { + work_loop(); + }); + } + + void enqueue(concurrencpp::task task) override { + std::cout << _prefix << " A task is being enqueued!" 
<< std::endl; + + std::unique_lock lock(_lock); + if (_shutdown_requested) { + throw concurrencpp::errors::executor_shutdown("logging executor - executor was shutdown."); + } + + _queue.emplace(std::move(task)); + _condition.notify_one(); + } + + void enqueue(std::span tasks) override { + std::cout << _prefix << tasks.size() << " tasks are being enqueued!" << std::endl; + + std::unique_lock lock(_lock); + if (_shutdown_requested) { + throw concurrencpp::errors::executor_shutdown("logging executor - executor was shutdown."); + } + + for (auto& task : tasks) { + _queue.emplace(std::move(task)); + } + + _condition.notify_one(); + } + + int max_concurrency_level() const noexcept override { + return 1; + } + + bool shutdown_requested() const noexcept override { + std::unique_lock lock(_lock); + return _shutdown_requested; + } + + void shutdown() noexcept override { + std::cout << _prefix << " shutdown requested" << std::endl; + + std::unique_lock lock(_lock); + if (_shutdown_requested) return; //nothing to do. + _shutdown_requested = true; + lock.unlock(); + + _condition.notify_one(); + _thread.join(); + } }; int main() { - concurrencpp::runtime runtime; - auto logging_ex = runtime.make_executor("Session #1234"); + concurrencpp::runtime runtime; + auto logging_ex = runtime.make_executor("Session #1234"); - for (size_t i = 0; i < 10; i++) { - logging_ex->post([] { - std::cout << "hello world" << std::endl; - }); - } + for (size_t i = 0; i < 10; i++) { + logging_ex->post([] { + std::cout << "hello world" << std::endl; + }); + } - std::getchar(); - return 0; + std::getchar(); + return 0; } ``` -In this example, we created an executor which logs actions like enqueuing a task or executing it. We implement the `executor` interface, and we request the runtime to create and store an instance of it by calling `runtime::make_executor`. The rest of the application behaves exactly the same as if we were to use non user-defined executors. +In this example, we created an executor which logs actions like enqueuing a task or executing it. We implement the `executor` interface, and we request the runtime to create and store an instance of it by calling `runtime::make_executor`. The rest of the application behaves exactly the same as if we were to use non user-defined executors. 
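Since user-defined executors receive and invoke `concurrencpp::task` objects directly, the following minimal sketch (based on the `task` API documented above, not an official library sample) demonstrates the invoke-once semantics that an executor implementation can rely on:

```cpp
#include "concurrencpp/concurrencpp.h"

#include <iostream>

int main() {
    // Construct a task from a lambda; the callable is stored in-place.
    concurrencpp::task task([] {
        std::cout << "task invoked" << std::endl;
    });

    std::cout << std::boolalpha;
    std::cout << "stores a callable: " << static_cast<bool>(task) << std::endl;  // true

    task();  // invokes the stored callable and immediately destroys it

    // After the first invocation the task is empty; invoking it again behaves
    // like invoking an empty lambda and throws nothing.
    std::cout << "stores a callable: " << static_cast<bool>(task) << std::endl;  // false
    task();

    return 0;
}
```

Because `task` is a move-only type, executor implementations typically `std::move` it into their internal queue, exactly as the logging executor above does with `_queue.emplace(std::move(task))`.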
### Supported platforms and tools diff --git a/cmake/concurrencppInjectTSAN.cmake b/cmake/concurrencppInjectTSAN.cmake index 4fa0f9f0..12d94fa8 100644 --- a/cmake/concurrencppInjectTSAN.cmake +++ b/cmake/concurrencppInjectTSAN.cmake @@ -4,6 +4,7 @@ macro(add_library TARGET) _add_library(${ARGV}) if("${TARGET}" STREQUAL "concurrencpp") - target_compile_options(concurrencpp PRIVATE -fsanitize=thread) + target_compile_options(concurrencpp PUBLIC -fsanitize=thread) + target_link_options(concurrencpp PUBLIC -fsanitize=thread) endif() endmacro() diff --git a/include/concurrencpp/concurrencpp.h b/include/concurrencpp/concurrencpp.h index 8fee8d01..6340fc61 100644 --- a/include/concurrencpp/concurrencpp.h +++ b/include/concurrencpp/concurrencpp.h @@ -10,6 +10,8 @@ #include "concurrencpp/results/result.h" #include "concurrencpp/results/make_result.h" #include "concurrencpp/results/when_result.h" +#include "concurrencpp/results/shared_result.h" +#include "concurrencpp/results/shared_result_awaitable.h" #include "concurrencpp/executors/executor_all.h" #endif diff --git a/include/concurrencpp/executors/executor.h b/include/concurrencpp/executors/executor.h index 70e78ddc..d83ff5ad 100644 --- a/include/concurrencpp/executors/executor.h +++ b/include/concurrencpp/executors/executor.h @@ -3,6 +3,7 @@ #include "concurrencpp/task.h" #include "concurrencpp/results/result.h" +#include "concurrencpp/results/promises.h" #include #include diff --git a/include/concurrencpp/executors/inline_executor.h b/include/concurrencpp/executors/inline_executor.h index a599b69e..455fbdb3 100644 --- a/include/concurrencpp/executors/inline_executor.h +++ b/include/concurrencpp/executors/inline_executor.h @@ -11,11 +11,9 @@ namespace concurrencpp { std::atomic_bool m_abort; void throw_if_aborted() const { - if (!m_abort.load(std::memory_order_relaxed)) { - return; + if (m_abort.load(std::memory_order_relaxed)) { + details::throw_executor_shutdown_exception(name); } - - details::throw_executor_shutdown_exception(name); } public: diff --git a/include/concurrencpp/executors/manual_executor.h b/include/concurrencpp/executors/manual_executor.h index de999d30..184814e4 100644 --- a/include/concurrencpp/executors/manual_executor.h +++ b/include/concurrencpp/executors/manual_executor.h @@ -50,7 +50,6 @@ namespace concurrencpp { size_t clear(); bool loop_once(); - bool loop_once_for(std::chrono::milliseconds max_waiting_time); template @@ -59,7 +58,6 @@ namespace concurrencpp { } size_t loop(size_t max_count); - size_t loop_for(size_t max_count, std::chrono::milliseconds max_waiting_time); template @@ -68,7 +66,6 @@ namespace concurrencpp { } void wait_for_task(); - bool wait_for_task_for(std::chrono::milliseconds max_waiting_time); template @@ -77,7 +74,6 @@ namespace concurrencpp { } void wait_for_tasks(size_t count); - size_t wait_for_tasks_for(size_t count, std::chrono::milliseconds max_waiting_time); template diff --git a/include/concurrencpp/executors/thread_executor.h b/include/concurrencpp/executors/thread_executor.h index 63234849..1b233d1b 100644 --- a/include/concurrencpp/executors/thread_executor.h +++ b/include/concurrencpp/executors/thread_executor.h @@ -1,8 +1,8 @@ #ifndef CONCURRENCPP_THREAD_EXECUTOR_H #define CONCURRENCPP_THREAD_EXECUTOR_H -#include "concurrencpp/executors/derivable_executor.h" #include "concurrencpp/threads/thread.h" +#include "concurrencpp/executors/derivable_executor.h" #include #include diff --git a/include/concurrencpp/platform_defs.h b/include/concurrencpp/platform_defs.h index 102cee51..fe34f962 
100644 --- a/include/concurrencpp/platform_defs.h +++ b/include/concurrencpp/platform_defs.h @@ -6,7 +6,7 @@ #elif defined(unix) || defined(__unix__) || defined(__unix) # define CRCPP_UNIX_OS #elif defined(__APPLE__) || defined(__MACH__) -# define CRCPP_MACH_OS +# define CRCPP_MAC_OS #elif defined(__FreeBSD__) # define CRCPP_FREE_BSD_OS #elif defined(__ANDROID__) diff --git a/include/concurrencpp/results/constants.h b/include/concurrencpp/results/constants.h index a60bdb2f..4c53f842 100644 --- a/include/concurrencpp/results/constants.h +++ b/include/concurrencpp/results/constants.h @@ -48,6 +48,28 @@ namespace concurrencpp::details::consts { inline const char* k_broken_task_exception_error_msg = "concurrencpp::result - Associated task was interrupted abnormally"; + inline const char* k_shared_result_status_error_msg = "shared_result::status() - result is empty."; + + inline const char* k_shared_result_get_error_msg = "shared_result::get() - result is empty."; + + inline const char* k_shared_result_wait_error_msg = "shared_result::wait() - result is empty."; + + inline const char* k_shared_result_wait_for_error_msg = "shared_result::wait_for() - result is empty."; + + inline const char* k_shared_result_wait_until_error_msg = "shared_result::wait_until() - result is empty."; + + inline const char* k_shared_result_operator_co_await_error_msg = "shared_result::operator co_await() - result is empty."; + + inline const char* k_shared_result_await_via_error_msg = "shared_result::await_via() - result is empty."; + + inline const char* k_shared_result_await_via_executor_null_error_msg = "shared_result::await_via() - given executor is null."; + + inline const char* k_shared_result_resolve_error_msg = "shared_result::resolve() - result is empty."; + + inline const char* k_shared_result_resolve_via_error_msg = "shared_result::resolve_via() - result is empty."; + + inline const char* k_shared_result_resolve_via_executor_null_error_msg = "shared_result::resolve_via() - given executor is null."; + } // namespace concurrencpp::details::consts #endif diff --git a/include/concurrencpp/results/impl/consumer_context.h b/include/concurrencpp/results/impl/consumer_context.h index 8997bfdc..ded3747a 100644 --- a/include/concurrencpp/results/impl/consumer_context.h +++ b/include/concurrencpp/results/impl/consumer_context.h @@ -1,46 +1,64 @@ #ifndef CONCURRENCPP_CONSUMER_CONTEXT_H #define CONCURRENCPP_CONSUMER_CONTEXT_H -#include "concurrencpp/task.h" -#include "concurrencpp/forward_declerations.h" #include "concurrencpp/coroutines/coroutine.h" +#include "concurrencpp/results/result_fwd_declerations.h" #include #include namespace concurrencpp::details { - class await_context { + class await_via_functor; - private: - details::coroutine_handle m_handle; - std::exception_ptr m_interrupt_exception; + class await_via_context { public: - void set_coro_handle(details::coroutine_handle coro_handle) noexcept; - void set_interrupt(const std::exception_ptr& interrupt); + class await_context { - void operator()() noexcept; + private: + coroutine_handle handle; + std::exception_ptr interrupt_exception; - void throw_if_interrupted() const; + public: + void resume() noexcept; - concurrencpp::task to_task() noexcept; - }; + void set_coro_handle(coroutine_handle coro_handle) noexcept; + void set_interrupt(const std::exception_ptr& interrupt) noexcept; - class await_via_context { + void throw_if_interrupted() const; + }; private: - await_context m_await_context; + await_context m_await_ctx; std::shared_ptr m_executor; public: 
await_via_context() noexcept = default; - await_via_context(std::shared_ptr executor) noexcept; - - void set_coro_handle(details::coroutine_handle coro_handle) noexcept; + await_via_context(const std::shared_ptr& executor) noexcept; void operator()() noexcept; + void resume() noexcept; + + void set_coro_handle(coroutine_handle coro_handle) noexcept; + void set_interrupt(const std::exception_ptr& interrupt) noexcept; + void throw_if_interrupted() const; + + await_via_functor get_functor() noexcept; + }; + + class await_via_functor { + + private: + await_via_context::await_context* m_ctx; + + public: + await_via_functor(await_via_context::await_context* ctx) noexcept; + await_via_functor(await_via_functor&& rhs) noexcept; + ~await_via_functor() noexcept; + + void operator()() noexcept; }; class wait_context { @@ -61,6 +79,7 @@ namespace concurrencpp::details { protected: std::atomic_size_t m_counter; + std::recursive_mutex m_lock; public: virtual ~when_all_state_base() noexcept = default; @@ -86,6 +105,7 @@ namespace concurrencpp::details { public: when_any_context(std::shared_ptr when_any_state, size_t index) noexcept; + when_any_context(const when_any_context&) noexcept = default; void operator()() const noexcept; }; @@ -93,15 +113,16 @@ namespace concurrencpp::details { class consumer_context { private: - enum class consumer_status { idle, await, await_via, wait, when_all, when_any }; + enum class consumer_status { idle, await, await_via, wait, when_all, when_any, shared }; union storage { int idle; - await_context* await_context; + coroutine_handle caller_handle; await_via_context* await_via_ctx; std::shared_ptr wait_ctx; - std::shared_ptr when_all_state; + std::shared_ptr when_all_ctx; when_any_context when_any_ctx; + std::weak_ptr shared_ctx; template static void build(type& o, argument_type&&... arguments) noexcept { @@ -126,18 +147,14 @@ namespace concurrencpp::details { ~consumer_context() noexcept; void clear() noexcept; + void resume_consumer() const noexcept; - void set_await_context(await_context* await_context) noexcept; - - void set_await_via_context(await_via_context* await_ctx) noexcept; - + void set_await_handle(coroutine_handle caller_handle) noexcept; + void set_await_via_context(await_via_context& await_ctx) noexcept; void set_wait_context(std::shared_ptr wait_ctx) noexcept; - void set_when_all_context(std::shared_ptr when_all_state) noexcept; - void set_when_any_context(std::shared_ptr when_any_ctx, size_t index) noexcept; - - void operator()() noexcept; + void set_shared_context(std::weak_ptr shared_result_state) noexcept; }; } // namespace concurrencpp::details diff --git a/include/concurrencpp/results/impl/producer_context.h b/include/concurrencpp/results/impl/producer_context.h index f12e78de..9b616ea0 100644 --- a/include/concurrencpp/results/impl/producer_context.h +++ b/include/concurrencpp/results/impl/producer_context.h @@ -1,9 +1,8 @@ #ifndef CONCURRENCPP_PRODUCER_CONTEXT_H #define CONCURRENCPP_PRODUCER_CONTEXT_H -#include "concurrencpp/results/result_fwd_declerations.h" - #include +#include #include @@ -20,8 +19,10 @@ namespace concurrencpp::details { } public: + producer_context& operator=(producer_context&& rhs) noexcept = default; + template - void build_result(argument_types&&... arguments) { + void build_result(argument_types&&... 
arguments) noexcept(noexcept(type(std::forward(arguments)...))) { assert(!m_result.has_value()); assert(!static_cast(m_exception)); m_result.emplace(std::forward(arguments)...); @@ -57,6 +58,17 @@ namespace concurrencpp::details { assert(static_cast(m_exception)); std::rethrow_exception(m_exception); } + + type& get_ref() { + assert_state(); + + if (m_result.has_value()) { + return m_result.value(); + } + + assert(static_cast(m_exception)); + std::rethrow_exception(m_exception); + } }; template<> @@ -71,6 +83,8 @@ namespace concurrencpp::details { } public: + producer_context& operator=(producer_context&& rhs) noexcept = default; + void build_result() noexcept { assert(!m_ready); assert(!static_cast(m_exception)); @@ -107,6 +121,10 @@ namespace concurrencpp::details { assert(static_cast(m_exception)); std::rethrow_exception(m_exception); } + + void get_ref() const { + return get(); + } }; template @@ -121,9 +139,12 @@ namespace concurrencpp::details { } public: + producer_context& operator=(producer_context&& rhs) noexcept = default; + void build_result(type& reference) noexcept { assert(m_pointer == nullptr); assert(!static_cast(m_exception)); + assert(reinterpret_cast(std::addressof(reference)) % alignof(type) == 0); m_pointer = std::addressof(reference); } @@ -147,7 +168,7 @@ namespace concurrencpp::details { return result_status::idle; } - type& get() { + type& get() const { assert_state(); if (m_pointer != nullptr) { @@ -158,6 +179,10 @@ namespace concurrencpp::details { assert(static_cast(m_exception)); std::rethrow_exception(m_exception); } + + type& get_ref() const { + return get(); + } }; } // namespace concurrencpp::details diff --git a/include/concurrencpp/results/impl/result_state.h b/include/concurrencpp/results/impl/result_state.h index e0388ce7..f7dadad2 100644 --- a/include/concurrencpp/results/impl/result_state.h +++ b/include/concurrencpp/results/impl/result_state.h @@ -13,31 +13,27 @@ namespace concurrencpp::details { class result_state_base { public: - enum class pc_state { idle, producer, consumer }; + enum class pc_state { idle, consumer_set, consumer_done, producer_done }; private: bool await_via_ready(await_via_context& await_ctx, bool force_rescheduling) noexcept; protected: - std::atomic m_pc_state; + std::atomic m_pc_state {pc_state::idle}; consumer_context m_consumer; + coroutine_handle m_done_handle; void assert_done() const noexcept; public: void wait(); - - bool await(await_context& await_ctx) noexcept; - + bool await(coroutine_handle caller_handle) noexcept; bool await_via(await_via_context& await_ctx, bool force_rescheduling) noexcept; - void when_all(std::shared_ptr when_all_state) noexcept; - when_any_status when_any(std::shared_ptr when_any_state, size_t index) noexcept; + void share_result(std::weak_ptr shared_result_state) noexcept; void try_rewind_consumer() noexcept; - - void publish_result() noexcept; }; template @@ -46,6 +42,15 @@ namespace concurrencpp::details { private: producer_context m_producer; + static void delete_self(coroutine_handle done_handle, result_state* state) noexcept { + if (static_cast(done_handle)) { + assert(done_handle.done()); + return done_handle.destroy(); + } + + delete state; + } + template void from_callable(std::true_type /*is_void_type*/, callable_type&& callable) { callable(); @@ -59,7 +64,7 @@ namespace concurrencpp::details { public: template - void set_result(argument_types&&... arguments) { + void set_result(argument_types&&... 
arguments) noexcept(noexcept(type(std::forward(arguments)...))) { m_producer.build_result(std::forward(arguments)...); } @@ -71,7 +76,7 @@ namespace concurrencpp::details { // Consumer-side functions result_status status() const noexcept { const auto state = m_pc_state.load(std::memory_order_acquire); - assert(state != pc_state::consumer); + assert(state != pc_state::consumer_set); if (state == pc_state::idle) { return result_status::idle; @@ -81,9 +86,9 @@ namespace concurrencpp::details { } template - concurrencpp::result_status wait_for(std::chrono::duration duration) { + result_status wait_for(std::chrono::duration duration) { const auto state_0 = m_pc_state.load(std::memory_order_acquire); - if (state_0 == pc_state::producer) { + if (state_0 == pc_state::producer_done) { return m_producer.status(); } @@ -91,7 +96,7 @@ namespace concurrencpp::details { m_consumer.set_wait_context(wait_ctx); auto expected_idle_state = pc_state::idle; - const auto idle_0 = m_pc_state.compare_exchange_strong(expected_idle_state, pc_state::consumer, std::memory_order_acq_rel); + const auto idle_0 = m_pc_state.compare_exchange_strong(expected_idle_state, pc_state::consumer_set, std::memory_order_acq_rel); if (!idle_0) { assert_done(); @@ -113,7 +118,7 @@ namespace concurrencpp::details { status back to idle, then the consumer is "protected" because the producer will not try to access the consumer if the flag doesn't say so. */ - auto expected_consumer_state = pc_state::consumer; + auto expected_consumer_state = pc_state::consumer_set; const auto idle_1 = m_pc_state.compare_exchange_strong(expected_consumer_state, pc_state::idle, std::memory_order_acq_rel); if (!idle_1) { @@ -126,7 +131,7 @@ namespace concurrencpp::details { } template - concurrencpp::result_status wait_until(const std::chrono::time_point& timeout_time) { + result_status wait_until(const std::chrono::time_point& timeout_time) { const auto now = clock::now(); if (timeout_time <= now) { return status(); @@ -141,6 +146,10 @@ namespace concurrencpp::details { return m_producer.get(); } + void initialize_producer_from(producer_context& producer_ctx) noexcept { + producer_ctx = std::move(m_producer); + } + template void from_callable(callable_type&& callable) noexcept { using is_void = std::is_same; @@ -151,7 +160,74 @@ namespace concurrencpp::details { set_exception(std::current_exception()); } } + + void complete_producer(coroutine_handle done_handle = {}) noexcept { + m_done_handle = done_handle; + + const auto state_before = this->m_pc_state.exchange(pc_state::producer_done, std::memory_order_acq_rel); + assert(state_before != pc_state::producer_done); + + switch (state_before) { + case pc_state::consumer_set: { + m_consumer.resume_consumer(); + return; + } + + case pc_state::idle: { + return; + } + + case pc_state::consumer_done: { + return delete_self(done_handle, this); + } + + default: { + break; + } + } + + assert(false); + } + + void complete_consumer() noexcept { + const auto pc_state = this->m_pc_state.load(std::memory_order_acquire); + if (pc_state == pc_state::producer_done) { + return delete_self(m_done_handle, this); + } + + const auto pc_state1 = this->m_pc_state.exchange(pc_state::consumer_done, std::memory_order_acq_rel); + assert(pc_state1 != pc_state::consumer_set); + + if (pc_state1 == pc_state::producer_done) { + return delete_self(m_done_handle, this); + } + + assert(pc_state1 == pc_state::idle); + } + }; + + template + struct consumer_result_state_deleter { + void operator()(result_state* state_ptr) { + assert(state_ptr != 
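The complete_producer/complete_consumer pair and the two deleters above replace the old shared_ptr-based ownership: the result_state is freed by whichever side (producer or consumer) detaches last. A minimal standalone sketch of that two-party protocol, with hypothetical names and omitting the consumer_set waiting state the real code also tracks:

```cpp
#include <atomic>

// Illustrative only: two parties share one heap object and each calls its
// complete_* function exactly once; whichever finishes last deletes it.
struct shared_block {
    enum class state { idle, producer_done, consumer_done };
    std::atomic<state> m_state {state::idle};

    void complete_producer() noexcept {
        // if the consumer already detached, the producer frees the block
        if (m_state.exchange(state::producer_done, std::memory_order_acq_rel) == state::consumer_done) {
            delete this;
        }
    }

    void complete_consumer() noexcept {
        // if the producer already finished, the consumer frees the block
        if (m_state.exchange(state::consumer_done, std::memory_order_acq_rel) == state::producer_done) {
            delete this;
        }
    }
};

int main() {
    auto* block = new shared_block();
    block->complete_producer();  // producer finishes first ...
    block->complete_consumer();  // ... so this call deletes the block
}
```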
nullptr); + state_ptr->complete_consumer(); + } + }; + + template + struct producer_result_state_deleter { + void operator()(result_state* state_ptr) { + assert(state_ptr != nullptr); + state_ptr->complete_producer(); + } }; + + template + using consumer_result_state_ptr = std::unique_ptr, consumer_result_state_deleter>; + + template + using producer_result_state_ptr = std::unique_ptr, producer_result_state_deleter>; + } // namespace concurrencpp::details #endif diff --git a/include/concurrencpp/results/impl/shared_result_state.h b/include/concurrencpp/results/impl/shared_result_state.h new file mode 100644 index 00000000..96885f2d --- /dev/null +++ b/include/concurrencpp/results/impl/shared_result_state.h @@ -0,0 +1,176 @@ +#ifndef CONCURRENCPP_SHARED_RESULT_STATE_H +#define CONCURRENCPP_SHARED_RESULT_STATE_H + +#include "concurrencpp/results/impl/result_state.h" + +#include + +namespace concurrencpp::details { + struct shared_await_context { + shared_await_context* next = nullptr; + coroutine_handle caller_handle; + }; + + struct shared_await_via_context { + shared_await_via_context* next = nullptr; + await_via_context await_context; + + shared_await_via_context(std::shared_ptr executor) noexcept; + }; +} // namespace concurrencpp::details + +namespace concurrencpp::details { + class shared_result_state_base { + + protected: + mutable std::shared_mutex m_lock; + shared_await_context* m_awaiters = nullptr; + shared_await_via_context* m_via_awaiters = nullptr; + std::condition_variable_any m_condition; + bool m_ready = false; + + void await_impl(std::unique_lock& write_lock, shared_await_context& awaiter) noexcept; + void await_via_impl(std::unique_lock& write_lock, shared_await_via_context& awaiter) noexcept; + void wait_impl(std::unique_lock& write_lock) noexcept; + bool wait_for_impl(std::unique_lock& write_lock, std::chrono::milliseconds ms) noexcept; + + void notify_all(std::unique_lock& lock) noexcept; + + public: + virtual ~shared_result_state_base() noexcept = default; + virtual void on_result_ready() noexcept = 0; + }; + + template + class shared_result_state final : public shared_result_state_base { + + private: + consumer_result_state_ptr m_state; + producer_context m_producer_context; + + void on_result_ready() noexcept override { + std::unique_lock lock(m_lock); + auto state = std::move(m_state); + state->initialize_producer_from(m_producer_context); + notify_all(lock); + } + + public: + shared_result_state(consumer_result_state_ptr state) : m_state(std::move(state)) {} + + ~shared_result_state() noexcept { + std::unique_lock lock(m_lock); + auto state = std::move(m_state); + lock.unlock(); + + if (static_cast(state)) { + state->try_rewind_consumer(); + } + } + + result_status status() const noexcept { + std::shared_lock lock(m_lock); + return m_producer_context.status(); + } + + bool await(shared_await_context& awaiter) noexcept { + { + std::shared_lock read_lock(m_lock); + if (m_ready) { + return false; + } + } + + std::unique_lock write_lock(m_lock); + if (m_ready) { + return false; + } + + await_impl(write_lock, awaiter); + return true; + } + + bool await_via(shared_await_via_context& awaiter, const bool force_rescheduling) noexcept { + auto resume_if_ready = [&awaiter, force_rescheduling]() mutable { + if (force_rescheduling) { + awaiter.await_context(); + } + + return force_rescheduling; + }; + + { + std::shared_lock read_lock(m_lock); + if (m_ready) { + return resume_if_ready(); + } + } + + std::unique_lock write_lock(m_lock); + if (m_ready) { + return 
resume_if_ready(); + } + + await_via_impl(write_lock, awaiter); + return true; + } + + void wait() noexcept { + { + std::shared_lock read_lock(m_lock); + if (m_ready) { + return; + } + } + + std::unique_lock lock(m_lock); + if (m_ready) { + return; + } + + wait_impl(lock); + } + + template + result_status wait_for(std::chrono::duration duration) noexcept { + { + std::shared_lock read_lock(m_lock); + if (m_ready) { + return m_producer_context.status(); + } + } + + const auto ms = std::chrono::duration_cast(duration) + std::chrono::milliseconds(1); + + std::unique_lock lock(m_lock); + if (m_ready) { + return m_producer_context.status(); + } + + const auto ready = wait_for_impl(lock, ms); + if (ready) { + return m_producer_context.status(); + } + + lock.unlock(); + return result_status::idle; + } + + template + result_status wait_until(const std::chrono::time_point& timeout_time) noexcept { + const auto now = clock::now(); + if (timeout_time <= now) { + return status(); + } + + const auto diff = timeout_time - now; + return wait_for(diff); + } + + std::add_lvalue_reference_t get() { + return m_producer_context.get_ref(); + } + }; +} // namespace concurrencpp::details + +#endif diff --git a/include/concurrencpp/results/make_result.h b/include/concurrencpp/results/make_result.h index 70652212..c20ecb68 100644 --- a/include/concurrencpp/results/make_result.h +++ b/include/concurrencpp/results/make_result.h @@ -12,10 +12,13 @@ namespace concurrencpp { static_assert(std::is_same_v ? (sizeof...(argument_types) == 0) : true, "concurrencpp::make_ready_result - this overload does not accept any argument."); - auto result_state_ptr = std::make_shared>(); - result_state_ptr->set_result(std::forward(arguments)...); - result_state_ptr->publish_result(); - return {std::move(result_state_ptr)}; + details::producer_result_state_ptr promise(new details::result_state()); + details::consumer_result_state_ptr state_ptr(promise.get()); + + promise->set_result(std::forward(arguments)...); + promise.reset(); // publish the result; + + return {std::move(state_ptr)}; } template @@ -24,10 +27,13 @@ namespace concurrencpp { throw std::invalid_argument(details::consts::k_make_exceptional_result_exception_null_error_msg); } - auto result_state_ptr = std::make_shared>(); - result_state_ptr->set_exception(exception_ptr); - result_state_ptr->publish_result(); - return {std::move(result_state_ptr)}; + details::producer_result_state_ptr promise(new details::result_state()); + details::consumer_result_state_ptr state_ptr(promise.get()); + + promise->set_exception(exception_ptr); + promise.reset(); // publish the result; + + return {std::move(state_ptr)}; } template @@ -36,4 +42,4 @@ namespace concurrencpp { } } // namespace concurrencpp -#endif +#endif \ No newline at end of file diff --git a/include/concurrencpp/results/promises.h b/include/concurrencpp/results/promises.h index 81921c5f..71db7559 100644 --- a/include/concurrencpp/results/promises.h +++ b/include/concurrencpp/results/promises.h @@ -1,9 +1,9 @@ #ifndef CONCURRENCPP_PROMISES_H #define CONCURRENCPP_PROMISES_H -#include "concurrencpp/results/impl/result_state.h" +#include "concurrencpp/task.h" #include "concurrencpp/coroutines/coroutine.h" -#include "concurrencpp/errors.h" +#include "concurrencpp/results/impl/result_state.h" #include @@ -16,8 +16,15 @@ namespace concurrencpp::details { }; template - struct initial_scheduling_awaiter : public details::suspend_always { - void await_suspend(details::coroutine_handle handle) const { + class initial_scheduling_awaiter 
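The make_result.h hunk above swaps the shared_ptr-backed state for the new producer/consumer pointer pair; the observable behaviour of the factories should be unchanged. For reference, a small usage sketch of make_ready_result and make_exceptional_result (assuming the usual concurrencpp.h umbrella header):

```cpp
#include "concurrencpp/concurrencpp.h"

#include <cassert>
#include <stdexcept>

int main() {
    // a result that is fulfilled on creation
    auto ready = concurrencpp::make_ready_result<int>(42);
    assert(ready.status() == concurrencpp::result_status::value);
    assert(ready.get() == 42);

    // a result that is fulfilled with an exception on creation
    auto broken = concurrencpp::make_exceptional_result<int>(
        std::make_exception_ptr(std::runtime_error("failed")));
    assert(broken.status() == concurrencpp::result_status::exception);
}
```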
: public suspend_always { + + private: + await_via_context m_await_via_context; + + public: + void await_suspend(coroutine_handle handle) { + m_await_via_context.set_coro_handle(handle); + auto& per_thread_data = coroutine_per_thread_data::s_tl_per_thread_data; auto executor_base_ptr = std::exchange(per_thread_data.executor, nullptr); @@ -25,15 +32,25 @@ namespace concurrencpp::details { assert(dynamic_cast(executor_base_ptr) != nullptr); auto executor_ptr = static_cast(executor_base_ptr); - executor_ptr->enqueue(handle); + executor_ptr->enqueue(m_await_via_context.get_functor()); + } + + void await_resume() const { + m_await_via_context.throw_if_interrupted(); } }; template<> - struct initial_scheduling_awaiter : public details::suspend_never {}; + struct initial_scheduling_awaiter : public suspend_never {}; - struct initial_accumulating_awaiter : public details::suspend_always { - void await_suspend(details::coroutine_handle handle) const noexcept; + class initial_accumulating_awaiter : public suspend_always { + + private: + await_via_context m_await_via_context; + + public: + void await_suspend(coroutine_handle handle) noexcept; + void await_resume(); }; template @@ -72,7 +89,7 @@ namespace concurrencpp::details { }; struct initialy_resumed_promise { - details::suspend_never initial_suspend() const noexcept { + suspend_never initial_suspend() const noexcept { return {}; } }; @@ -102,7 +119,7 @@ namespace concurrencpp::details { return {}; } - details::suspend_never final_suspend() const noexcept { + suspend_never final_suspend() const noexcept { return {}; } @@ -127,11 +144,10 @@ namespace concurrencpp::details { } }; - struct result_publisher : public details::suspend_always { + struct result_publisher : public suspend_always { template - bool await_suspend(details::coroutine_handle handle) const noexcept { - handle.promise().publish_result(); - return false; // don't suspend, resume and destroy this + void await_suspend(coroutine_handle handle) const noexcept { + handle.promise().complete_producer(handle); } }; @@ -139,37 +155,24 @@ namespace concurrencpp::details { struct result_coro_promise : public return_value_struct, type> { private: - std::shared_ptr> m_result_ptr; + result_state m_result_state; public: - result_coro_promise() : m_result_ptr(std::make_shared>()) {} - - ~result_coro_promise() noexcept { - if (!static_cast(this->m_result_ptr)) { - return; - } - - auto broken_task_error = std::make_exception_ptr(concurrencpp::errors::broken_task("coroutine was destroyed before finishing execution")); - this->m_result_ptr->set_exception(broken_task_error); - this->m_result_ptr->publish_result(); - } - template - void set_result(argument_types&&... args) { - this->m_result_ptr->set_result(std::forward(args)...); + void set_result(argument_types&&... 
arguments) noexcept(noexcept(type(std::forward(arguments)...))) { + this->m_result_state.set_result(std::forward(arguments)...); } void unhandled_exception() noexcept { - this->m_result_ptr->set_exception(std::current_exception()); + this->m_result_state.set_exception(std::current_exception()); } - ::concurrencpp::result get_return_object() noexcept { - return {this->m_result_ptr}; + result get_return_object() noexcept { + return {&m_result_state}; } - void publish_result() noexcept { - this->m_result_ptr->publish_result(); - this->m_result_ptr.reset(); + void complete_producer(coroutine_handle done_handle) noexcept { + this->m_result_state.complete_producer(done_handle); } result_publisher final_suspend() const noexcept { diff --git a/include/concurrencpp/results/result.h b/include/concurrencpp/results/result.h index ae6cf7aa..b70995bf 100644 --- a/include/concurrencpp/results/result.h +++ b/include/concurrencpp/results/result.h @@ -1,16 +1,12 @@ #ifndef CONCURRENCPP_RESULT_H #define CONCURRENCPP_RESULT_H +#include "concurrencpp/errors.h" +#include "concurrencpp/utils/bind.h" #include "concurrencpp/results/constants.h" -#include "concurrencpp/results/promises.h" #include "concurrencpp/results/result_awaitable.h" #include "concurrencpp/results/impl/result_state.h" -#include "concurrencpp/errors.h" -#include "concurrencpp/forward_declerations.h" - -#include "concurrencpp/utils/bind.h" - #include namespace concurrencpp { @@ -22,24 +18,23 @@ namespace concurrencpp { static_assert(valid_result_type_v, "concurrencpp::result - <> should be now-throw-move constructable or void."); friend class details::when_result_helper; + friend struct details::shared_result_helper; private: - std::shared_ptr> m_state; + details::consumer_result_state_ptr m_state; void throw_if_empty(const char* message) const { - if (m_state.get() != nullptr) { - return; + if (static_cast(!m_state)) { + throw errors::empty_result(message); } - - throw errors::empty_result(message); } public: result() noexcept = default; - ~result() noexcept = default; result(result&& rhs) noexcept = default; - result(std::shared_ptr> state) noexcept : m_state(std::move(state)) {} + result(details::consumer_result_state_ptr state) noexcept : m_state(std::move(state)) {} + result(details::result_state* state) noexcept : m_state(state) {} result& operator=(result&& rhs) noexcept { if (this != &rhs) { @@ -53,7 +48,7 @@ namespace concurrencpp { result& operator=(const result& rhs) = delete; operator bool() const noexcept { - return m_state.get() != nullptr; + return static_cast(m_state); } result_status status() const { @@ -61,19 +56,19 @@ namespace concurrencpp { return m_state->status(); } - void wait() { + void wait() const { throw_if_empty(details::consts::k_result_wait_error_msg); m_state->wait(); } template - result_status wait_for(std::chrono::duration duration) { + result_status wait_for(std::chrono::duration duration) const { throw_if_empty(details::consts::k_result_wait_for_error_msg); return m_state->wait_for(duration); } template - result_status wait_until(std::chrono::time_point timeout_time) { + result_status wait_until(std::chrono::time_point timeout_time) const { throw_if_empty(details::consts::k_result_wait_until_error_msg); return m_state->wait_until(timeout_time); } @@ -121,16 +116,18 @@ namespace concurrencpp { template class result_promise { + static constexpr auto valid_result_type_v = std::is_same_v || std::is_nothrow_move_constructible_v; + + static_assert(valid_result_type_v, "concurrencpp::result - <> should be 
now-throw-move constructable or void."); + private: - std::shared_ptr> m_state; + details::producer_result_state_ptr m_state; bool m_result_retrieved; void throw_if_empty(const char* message) const { - if (static_cast(m_state)) { - return; + if (!static_cast(m_state)) { + throw errors::empty_result_promise(message); } - - throw errors::empty_result_promise(message); } void break_task_if_needed() noexcept { @@ -144,11 +141,11 @@ namespace concurrencpp { auto exception_ptr = std::make_exception_ptr(errors::broken_task(details::consts::k_broken_task_exception_error_msg)); m_state->set_exception(exception_ptr); - m_state->publish_result(); + m_state.reset(); } public: - result_promise() : m_state(std::make_shared>()), m_result_retrieved(false) {} + result_promise() : m_state(new details::result_state()), m_result_retrieved(false) {} result_promise(result_promise&& rhs) noexcept : m_state(std::move(rhs.m_state)), m_result_retrieved(rhs.m_result_retrieved) {} @@ -178,8 +175,7 @@ namespace concurrencpp { throw_if_empty(details::consts::k_result_promise_set_result_error_msg); m_state->set_result(std::forward(arguments)...); - m_state->publish_result(); - m_state.reset(); + m_state.reset(); // publishes the result } void set_exception(std::exception_ptr exception_ptr) { @@ -190,8 +186,7 @@ namespace concurrencpp { } m_state->set_exception(exception_ptr); - m_state->publish_result(); - m_state.reset(); + m_state.reset(); // publishes the result } template @@ -203,8 +198,7 @@ namespace concurrencpp { throw_if_empty(details::consts::k_result_promise_set_from_function_error_msg); m_state->from_callable(details::bind(std::forward(callable), std::forward(args)...)); - m_state->publish_result(); - m_state.reset(); + m_state.reset(); // publishes the result } result get_result() { @@ -215,9 +209,9 @@ namespace concurrencpp { } m_result_retrieved = true; - return result(m_state); + return result(m_state.get()); } }; } // namespace concurrencpp -#endif +#endif \ No newline at end of file diff --git a/include/concurrencpp/results/result_awaitable.h b/include/concurrencpp/results/result_awaitable.h index b3aa62d7..d43b4da5 100644 --- a/include/concurrencpp/results/result_awaitable.h +++ b/include/concurrencpp/results/result_awaitable.h @@ -2,116 +2,104 @@ #define CONCURRENCPP_RESULT_AWAITABLE_H #include "concurrencpp/coroutines/coroutine.h" -#include "concurrencpp/results/result_fwd_declerations.h" +#include "concurrencpp/results/impl/result_state.h" -namespace concurrencpp { +namespace concurrencpp::details { template - class awaitable : public details::suspend_always { - - private: - details::await_context m_await_ctx; - std::shared_ptr> m_state; + class awaitable_base : public suspend_always { + protected: + consumer_result_state_ptr m_state; public: - awaitable(std::shared_ptr> state) noexcept : m_state(std::move(state)) {} + awaitable_base(consumer_result_state_ptr state) noexcept : m_state(std::move(state)) {} + + awaitable_base(const awaitable_base&) = delete; + awaitable_base(awaitable_base&&) = delete; + }; +} // namespace concurrencpp::details + +namespace concurrencpp { + template + class awaitable : public details::awaitable_base { - awaitable(const awaitable& rhs) noexcept = delete; - awaitable(awaitable&& rhs) noexcept = delete; + public: + awaitable(details::consumer_result_state_ptr state) noexcept : details::awaitable_base(std::move(state)) {} bool await_suspend(details::coroutine_handle caller_handle) noexcept { - assert(static_cast(m_state)); - m_await_ctx.set_coro_handle(caller_handle); - 
return m_state->await(m_await_ctx); + assert(static_cast(this->m_state)); + return this->m_state->await(caller_handle); } type await_resume() { - auto state = std::move(m_state); - m_await_ctx.throw_if_interrupted(); + auto state = std::move(this->m_state); return state->get(); } }; template - class via_awaitable : public details::suspend_always { + class via_awaitable : public details::awaitable_base { private: details::await_via_context m_await_context; - std::shared_ptr> m_state; const bool m_force_rescheduling; public: - via_awaitable(std::shared_ptr> state, std::shared_ptr executor, bool force_rescheduling) noexcept : - m_await_context(std::move(executor)), m_state(std::move(state)), m_force_rescheduling(force_rescheduling) {} - - via_awaitable(const via_awaitable& rhs) noexcept = delete; - via_awaitable(via_awaitable&& rhs) noexcept = delete; + via_awaitable(details::consumer_result_state_ptr state, std::shared_ptr executor, bool force_rescheduling) noexcept : + details::awaitable_base(std::move(state)), m_await_context(std::move(executor)), m_force_rescheduling(force_rescheduling) {} bool await_suspend(details::coroutine_handle caller_handle) { - assert(static_cast(m_state)); + assert(static_cast(this->m_state)); m_await_context.set_coro_handle(caller_handle); - return m_state->await_via(m_await_context, m_force_rescheduling); + return this->m_state->await_via(m_await_context, m_force_rescheduling); } type await_resume() { - auto state = std::move(m_state); + auto state = std::move(this->m_state); m_await_context.throw_if_interrupted(); return state->get(); } }; template - class resolve_awaitable : public details::suspend_always { - - private: - details::await_context m_await_ctx; - std::shared_ptr> m_state; + class resolve_awaitable : public details::awaitable_base { public: - resolve_awaitable(std::shared_ptr> state) noexcept : m_state(std::move(state)) {} + resolve_awaitable(details::consumer_result_state_ptr state) noexcept : details::awaitable_base(std::move(state)) {} resolve_awaitable(resolve_awaitable&&) noexcept = delete; resolve_awaitable(const resolve_awaitable&) noexcept = delete; bool await_suspend(details::coroutine_handle caller_handle) noexcept { - assert(static_cast(m_state)); - m_await_ctx.set_coro_handle(caller_handle); - return m_state->await(m_await_ctx); + assert(static_cast(this->m_state)); + return this->m_state->await(caller_handle); } result await_resume() { - auto state = std::move(m_state); - m_await_ctx.throw_if_interrupted(); - return result(std::move(state)); + return result(std::move(this->m_state)); } }; template - class resolve_via_awaitable : public details::suspend_always { + class resolve_via_awaitable : public details::awaitable_base { private: details::await_via_context m_await_context; - std::shared_ptr> m_state; const bool m_force_rescheduling; public: - resolve_via_awaitable(std::shared_ptr> state, std::shared_ptr executor, bool force_rescheduling) noexcept - : - m_await_context(std::move(executor)), - m_state(state), m_force_rescheduling(force_rescheduling) {} - - resolve_via_awaitable(const resolve_via_awaitable&) noexcept = delete; - resolve_via_awaitable(resolve_via_awaitable&&) noexcept = delete; + resolve_via_awaitable(details::consumer_result_state_ptr state, std::shared_ptr executor, bool force_rescheduling) noexcept : + details::awaitable_base(std::move(state)), m_await_context(std::move(executor)), m_force_rescheduling(force_rescheduling) {} bool await_suspend(details::coroutine_handle caller_handle) { - 
assert(static_cast(m_state)); + assert(static_cast(this->m_state)); m_await_context.set_coro_handle(caller_handle); - return m_state->await_via(m_await_context, m_force_rescheduling); + return this->m_state->await_via(m_await_context, m_force_rescheduling); } result await_resume() { - auto state = std::move(m_state); + auto state = std::move(this->m_state); m_await_context.throw_if_interrupted(); return result(std::move(state)); } diff --git a/include/concurrencpp/results/result_fwd_declerations.h b/include/concurrencpp/results/result_fwd_declerations.h index 056245d3..4f67d4c5 100644 --- a/include/concurrencpp/results/result_fwd_declerations.h +++ b/include/concurrencpp/results/result_fwd_declerations.h @@ -10,6 +10,10 @@ namespace concurrencpp { template class result; + + template + class shared_result; + template class result_promise; @@ -33,9 +37,15 @@ namespace concurrencpp::details { template class result_state; + class shared_result_state_base; + + template + class shared_result_state; + struct executor_bulk_tag {}; class when_result_helper; + struct shared_result_helper; enum class when_any_status { set, result_ready }; } // namespace concurrencpp::details diff --git a/include/concurrencpp/results/shared_result.h b/include/concurrencpp/results/shared_result.h new file mode 100644 index 00000000..5504d264 --- /dev/null +++ b/include/concurrencpp/results/shared_result.h @@ -0,0 +1,132 @@ +#ifndef CONCURRENCPP_SHARED_RESULT_H +#define CONCURRENCPP_SHARED_RESULT_H + +#include "concurrencpp/results/result.h" +#include "concurrencpp/results/shared_result_awaitable.h" +#include "concurrencpp/results/impl/shared_result_state.h" + +namespace concurrencpp::details { + struct shared_result_helper { + template + static consumer_result_state_ptr get_state(result& result) noexcept { + return std::move(result.m_state); + } + }; +} // namespace concurrencpp::details + +namespace concurrencpp { + template + class shared_result { + + private: + std::shared_ptr> m_state; + + void throw_if_empty(const char* message) const { + if (static_cast(m_state)) { + return; + } + + throw errors::empty_result(message); + } + + public: + shared_result() noexcept = default; + ~shared_result() noexcept = default; + + shared_result(std::shared_ptr> state) noexcept : m_state(std::move(state)) {} + + shared_result(result rhs) { + if (!static_cast(rhs)) { + return; + } + + auto result_state = details::shared_result_helper::get_state(rhs); + auto result_state_ptr = result_state.get(); + m_state = std::make_shared>(std::move(result_state)); + result_state_ptr->share_result(m_state); + } + + shared_result(const shared_result& rhs) noexcept = default; + shared_result(shared_result&& rhs) noexcept = default; + + shared_result& operator=(const shared_result& rhs) noexcept { + if (this != &rhs && m_state != rhs.m_state) { + m_state = rhs.m_state; + } + + return *this; + } + + shared_result& operator=(shared_result&& rhs) noexcept { + if (this != &rhs && m_state != rhs.m_state) { + m_state = std::move(rhs.m_state); + } + + return *this; + } + + operator bool() const noexcept { + return static_cast(m_state.get()); + } + + result_status status() const { + throw_if_empty(details::consts::k_shared_result_status_error_msg); + return m_state->status(); + } + + void wait() { + throw_if_empty(details::consts::k_shared_result_wait_error_msg); + m_state->wait(); + } + + template + result_status wait_for(std::chrono::duration duration) { + throw_if_empty(details::consts::k_shared_result_wait_for_error_msg); + return 
m_state->wait_for(duration); + } + + template + result_status wait_until(std::chrono::time_point timeout_time) { + throw_if_empty(details::consts::k_shared_result_wait_until_error_msg); + return m_state->wait_until(timeout_time); + } + + std::add_lvalue_reference_t get() { + throw_if_empty(details::consts::k_shared_result_get_error_msg); + m_state->wait(); + return m_state->get(); + } + + auto operator co_await() { + throw_if_empty(details::consts::k_shared_result_operator_co_await_error_msg); + return shared_awaitable {m_state}; + } + + auto await_via(std::shared_ptr executor, bool force_rescheduling = true) { + throw_if_empty(details::consts::k_shared_result_await_via_error_msg); + + if (!static_cast(executor)) { + throw std::invalid_argument(details::consts::k_shared_result_await_via_executor_null_error_msg); + } + + return shared_via_awaitable {m_state, std::move(executor), force_rescheduling}; + } + + auto resolve() { + throw_if_empty(details::consts::k_shared_result_resolve_error_msg); + return shared_resolve_awaitable {m_state}; + } + + auto resolve_via(std::shared_ptr executor, bool force_rescheduling = true) { + throw_if_empty(details::consts::k_shared_result_resolve_via_error_msg); + + if (!static_cast(executor)) { + throw std::invalid_argument(details::consts::k_shared_result_resolve_via_executor_null_error_msg); + } + + return shared_resolve_via_awaitable {m_state, std::move(executor), force_rescheduling}; + } + }; +} // namespace concurrencpp + +#endif diff --git a/include/concurrencpp/results/shared_result_awaitable.h b/include/concurrencpp/results/shared_result_awaitable.h new file mode 100644 index 00000000..70d9c072 --- /dev/null +++ b/include/concurrencpp/results/shared_result_awaitable.h @@ -0,0 +1,115 @@ +#ifndef CONCURRENCPP_SHARED_RESULT_AWAITABLE_H +#define CONCURRENCPP_SHARED_RESULT_AWAITABLE_H + +#include "concurrencpp/results/impl/shared_result_state.h" + +namespace concurrencpp::details { + template + class shared_awaitable_base : public suspend_always { + protected: + std::shared_ptr> m_state; + + public: + shared_awaitable_base(std::shared_ptr> state) noexcept : m_state(std::move(state)) {} + + shared_awaitable_base(const shared_awaitable_base&) = delete; + shared_awaitable_base(shared_awaitable_base&&) = delete; + }; +} // namespace concurrencpp::details + +namespace concurrencpp { + template + class shared_awaitable : public details::shared_awaitable_base { + + private: + details::shared_await_context m_await_ctx; + + public: + shared_awaitable(std::shared_ptr> state) noexcept : details::shared_awaitable_base(std::move(state)) {} + + bool await_suspend(details::coroutine_handle caller_handle) noexcept { + assert(static_cast(this->m_state)); + this->m_await_ctx.caller_handle = caller_handle; + return this->m_state->await(m_await_ctx); + } + + std::add_lvalue_reference_t await_resume() { + return this->m_state->get(); + } + }; + + template + class shared_via_awaitable : public details::shared_awaitable_base { + + private: + details::shared_await_via_context m_await_context; + const bool m_force_rescheduling; + + public: + shared_via_awaitable(std::shared_ptr> state, + std::shared_ptr executor, + bool force_rescheduling) noexcept : + details::shared_awaitable_base(std::move(state)), + m_await_context(std::move(executor)), m_force_rescheduling(force_rescheduling) {} + + bool await_suspend(details::coroutine_handle caller_handle) { + assert(static_cast(this->m_state)); + this->m_await_context.await_context.set_coro_handle(caller_handle); + return 
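shared_result is the main user-facing addition of this release: unlike result, it is copyable, may be awaited or queried by any number of consumers, and hands out references to a single stored value. A hedged usage sketch based on the interface above (assuming the concurrencpp.h umbrella header):

```cpp
#include "concurrencpp/concurrencpp.h"

#include <cassert>

concurrencpp::result<void> consume(concurrencpp::shared_result<int> sr) {
    // every copy refers to the same shared state; awaiting yields a reference
    const int& value = co_await sr;
    assert(value == 42);
    co_return;
}

int main() {
    auto r = concurrencpp::make_ready_result<int>(42);
    concurrencpp::shared_result<int> sr(std::move(r));  // adopt an ordinary result

    auto c0 = consume(sr);  // shared_result can be copied freely
    auto c1 = consume(sr);
    c0.get();
    c1.get();

    assert(sr.get() == 42);  // get() can be called repeatedly, unlike result::get()
}
```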
this->m_state->await_via(m_await_context, m_force_rescheduling); + } + + std::add_lvalue_reference_t await_resume() { + this->m_await_context.await_context.throw_if_interrupted(); + return this->m_state->get(); + } + }; + + template + class shared_resolve_awaitable : public details::shared_awaitable_base { + + private: + details::shared_await_context m_await_ctx; + + public: + shared_resolve_awaitable(std::shared_ptr> state) noexcept : details::shared_awaitable_base(std::move(state)) {} + + bool await_suspend(details::coroutine_handle caller_handle) noexcept { + assert(static_cast(this->m_state)); + this->m_await_ctx.caller_handle = caller_handle; + return this->m_state->await(m_await_ctx); + } + + shared_result await_resume() { + return shared_result(std::move(this->m_state)); + } + }; + + template + class shared_resolve_via_awaitable : public details::shared_awaitable_base { + + private: + details::shared_await_via_context m_await_context; + const bool m_force_rescheduling; + + public: + shared_resolve_via_awaitable(std::shared_ptr> state, + std::shared_ptr executor, + bool force_rescheduling) noexcept : + details::shared_awaitable_base(std::move(state)), + m_await_context(std::move(executor)), m_force_rescheduling(force_rescheduling) {} + + bool await_suspend(details::coroutine_handle caller_handle) { + assert(static_cast(this->m_state)); + this->m_await_context.await_context.set_coro_handle(caller_handle); + return this->m_state->await_via(m_await_context, m_force_rescheduling); + } + + shared_result await_resume() { + auto state = std::move(this->m_state); + this->m_await_context.await_context.throw_if_interrupted(); + return shared_result(std::move(state)); + } + }; +} // namespace concurrencpp + +#endif \ No newline at end of file diff --git a/include/concurrencpp/results/when_result.h b/include/concurrencpp/results/when_result.h index 499f1514..5ce69cc5 100644 --- a/include/concurrencpp/results/when_result.h +++ b/include/concurrencpp/results/when_result.h @@ -2,9 +2,9 @@ #define CONCURRENCPP_WHEN_RESULT_H #include +#include #include #include -#include #include #include "concurrencpp/errors.h" @@ -16,11 +16,9 @@ namespace concurrencpp::details { private: template static void throw_if_empty_single(const char* error_message, const result& result) { - if (static_cast(result)) { - return; + if (!static_cast(result)) { + throw errors::empty_result(error_message); } - - throw errors::empty_result(error_message); } static void throw_if_empty_impl(const char* error_message) noexcept { @@ -54,11 +52,12 @@ namespace concurrencpp::details { template class when_all_tuple_state final : public when_all_state_base, public std::enable_shared_from_this> { + using tuple_type = std::tuple; private: tuple_type m_tuple; - std::shared_ptr> m_state_ptr; + producer_result_state_ptr m_state_ptr; template void set_state(result& result) noexcept { @@ -67,12 +66,12 @@ namespace concurrencpp::details { } public: - when_all_tuple_state(result_types&&... results) noexcept : - m_tuple(std::forward(results)...), m_state_ptr(std::make_shared>()) { + when_all_tuple_state(result_types&&... results) noexcept : m_tuple(std::forward(results)...), m_state_ptr(new result_state()) { m_counter = sizeof...(result_types); } void set_state() noexcept { + std::unique_lock lock(m_lock); std::apply( [this](auto&... 
result) { (this->set_state(result), ...); @@ -81,16 +80,20 @@ namespace concurrencpp::details { } void on_result_ready() noexcept override { - if (m_counter.fetch_sub(1, std::memory_order_relaxed) != 1) { + if (m_counter.fetch_sub(1, std::memory_order_acq_rel) != 1) { return; } - m_state_ptr->set_result(std::move(m_tuple)); - m_state_ptr->publish_result(); + std::unique_lock lock(m_lock); + auto state = std::move(m_state_ptr); + auto tuple = std::move(m_tuple); + lock.unlock(); + + state->set_result(std::move(tuple)); } result get_result() noexcept { - return {m_state_ptr}; + return {m_state_ptr.get()}; } }; @@ -99,7 +102,7 @@ namespace concurrencpp::details { private: std::vector m_vector; - std::shared_ptr>> m_state_ptr; + producer_result_state_ptr> m_state_ptr; template void set_state(result& result) noexcept { @@ -110,27 +113,33 @@ namespace concurrencpp::details { public: template when_all_vector_state(iterator_type begin, iterator_type end) : - m_vector(std::make_move_iterator(begin), std::make_move_iterator(end)), m_state_ptr(std::make_shared>>()) { + m_vector(std::make_move_iterator(begin), std::make_move_iterator(end)), m_state_ptr(new result_state>()) { m_counter = m_vector.size(); } void set_state() noexcept { + std::unique_lock lock(m_lock); for (auto& result : m_vector) { set_state(result); } + lock.unlock(); } void on_result_ready() noexcept override { - if (m_counter.fetch_sub(1, std::memory_order_relaxed) != 1) { + if (m_counter.fetch_sub(1, std::memory_order_acq_rel) != 1) { return; } - m_state_ptr->set_result(std::move(m_vector)); - m_state_ptr->publish_result(); + std::unique_lock lock(m_lock); + auto state = std::move(m_state_ptr); + auto vector = std::move(m_vector); + lock.unlock(); + + state->set_result(std::move(vector)); } result> get_result() noexcept { - return {m_state_ptr}; + return {m_state_ptr.get()}; } }; } // namespace concurrencpp::details @@ -159,7 +168,7 @@ namespace concurrencpp::details { private: tuple_type m_results; - std::shared_ptr>> m_state_ptr; + producer_result_state_ptr> m_state_ptr; template std::pair set_state_impl(std::unique_lock& lock) noexcept { // should be called under a lock. @@ -211,13 +220,16 @@ namespace concurrencpp::details { assert(lock.owns_lock()); (void)lock; - m_state_ptr->set_result(index, std::move(m_results)); - m_state_ptr->publish_result(); + auto state = std::move(m_state_ptr); + auto results = std::move(m_results); + lock.unlock(); + + state->set_result(index, std::move(results)); } public: when_any_tuple_state(result_types&&... 
results) : - m_results(std::forward(results)...), m_state_ptr(std::make_shared>>()) {} + m_results(std::forward(results)...), m_state_ptr(new result_state>()) {} void on_result_ready(size_t index) noexcept override { if (m_fulfilled.exchange(true, std::memory_order_relaxed)) { @@ -239,7 +251,7 @@ namespace concurrencpp::details { } result> get_result() noexcept { - return {m_state_ptr}; + return {m_state_ptr.get()}; } }; @@ -248,7 +260,7 @@ namespace concurrencpp::details { private: std::vector m_results; - std::shared_ptr>>> m_state_ptr; + producer_result_state_ptr>> m_state_ptr; void unset_state(std::unique_lock& lock) noexcept { assert(lock.owns_lock()); @@ -264,15 +276,18 @@ namespace concurrencpp::details { void complete_promise(std::unique_lock& lock, size_t index) noexcept { assert(lock.owns_lock()); (void)lock; - m_state_ptr->set_result(index, std::move(m_results)); - m_state_ptr->publish_result(); + + auto results = std::move(m_results); + auto state = std::move(m_state_ptr); + lock.unlock(); + + state->set_result(index, std::move(results)); } public: template when_any_vector_state(iterator_type begin, iterator_type end) : - m_results(std::make_move_iterator(begin), std::make_move_iterator(end)), - m_state_ptr(std::make_shared>>>()) {} + m_results(std::make_move_iterator(begin), std::make_move_iterator(end)), m_state_ptr(new result_state>>()) {} void on_result_ready(size_t index) noexcept override { if (m_fulfilled.exchange(true, std::memory_order_relaxed)) { @@ -302,7 +317,7 @@ namespace concurrencpp::details { } result>> get_result() noexcept { - return {m_state_ptr}; + return {m_state_ptr.get()}; } }; } // namespace concurrencpp::details @@ -317,9 +332,9 @@ namespace concurrencpp { details::when_result_helper::throw_if_empty_tuple(details::consts::k_when_all_empty_result_error_msg, std::forward(results)...); auto when_all_state = std::make_shared::type...>>(std::forward(results)...); - + auto result = when_all_state->get_result(); when_all_state->set_state(); - return when_all_state->get_result(); + return std::move(result); } template @@ -333,8 +348,9 @@ namespace concurrencpp { } auto when_all_state = std::make_shared>(begin, end); + auto result = when_all_state->get_result(); when_all_state->set_state(); - return when_all_state->get_result(); + return std::move(result); } template @@ -342,9 +358,10 @@ namespace concurrencpp { static_assert(sizeof...(result_types) != 0, "concurrencpp::when_any() - the function must accept at least one result object."); details::when_result_helper::throw_if_empty_tuple(details::consts::k_when_any_empty_result_error_msg, std::forward(results)...); - auto state = std::make_shared>(std::forward(results)...); - state->set_state(); - return state->get_result(); + auto when_any_state = std::make_shared>(std::forward(results)...); + auto result = when_any_state->get_result(); + when_any_state->set_state(); + return std::move(result); } template @@ -357,9 +374,10 @@ namespace concurrencpp { using type = typename std::iterator_traits::value_type; - auto state = std::make_shared>(begin, end); - state->set_state(); - return state->get_result(); + auto when_any_state = std::make_shared>(begin, end); + auto result = when_any_state->get_result(); + when_any_state->set_state(); + return std::move(result); } } // namespace concurrencpp diff --git a/include/concurrencpp/runtime/constants.h b/include/concurrencpp/runtime/constants.h index 388203da..1f727399 100644 --- a/include/concurrencpp/runtime/constants.h +++ b/include/concurrencpp/runtime/constants.h @@ 
-10,8 +10,8 @@ namespace concurrencpp::details::consts { constexpr static size_t k_default_number_of_cores = 8; constexpr static unsigned int k_concurrencpp_version_major = 0; - constexpr static unsigned int k_concurrencpp_version_minor = 0; - constexpr static unsigned int k_concurrencpp_version_revision = 9; + constexpr static unsigned int k_concurrencpp_version_minor = 1; + constexpr static unsigned int k_concurrencpp_version_revision = 0; } // namespace concurrencpp::details::consts #endif diff --git a/include/concurrencpp/utils/bind.h b/include/concurrencpp/utils/bind.h index b7bff1bb..3e85de00 100644 --- a/include/concurrencpp/utils/bind.h +++ b/include/concurrencpp/utils/bind.h @@ -6,7 +6,7 @@ namespace concurrencpp::details { template - auto bind(callable_type&& callable) { + auto&& bind(callable_type&& callable) { return std::forward(callable); // no arguments to bind } @@ -21,7 +21,7 @@ namespace concurrencpp::details { template auto bind_with_try_catch(callable_type&& callable) { - return [callable = std::forward(callable)]() mutable { + return [callable = std::forward(callable)]() mutable noexcept { try { callable(); } catch (...) { @@ -32,9 +32,8 @@ namespace concurrencpp::details { template auto bind_with_try_catch(callable_type&& callable, argument_types&&... arguments) { - constexpr static auto inti = std::is_nothrow_invocable_v; return [callable = std::forward(callable), - tuple = std::make_tuple(std::forward(arguments)...)]() mutable noexcept(inti) -> decltype(auto) { + tuple = std::make_tuple(std::forward(arguments)...)]() mutable noexcept -> decltype(auto) { try { return std::apply(callable, tuple); } catch (...) { @@ -42,7 +41,6 @@ namespace concurrencpp::details { } }; } - } // namespace concurrencpp::details -#endif +#endif \ No newline at end of file diff --git a/source/executors/manual_executor.cpp b/source/executors/manual_executor.cpp index 9eb67395..825f5098 100644 --- a/source/executors/manual_executor.cpp +++ b/source/executors/manual_executor.cpp @@ -136,14 +136,14 @@ void manual_executor::wait_for_tasks_impl(size_t count) { std::unique_lock lock(m_lock); m_condition.wait(lock, [this, count] { - return (m_tasks.size() == count) || m_abort; + return (m_tasks.size() >= count) || m_abort; }); if (m_abort) { details::throw_executor_shutdown_exception(name); } - assert(m_tasks.size() == count); + assert(m_tasks.size() >= count); } size_t manual_executor::wait_for_tasks_impl(size_t count, std::chrono::time_point deadline) { @@ -151,7 +151,7 @@ size_t manual_executor::wait_for_tasks_impl(size_t count, std::chrono::time_poin std::unique_lock lock(m_lock); m_condition.wait_until(lock, deadline, [this, count] { - return (m_tasks.size() == count) || m_abort; + return (m_tasks.size() >= count) || m_abort; }); if (m_abort) { diff --git a/source/results/impl/consumer_context.cpp b/source/results/impl/consumer_context.cpp index 8bfa7874..f4116bd0 100644 --- a/source/results/impl/consumer_context.cpp +++ b/source/results/impl/consumer_context.cpp @@ -1,99 +1,98 @@ #include "concurrencpp/results/impl/consumer_context.h" +#include "concurrencpp/results/impl/shared_result_state.h" #include "concurrencpp/executors/executor.h" -using concurrencpp::details::await_context; using concurrencpp::details::await_via_context; using concurrencpp::details::wait_context; using concurrencpp::details::when_any_context; using concurrencpp::details::consumer_context; - -namespace concurrencpp::details { - class await_context_wrapper { - - private: - await_context* m_await_context; - - public: - 
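The manual_executor change above relaxes wait_for_tasks_impl from an exact count to "at least count", which keeps a waiter from hanging when more tasks than requested are already enqueued. A hedged sketch of the effect, assuming the public wait_for_tasks(count) and loop(count) wrappers that sit on top of this _impl:

```cpp
#include "concurrencpp/concurrencpp.h"

int main() {
    concurrencpp::runtime runtime;
    auto me = runtime.make_manual_executor();

    auto r0 = me->submit([] { return 1; });
    auto r1 = me->submit([] { return 2; });

    // two tasks are already queued; with the old "== count" predicate this call
    // could block forever, with ">= count" it returns immediately
    me->wait_for_tasks(1);

    me->loop(me->size());  // drain the queue so the results become ready
    return (r0.get() + r1.get() == 3) ? 0 : 1;
}
```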
await_context_wrapper(await_context* await_context) noexcept : m_await_context(await_context) {} - - await_context_wrapper(await_context_wrapper&& rhs) noexcept : m_await_context(rhs.m_await_context) { - rhs.m_await_context = nullptr; - } - - ~await_context_wrapper() noexcept { - if (m_await_context == nullptr) { - return; - } - - m_await_context->set_interrupt(std::make_exception_ptr(errors::broken_task(consts::k_broken_task_exception_error_msg))); - (*m_await_context)(); - } - - void operator()() noexcept { - assert(m_await_context != nullptr); - auto await_context = std::exchange(m_await_context, nullptr); - (*await_context)(); - } - }; -} // namespace concurrencpp::details +using concurrencpp::details::await_via_functor; /* - * await_context + * await_via_context */ -void await_context::set_coro_handle(details::coroutine_handle coro_handle) noexcept { - assert(!static_cast(m_handle)); +void await_via_context::await_context::resume() noexcept { + assert(static_cast(handle)); + assert(!handle.done()); + handle(); +} + +void await_via_context::await_context::set_coro_handle(coroutine_handle coro_handle) noexcept { + assert(!static_cast(handle)); assert(static_cast(coro_handle)); assert(!coro_handle.done()); - m_handle = coro_handle; + handle = coro_handle; } -void await_context::set_interrupt(const std::exception_ptr& interrupt) { - assert(m_interrupt_exception == nullptr); +void await_via_context::await_context::set_interrupt(const std::exception_ptr& interrupt) noexcept { + assert(interrupt_exception == nullptr); assert(static_cast(interrupt)); - m_interrupt_exception = interrupt; + interrupt_exception = interrupt; } -void await_context::operator()() noexcept { - assert(static_cast(m_handle)); - assert(!m_handle.done()); - m_handle(); +void await_via_context::await_context::throw_if_interrupted() const { + if (interrupt_exception != nullptr) { + std::rethrow_exception(interrupt_exception); + } } -void await_context::throw_if_interrupted() const { - if (m_interrupt_exception == nullptr) { - return; +await_via_context::await_via_context(const std::shared_ptr& executor) noexcept : m_executor(executor) {} + +void await_via_context::resume() noexcept { + m_await_ctx.resume(); +} + +void await_via_context::operator()() noexcept { + try { + m_executor->enqueue(get_functor()); + } catch (...) { + // If an exception is thrown, the task destructor sets an interrupt exception + // on the await_context and resumes the coroutine, causing a broken_task exception to be thrown. 
+ // this is why we don't let this exception propagate, as it wil cause the coroutine to be resumed twice (UB) } +} - std::rethrow_exception(m_interrupt_exception); +void await_via_context::set_coro_handle(coroutine_handle coro_handle) noexcept { + m_await_ctx.set_coro_handle(coro_handle); } -concurrencpp::task await_context::to_task() noexcept { - return concurrencpp::task {await_context_wrapper {this}}; +void await_via_context::set_interrupt(const std::exception_ptr& interrupt) noexcept { + m_await_ctx.set_interrupt(interrupt); +} + +void await_via_context::throw_if_interrupted() const { + m_await_ctx.throw_if_interrupted(); +} + +await_via_functor await_via_context::get_functor() noexcept { + return {&m_await_ctx}; } /* - * await_via_context + * await_via_functor */ -await_via_context::await_via_context(std::shared_ptr executor) noexcept : m_executor(std::move(executor)) {} +await_via_functor::await_via_functor(await_via_context::await_context* ctx) noexcept : m_ctx(ctx) {} -void await_via_context::set_coro_handle(details::coroutine_handle coro_handle) noexcept { - m_await_context.set_coro_handle(coro_handle); +await_via_functor::await_via_functor(await_via_functor&& rhs) noexcept : m_ctx(rhs.m_ctx) { + rhs.m_ctx = nullptr; } -void await_via_context::operator()() noexcept { - try { - m_executor->enqueue(m_await_context.to_task()); - } catch (...) { - // If an exception is thrown, the task destructor sets an interrupt exception - // on the await_context and resumes the coroutine, causing a broken_task exception to be thrown. - // this is why we don't let this exception propagate, as it wil cause the coroutine to be resumed twice (UB) +await_via_functor ::~await_via_functor() noexcept { + if (m_ctx == nullptr) { + return; } + + m_ctx->set_interrupt(std::make_exception_ptr(errors::broken_task(consts::k_broken_task_exception_error_msg))); + m_ctx->resume(); } -void await_via_context::throw_if_interrupted() const { - m_await_context.throw_if_interrupted(); + +void await_via_functor::operator()() noexcept { + assert(m_ctx != nullptr); + const auto await_context = std::exchange(m_ctx, nullptr); + await_context->resume(); } /* @@ -131,8 +130,11 @@ when_any_context::when_any_context(std::shared_ptr when_any m_when_any_state(std::move(when_any_state)), m_index(index) {} void when_any_context::operator()() const noexcept { - assert(static_cast(m_when_any_state)); - m_when_any_state->on_result_ready(m_index); + const auto when_any_state = m_when_any_state; + const auto index = m_index; + + assert(static_cast(when_any_state)); + when_any_state->on_result_ready(index); } /* @@ -154,7 +156,7 @@ void consumer_context::clear() noexcept { } case consumer_status::await: { - storage::destroy(m_storage.await_context); + storage::destroy(m_storage.caller_handle); return; } @@ -169,7 +171,7 @@ void consumer_context::clear() noexcept { } case consumer_status::when_all: { - storage::destroy(m_storage.when_all_state); + storage::destroy(m_storage.when_all_ctx); return; } @@ -177,21 +179,26 @@ void consumer_context::clear() noexcept { storage::destroy(m_storage.when_any_ctx); return; } + + case consumer_status::shared: { + storage::destroy(m_storage.shared_ctx); + return; + } } assert(false); } -void consumer_context::set_await_context(await_context* await_context) noexcept { +void consumer_context::set_await_handle(coroutine_handle caller_handle) noexcept { assert(m_status == consumer_status::idle); m_status = consumer_status::await; - storage::build(m_storage.await_context, await_context); + 
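The await_via_functor destructor above is what turns a dropped or rejected resumption task into an interrupt: the awaiting coroutine is resumed and throw_if_interrupted() rethrows the interrupt as errors::broken_task. A hedged consumer-side sketch (function name hypothetical) of what that looks like to user code:

```cpp
#include "concurrencpp/concurrencpp.h"

concurrencpp::result<int> consume(concurrencpp::result<int> r,
                                  std::shared_ptr<concurrencpp::executor> ex) {
    try {
        // resume on ex once r is ready; if the executor rejects the resumption
        // task, the functor's destructor installs the interrupt for us
        co_return co_await r.await_via(ex);
    } catch (const concurrencpp::errors::broken_task&) {
        co_return -1;  // the await was interrupted abnormally
    }
}

int main() {
    concurrencpp::runtime runtime;
    auto answer = consume(concurrencpp::make_ready_result<int>(42), runtime.thread_executor());
    return answer.get() == 42 ? 0 : 1;
}
```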
storage::build(m_storage.caller_handle, caller_handle); } -void consumer_context::set_await_via_context(await_via_context* await_ctx) noexcept { +void consumer_context::set_await_via_context(await_via_context& await_ctx) noexcept { assert(m_status == consumer_status::idle); m_status = consumer_status::await_via; - storage::build(m_storage.await_via_ctx, await_ctx); + storage::build(m_storage.await_via_ctx, std::addressof(await_ctx)); } void consumer_context::set_wait_context(std::shared_ptr wait_ctx) noexcept { @@ -203,7 +210,7 @@ void consumer_context::set_wait_context(std::shared_ptr wait_ctx) void consumer_context::set_when_all_context(std::shared_ptr when_all_state) noexcept { assert(m_status == consumer_status::idle); m_status = consumer_status::when_all; - storage::build(m_storage.when_all_state, std::move(when_all_state)); + storage::build(m_storage.when_all_ctx, std::move(when_all_state)); } void consumer_context::set_when_any_context(std::shared_ptr when_any_ctx, size_t index) noexcept { @@ -212,32 +219,57 @@ void consumer_context::set_when_any_context(std::shared_ptr storage::build(m_storage.when_any_ctx, std::move(when_any_ctx), index); } -void consumer_context::operator()() noexcept { +void consumer_context::set_shared_context(std::weak_ptr shared_result_state) noexcept { + assert(m_status == consumer_status::idle); + m_status = consumer_status::shared; + storage::build(m_storage.shared_ctx, std::move(shared_result_state)); +} + +void consumer_context::resume_consumer() const noexcept { switch (m_status) { case consumer_status::idle: { return; } case consumer_status::await: { - return (*m_storage.await_context)(); + auto caller_handle = m_storage.caller_handle; + assert(static_cast(caller_handle)); + assert(!caller_handle.done()); + return caller_handle(); } case consumer_status::await_via: { - return (*m_storage.await_via_ctx)(); + const auto await_via_ctx_ptr = m_storage.await_via_ctx; + assert(await_via_ctx_ptr != nullptr); + return (*await_via_ctx_ptr)(); } case consumer_status::wait: { - return m_storage.wait_ctx->notify(); + const auto wait_ctx = m_storage.wait_ctx; + assert(static_cast(wait_ctx)); + return wait_ctx->notify(); } case consumer_status::when_all: { - return m_storage.when_all_state->on_result_ready(); + const auto when_all_ctx = m_storage.when_all_ctx; + assert(static_cast(when_all_ctx)); + return when_all_ctx->on_result_ready(); } case consumer_status::when_any: { - return m_storage.when_any_ctx(); + const auto when_any_ctx = m_storage.when_any_ctx; + return when_any_ctx(); + } + + case consumer_status::shared: { + const auto shared_state = m_storage.shared_ctx.lock(); + if (static_cast(shared_state)) { + shared_state->on_result_ready(); + } + + return; } } assert(false); -} +} \ No newline at end of file diff --git a/source/results/impl/result_state.cpp b/source/results/impl/result_state.cpp index e7e8958b..2b1ee19d 100644 --- a/source/results/impl/result_state.cpp +++ b/source/results/impl/result_state.cpp @@ -1,15 +1,16 @@ #include "concurrencpp/results/impl/result_state.h" +#include "concurrencpp/results/impl/shared_result_state.h" using concurrencpp::details::await_via_context; using concurrencpp::details::result_state_base; void result_state_base::assert_done() const noexcept { - assert(m_pc_state.load(std::memory_order_relaxed) == pc_state::producer); + assert(m_pc_state.load(std::memory_order_relaxed) == pc_state::producer_done); } void result_state_base::wait() { const auto state = m_pc_state.load(std::memory_order_acquire); - if (state == 
pc_state::producer) { + if (state == pc_state::producer_done) { return; } @@ -17,7 +18,7 @@ void result_state_base::wait() { m_consumer.set_wait_context(wait_ctx); auto expected_state = pc_state::idle; - const auto idle = m_pc_state.compare_exchange_strong(expected_state, pc_state::consumer, std::memory_order_acq_rel); + const auto idle = m_pc_state.compare_exchange_strong(expected_state, pc_state::consumer_set, std::memory_order_acq_rel); if (!idle) { assert_done(); @@ -28,16 +29,16 @@ void result_state_base::wait() { assert_done(); } -bool result_state_base::await(await_context& await_ctx) noexcept { +bool result_state_base::await(coroutine_handle caller_handle) noexcept { const auto state = m_pc_state.load(std::memory_order_acquire); - if (state == pc_state::producer) { + if (state == pc_state::producer_done) { return false; // don't suspend } - m_consumer.set_await_context(&await_ctx); + m_consumer.set_await_handle(caller_handle); auto expected_state = pc_state::idle; - const auto idle = m_pc_state.compare_exchange_strong(expected_state, pc_state::consumer, std::memory_order_acq_rel); + const auto idle = m_pc_state.compare_exchange_strong(expected_state, pc_state::consumer_set, std::memory_order_acq_rel); if (!idle) { assert_done(); @@ -59,14 +60,14 @@ bool result_state_base::await_via_ready(await_via_context& await_ctx, bool force bool result_state_base::await_via(await_via_context& await_ctx, bool force_rescheduling) noexcept { const auto state = m_pc_state.load(std::memory_order_acquire); - if (state == pc_state::producer) { + if (state == pc_state::producer_done) { return await_via_ready(await_ctx, force_rescheduling); } - m_consumer.set_await_via_context(&await_ctx); + m_consumer.set_await_via_context(await_ctx); auto expected_state = pc_state::idle; - const auto idle = m_pc_state.compare_exchange_strong(expected_state, pc_state::consumer, std::memory_order_acq_rel); + const auto idle = m_pc_state.compare_exchange_strong(expected_state, pc_state::consumer_set, std::memory_order_acq_rel); if (idle) { return true; @@ -78,14 +79,14 @@ bool result_state_base::await_via(await_via_context& await_ctx, bool force_resch void result_state_base::when_all(std::shared_ptr when_all_state) noexcept { const auto state = m_pc_state.load(std::memory_order_acquire); - if (state == pc_state::producer) { + if (state == pc_state::producer_done) { return when_all_state->on_result_ready(); } m_consumer.set_when_all_context(when_all_state); auto expected_state = pc_state::idle; - const auto idle = m_pc_state.compare_exchange_strong(expected_state, pc_state::consumer, std::memory_order_acq_rel); + const auto idle = m_pc_state.compare_exchange_strong(expected_state, pc_state::consumer_set, std::memory_order_acq_rel); if (idle) { return; @@ -97,14 +98,14 @@ void result_state_base::when_all(std::shared_ptr when_all_s concurrencpp::details::when_any_status result_state_base::when_any(std::shared_ptr when_any_state, size_t index) noexcept { const auto state = m_pc_state.load(std::memory_order_acquire); - if (state == pc_state::producer) { + if (state == pc_state::producer_done) { return when_any_status::result_ready; } m_consumer.set_when_any_context(std::move(when_any_state), index); auto expected_state = pc_state::idle; - const auto idle = m_pc_state.compare_exchange_strong(expected_state, pc_state::consumer, std::memory_order_acq_rel); + const auto idle = m_pc_state.compare_exchange_strong(expected_state, pc_state::consumer_set, std::memory_order_acq_rel); if (idle) { return when_any_status::set; @@ -117,13 
+118,13 @@ concurrencpp::details::when_any_status result_state_base::when_any(std::shared_p
 }
 
 void result_state_base::try_rewind_consumer() noexcept {
-    const auto pc_state = this->m_pc_state.load(std::memory_order_acquire);
-    if (pc_state != pc_state::consumer) {
+    const auto pc_state = m_pc_state.load(std::memory_order_acquire);
+    if (pc_state != pc_state::consumer_set) {
         return;
     }
 
-    auto expected_consumer_state = pc_state::consumer;
-    const auto consumer = this->m_pc_state.compare_exchange_strong(expected_consumer_state, pc_state::idle, std::memory_order_acq_rel);
+    auto expected_consumer_state = pc_state::consumer_set;
+    const auto consumer = m_pc_state.compare_exchange_strong(expected_consumer_state, pc_state::idle, std::memory_order_acq_rel);
 
     if (!consumer) {
         assert_done();
@@ -133,15 +134,20 @@ void result_state_base::try_rewind_consumer() noexcept {
     m_consumer.clear();
 }
 
-void result_state_base::publish_result() noexcept {
-    const auto state_before = this->m_pc_state.exchange(pc_state::producer, std::memory_order_acq_rel);
-
-    assert(state_before != pc_state::producer);
+void result_state_base::share_result(std::weak_ptr<shared_result_state_base> shared_result_state) noexcept {
+    const auto state = m_pc_state.load(std::memory_order_acquire);
+    if (state == pc_state::producer_done) {
+        const auto shared_state = shared_result_state.lock();
+        if (static_cast<bool>(shared_state)) {
+            shared_state->on_result_ready();
+        }
 
-    if (state_before == pc_state::idle) {
         return;
     }
 
-    assert(state_before == pc_state::consumer);
-    m_consumer();
+    m_consumer.set_shared_context(std::move(shared_result_state));
+
+    auto expected_state = pc_state::idle;
+    m_pc_state.compare_exchange_strong(expected_state, pc_state::consumer_set, std::memory_order_acq_rel);
+    // if m_pc_state is already producer_done, there is no registered consumer at this point anyway, so we can simply bail out early.
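+    //
+    // [illustrative note, not part of the original sources] the producer/consumer handshake
+    // used by result_state_base throughout this file can be summarized as follows:
+    //
+    //   consumer side: if (m_pc_state == producer_done) -> the result is ready, consume it inline;
+    //                  otherwise register a callback in m_consumer and CAS(idle -> consumer_set);
+    //                  a failed CAS means the producer finished in the meantime -> consume inline.
+    //
+    //   producer side: store the value/exception, then exchange m_pc_state to producer_done;
+    //                  if the previous state was consumer_set -> resume the registered consumer.
+    //
+    // share_result() follows the same consumer-side registration step; the meaning of a lost
+    // race here is explained by the comment directly above.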
} diff --git a/source/results/impl/shared_result_state.cpp b/source/results/impl/shared_result_state.cpp new file mode 100644 index 00000000..f76508d3 --- /dev/null +++ b/source/results/impl/shared_result_state.cpp @@ -0,0 +1,75 @@ +#include "concurrencpp/results/impl/shared_result_state.h" + +using concurrencpp::details::shared_result_state_base; + +/* + * shared_await_via_context + */ + +concurrencpp::details::shared_await_via_context::shared_await_via_context(std::shared_ptr executor) noexcept : await_context(executor) {} + +/* + * shared_result_state_base + */ + +void shared_result_state_base::await_impl(std::unique_lock& write_lock, shared_await_context& awaiter) noexcept { + assert(write_lock.owns_lock()); + + if (m_awaiters == nullptr) { + m_awaiters = &awaiter; + return; + } + + awaiter.next = m_awaiters; + m_awaiters = &awaiter; +} + +void shared_result_state_base::await_via_impl(std::unique_lock& write_lock, shared_await_via_context& awaiter) noexcept { + if (m_via_awaiters == nullptr) { + m_via_awaiters = &awaiter; + return; + } + + awaiter.next = m_via_awaiters; + m_via_awaiters = &awaiter; +} + +void shared_result_state_base::wait_impl(std::unique_lock& lock) noexcept { + assert(lock.owns_lock()); + m_condition.wait(lock, [this] { + return m_ready; + }); +} + +bool shared_result_state_base::wait_for_impl(std::unique_lock& write_lock, std::chrono::milliseconds ms) noexcept { + assert(write_lock.owns_lock()); + return m_condition.wait_for(write_lock, ms, [this] { + return m_ready; + }); +} + +void shared_result_state_base::notify_all(std::unique_lock& write_lock) noexcept { + assert(write_lock.owns_lock()); + + m_ready = true; + shared_await_context* awaiters = std::exchange(m_awaiters, nullptr); + shared_await_via_context* via_awaiters = std::exchange(m_via_awaiters, nullptr); + write_lock.unlock(); + + // unblock waiters + m_condition.notify_all(); + + // unblock awaiters that want to be resumed by an executor + while (via_awaiters != nullptr) { + const auto next = via_awaiters->next; + via_awaiters->await_context(); + via_awaiters = next; + } + + // unblock synchronous awaiters + while (awaiters != nullptr) { + const auto next = awaiters->next; + awaiters->caller_handle(); + awaiters = next; + } +} \ No newline at end of file diff --git a/source/results/promises.cpp b/source/results/promises.cpp index a54981d2..96cb0f5c 100644 --- a/source/results/promises.cpp +++ b/source/results/promises.cpp @@ -5,10 +5,17 @@ using concurrencpp::details::coroutine_per_thread_data; thread_local coroutine_per_thread_data coroutine_per_thread_data::s_tl_per_thread_data; -void concurrencpp::details::initial_accumulating_awaiter::await_suspend(details::coroutine_handle handle) const noexcept { +void concurrencpp::details::initial_accumulating_awaiter::await_suspend(coroutine_handle handle) noexcept { + m_await_via_context.set_coro_handle(handle); + auto& per_thread_data = coroutine_per_thread_data::s_tl_per_thread_data; auto accumulator = std::exchange(per_thread_data.accumulator, nullptr); assert(accumulator != nullptr); - accumulator->emplace_back(handle); + assert(accumulator->capacity() > accumulator->size()); // so it's always noexcept + accumulator->emplace_back(m_await_via_context.get_functor()); +} + +void concurrencpp::details::initial_accumulating_awaiter::await_resume() { + m_await_via_context.throw_if_interrupted(); } diff --git a/source/task.cpp b/source/task.cpp index e0ec1ae1..50190049 100644 --- a/source/task.cpp +++ b/source/task.cpp @@ -14,6 +14,9 @@ namespace 
concurrencpp::details { coroutine_handle m_coro_handle; public: + coroutine_handle_wrapper(const coroutine_handle_wrapper&) = delete; + coroutine_handle_wrapper& operator=(const coroutine_handle_wrapper&) = delete; + coroutine_handle_wrapper(coroutine_handle coro_handle) noexcept : m_coro_handle(coro_handle) {} coroutine_handle_wrapper(coroutine_handle_wrapper&& rhs) noexcept : m_coro_handle(rhs.m_coro_handle) { @@ -30,7 +33,6 @@ namespace concurrencpp::details { void execute_destroy() noexcept { m_coro_handle(); - m_coro_handle = {}; } void operator()() noexcept { diff --git a/source/threads/thread.cpp b/source/threads/thread.cpp index 27bbd249..0cb9a9b2 100644 --- a/source/threads/thread.cpp +++ b/source/threads/thread.cpp @@ -60,7 +60,7 @@ void thread::set_name(std::string_view name) noexcept { ::pthread_setname_np(::pthread_self(), name.data()); } -#elif defined(CRCPP_MACH_OS) +#elif defined(CRCPP_MAC_OS) # include diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index 817ea959..92dd0a5f 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -10,7 +10,8 @@ option(ENABLE_THREAD_SANITIZER "\ Build concurrencpp with LLVM thread sanitizer. \ Does not have an effect if the compiler is not Clang based." OFF) -if(ENABLE_THREAD_SANITIZER AND CXX_COMPILER_ID MATCHES "Clang") +string(TOLOWER ${CMAKE_CXX_COMPILER_ID} compiler_id) +if(ENABLE_THREAD_SANITIZER AND compiler_id MATCHES "clang") # Instead of polluting the lists file, we inject a command definition # override that will apply the sanitizer flag set(CMAKE_PROJECT_concurrencpp_INCLUDE @@ -38,7 +39,6 @@ set(test_sources source/tests/all_tests.cpp source/tests/coroutine_tests/coroutine_promises_test.cpp source/tests/coroutine_tests/coroutines_tests.cpp - source/tests/executor_tests/derivable_executor_tests.cpp source/tests/executor_tests/inline_executor_tests.cpp source/tests/executor_tests/manual_executor_tests.cpp source/tests/executor_tests/thread_executor_tests.cpp @@ -49,6 +49,9 @@ set(test_sources source/tests/result_tests/result_promise_tests.cpp source/tests/result_tests/result_resolve_tests.cpp source/tests/result_tests/result_tests.cpp + source/tests/result_tests/shared_result_tests.cpp + source/tests/result_tests/shared_result_resolve_tests.cpp + source/tests/result_tests/shared_result_await_tests.cpp source/tests/result_tests/when_all_tests.cpp source/tests/result_tests/when_any_tests.cpp source/tests/timer_tests/timer_queue_tests.cpp @@ -66,7 +69,8 @@ set(test_headers include/tests/test_utils/executor_shutdowner.h include/tests/test_utils/result_factory.h include/tests/test_utils/throwing_executor.h - include/tests/test_utils/test_ready_result.h) + include/tests/test_utils/test_ready_result.h + include/tests/test_utils/make_result_array.h) function(temp_test NAMESPACE NAME) set(target ${NAMESPACE}_${NAME}) @@ -87,12 +91,21 @@ if(NOT ENABLE_THREAD_SANITIZER) return() endif() -set(tsan_sources executors fibbonacci matrix_multiplication quick_sort result when_all when_any) +set(tsan_sources + executors + fibonacci + matrix_multiplication + quick_sort + result + shared_result + when_all + when_any) foreach(source IN LISTS tsan_sources) - temp_test(dummy tsan_${source} ${test_headers} source/thread_sanitizer/${source}.cpp) + temp_test(dummy tsan_${source} ${test_headers} source/thread_sanitizer/${source}.cpp source/helpers/assertions.cpp) endforeach() -return() + +#return() # add_test(NAMESPACE NAME [PROPERTIES ...]) # diff --git a/test/include/tests/all_tests.h b/test/include/tests/all_tests.h index eb3a34e9..084e359e 
100644 --- a/test/include/tests/all_tests.h +++ b/test/include/tests/all_tests.h @@ -20,6 +20,10 @@ namespace concurrencpp::tests { void test_when_all(); void test_when_any(); + void test_shared_result(); + void test_shared_result_await_all(); + void test_shared_result_resolve_all(); + void test_coroutine_promises(); void test_coroutines(); diff --git a/test/include/tests/test_utils/make_result_array.h b/test/include/tests/test_utils/make_result_array.h new file mode 100644 index 00000000..e37a9b09 --- /dev/null +++ b/test/include/tests/test_utils/make_result_array.h @@ -0,0 +1,137 @@ +#ifndef CONCURRENCPP_MAKE_RESULT_ARRAY_H +#define CONCURRENCPP_MAKE_RESULT_ARRAY_H + +#include "concurrencpp/concurrencpp.h" +#include "tests/test_utils/test_ready_result.h" + +#include +#include + +namespace concurrencpp::tests { + template + std::vector> make_result_array(size_t count, + std::chrono::time_point tp, + std::shared_ptr te, + converter_type converter) { + std::vector> results; + results.reserve(count); + + for (size_t i = 0; i < count; i++) { + results.emplace_back(te->submit([tp, i, &converter]() mutable -> type { + std::this_thread::sleep_until(tp); + return converter(i); + })); + } + + return results; + } + + template + std::vector> make_exceptional_array(size_t count, + std::chrono::time_point tp, + std::shared_ptr te, + converter_type converter) { + std::vector> results; + results.reserve(count); + + for (size_t i = 0; i < count; i++) { + results.emplace_back(te->submit([tp, i, converter]() mutable -> type { + std::this_thread::sleep_until(tp); + + throw costume_exception(i); + + return converter(i); + })); + } + + return results; + } + + template + void test_result_array(std::vector> results, consumer_type&& consumer, converter_type&& converter) { + for (size_t i = 0; i < results.size(); i++) { + if constexpr (!std::is_same_v) { + test_ready_result(consumer(std::move(results[i])), converter(i)); + } else { + test_ready_result(consumer(std::move(results[i]))); + } + } + } + + template + void test_exceptional_array(std::vector> results, consumer_type&& consumer) { + for (size_t i = 0; i < results.size(); i++) { + test_ready_result_costume_exception(consumer(std::move(results[i])), i); + } + } + + template + void test_shared_result_array(std::vector> results, consumer_type&& consumer, converter_type&& converter) { + for (size_t i = 0; i < results.size(); i++) { + if constexpr (!std::is_same_v) { + test_ready_result(consumer(std::move(results[i])), converter(i)); + } else { + test_ready_result(consumer(std::move(results[i]))); + } + } + } + + template + void test_shared_result_exceptional_array(std::vector> results, consumer_type&& consumer) { + for (size_t i = 0; i < results.size(); i++) { + test_ready_result_costume_exception(consumer(std::move(results[i])), i); + } + } + + template + struct converter { + type operator()(size_t) const noexcept { + return 0; + } + }; + + template<> + struct converter { + size_t operator()(size_t i) const noexcept { + return i; + } + }; + + template<> + struct converter { + std::string operator()(size_t i) const { + return std::to_string(i); + } + }; + + template<> + struct converter { + void operator()(size_t) const noexcept {} + }; + + template<> + class converter { + + private: + std::shared_ptr> array = std::make_shared>(); + + public: + size_t& operator()(size_t i) const { + return (*array)[i % 16]; + } + }; + + template<> + class converter { + + private: + std::shared_ptr> array = std::make_shared>(); + + public: + std::string& operator()(size_t i) { + 
return (*array)[i % 16]; + } + }; +} // namespace concurrencpp::tests + +#endif \ No newline at end of file diff --git a/test/include/tests/test_utils/test_ready_result.h b/test/include/tests/test_utils/test_ready_result.h index 733e542e..8dd0c9e8 100644 --- a/test/include/tests/test_utils/test_ready_result.h +++ b/test/include/tests/test_utils/test_ready_result.h @@ -6,6 +6,19 @@ #include "result_factory.h" #include "helpers/assertions.h" +#include + +namespace concurrencpp::tests { + template + void test_same_ref_shared_result(::concurrencpp::shared_result& result) noexcept { + const auto value_ptr = std::addressof(result.get()); + + for (size_t i = 0; i < 8; i++) { + assert_equal(value_ptr, std::addressof(result.get())); + } + } +} // namespace concurrencpp::tests + namespace concurrencpp::tests { struct costume_exception : public std::exception { const intptr_t id; @@ -27,6 +40,20 @@ namespace concurrencpp::tests { } } + template + void test_ready_result(::concurrencpp::shared_result result, const type& o) { + assert_true(static_cast(result)); + assert_equal(result.status(), concurrencpp::result_status::value); + + try { + assert_equal(result.get(), o); + } catch (...) { + assert_true(false); + } + + test_same_ref_shared_result(result); + } + template void test_ready_result(::concurrencpp::result result, std::reference_wrapper> ref) { assert_true(static_cast(result)); @@ -39,6 +66,20 @@ namespace concurrencpp::tests { } } + template + void test_ready_result(::concurrencpp::shared_result result, std::reference_wrapper> ref) { + assert_true(static_cast(result)); + assert_equal(result.status(), concurrencpp::result_status::value); + + try { + assert_equal(&result.get(), &ref.get()); + } catch (...) { + assert_true(false); + } + + test_same_ref_shared_result(result); + } + template void test_ready_result(::concurrencpp::result result) { test_ready_result(std::move(result), result_factory::get()); @@ -56,6 +97,32 @@ namespace concurrencpp::tests { } } + template + void test_ready_result(::concurrencpp::shared_result result) { + assert_true(static_cast(result)); + assert_equal(result.status(), concurrencpp::result_status::value); + + try { + assert_equal(result.get(), result_factory::get()); + } catch (...) { + assert_true(false); + } + + test_same_ref_shared_result(result); + } + + template<> + inline void test_ready_result(::concurrencpp::shared_result result) { + assert_true(static_cast(result)); + assert_equal(result.status(), concurrencpp::result_status::value); + + try { + result.get(); // just make sure no exception is thrown. + } catch (...) { + assert_true(false); + } + } + template void test_ready_result_costume_exception(concurrencpp::result result, const intptr_t id) { assert_true(static_cast(result)); @@ -70,6 +137,27 @@ namespace concurrencpp::tests { assert_true(false); } + + template + void test_ready_result_costume_exception(concurrencpp::shared_result result, const intptr_t id) { + assert_true(static_cast(result)); + assert_equal(result.status(), concurrencpp::result_status::exception); + + for (size_t i = 0; i < 10; i++) { + try { + result.get(); + } catch (costume_exception e) { + assert_equal(e.id, id); + if (i == 9) { + return; + } + + } catch (...) 
{ + } + } + + assert_true(false); + } } // namespace concurrencpp::tests #endif diff --git a/test/source/tests/all_tests.cpp b/test/source/tests/all_tests.cpp index 504b5373..d397b0df 100644 --- a/test/source/tests/all_tests.cpp +++ b/test/source/tests/all_tests.cpp @@ -12,6 +12,11 @@ void concurrencpp::tests::test_all() { test_result(); test_result_resolve_all(); test_result_await_all(); + + test_shared_result(); + test_shared_result_await_all(); + test_shared_result_resolve_all(); + test_result_promise(); test_make_result(); diff --git a/test/source/tests/executor_tests/derivable_executor_tests.cpp b/test/source/tests/executor_tests/derivable_executor_tests.cpp deleted file mode 100644 index 127babbe..00000000 --- a/test/source/tests/executor_tests/derivable_executor_tests.cpp +++ /dev/null @@ -1,339 +0,0 @@ -#include "concurrencpp/concurrencpp.h" -#include "tests/all_tests.h" - -#include "tester/tester.h" -#include "helpers/assertions.h" -#include "helpers/object_observer.h" -#include "tests/test_utils/result_factory.h" - -namespace concurrencpp::tests { - template - void test_derivable_executor_post_impl(std::shared_ptr executor, std::counting_semaphore<>& semaphore); - void test_derivable_executor_post(); - - template - void test_derivable_executor_submit_impl(std::shared_ptr executor); - void test_derivable_executor_submit(); - - template - void test_derivable_executor_bulk_post_impl(std::shared_ptr executor, std::counting_semaphore<>& semaphore); - void test_derivable_executor_bulk_post(); - - template - void test_derivable_executor_bulk_submit_impl(std::shared_ptr executor); - void test_derivable_executor_bulk_submit(); -} // namespace concurrencpp::tests - -template -void concurrencpp::tests::test_derivable_executor_post_impl(std::shared_ptr executor, std::counting_semaphore<>& semaphore) { - executor->post([&semaphore] { - semaphore.release(); - return result_factory::get(); - }); - - executor->post([&semaphore] { - semaphore.release(); - return result_factory::throw_ex(); - }); - - executor->post([&semaphore] { - semaphore.release(); - return result_factory::get(); - }); - - executor->post([&semaphore] { - semaphore.release(); - return result_factory::throw_ex(); - }); - - executor->post([&semaphore] { - semaphore.release(); - return result_factory::get(); - }); - - executor->post([&semaphore] { - semaphore.release(); - return result_factory::throw_ex(); - }); - - executor->post([&semaphore]() -> int& { - semaphore.release(); - return result_factory::get(); - }); - - executor->post([&semaphore]() -> int& { - semaphore.release(); - return result_factory::throw_ex(); - }); - - executor->post([&semaphore]() -> std::string& { - semaphore.release(); - return result_factory::get(); - }); - - executor->post([&semaphore]() -> std::string& { - semaphore.release(); - return result_factory::throw_ex(); - }); -} - -void concurrencpp::tests::test_derivable_executor_post() { - concurrencpp::runtime runtime; - std::counting_semaphore<> semaphore(0); - auto worker_thread_executor = runtime.make_worker_thread_executor(); - - test_derivable_executor_post_impl(runtime.inline_executor(), semaphore); - test_derivable_executor_post_impl(runtime.thread_executor(), semaphore); - test_derivable_executor_post_impl(runtime.thread_pool_executor(), semaphore); - test_derivable_executor_post_impl(worker_thread_executor, semaphore); - - for (size_t i = 0; i < 40; i++) { - assert_true(semaphore.try_acquire_for(std::chrono::seconds(10))); - } -} - -template -void 
concurrencpp::tests::test_derivable_executor_submit_impl(std::shared_ptr executor) { - auto int_res_val = executor->submit([] { - return result_factory::get(); - }); - - static_assert(std::is_same_v>); - - auto int_res_ex = executor->submit([] { - return result_factory::throw_ex(); - }); - - static_assert(std::is_same_v>); - - auto str_res_val = executor->submit([] { - return result_factory::get(); - }); - - static_assert(std::is_same_v>); - - auto str_res_ex = executor->submit([] { - return result_factory::throw_ex(); - }); - - static_assert(std::is_same_v>); - - auto void_res_val = executor->submit([] { - return result_factory::get(); - }); - - static_assert(std::is_same_v>); - - auto void_res_ex = executor->submit([] { - return result_factory::throw_ex(); - }); - - static_assert(std::is_same_v>); - - auto int_ref_res_val = executor->submit([]() -> int& { - return result_factory::get(); - }); - - static_assert(std::is_same_v>); - - auto int_ref_res_ex = executor->submit([]() -> int& { - return result_factory::throw_ex(); - }); - - static_assert(std::is_same_v>); - - auto str_ref_res_val = executor->submit([]() -> std::string& { - return result_factory::get(); - }); - - static_assert(std::is_same_v>); - - auto str_ref_res_ex = executor->submit([]() -> std::string& { - return result_factory::throw_ex(); - }); - - static_assert(std::is_same_v>); - - assert_equal(int_res_val.get(), result_factory::get()); - assert_equal(str_res_val.get(), result_factory::get()); - - void_res_val.get(); - - assert_equal(&int_ref_res_val.get(), &result_factory::get()); - assert_equal(&str_ref_res_val.get(), &result_factory::get()); - - assert_throws([&] { - int_res_ex.get(); - }); - - assert_throws([&] { - str_res_ex.get(); - }); - - assert_throws([&] { - void_res_ex.get(); - }); - - assert_throws([&] { - int_ref_res_ex.get(); - }); - - assert_throws([&] { - str_ref_res_ex.get(); - }); -} - -void concurrencpp::tests::test_derivable_executor_submit() { - concurrencpp::runtime runtime; - auto worker_thread_executor = runtime.make_worker_thread_executor(); -} - -namespace concurrencpp::tests { - template - struct result_producer { - std::counting_semaphore<>* semaphore; - const bool terminate_by_exception; - - result_producer(bool terminate_by_exception) noexcept : semaphore(nullptr), terminate_by_exception(terminate_by_exception) {} - - result_producer(bool terminate_by_exception, std::counting_semaphore<>* semaphore) noexcept : - semaphore(semaphore), terminate_by_exception(terminate_by_exception) {} - - type operator()() { - if (semaphore != nullptr) { - semaphore->release(); - } - - if (terminate_by_exception) { - return result_factory::throw_ex(); - } - - return result_factory::get(); - } - }; -} // namespace concurrencpp::tests - -template -void concurrencpp::tests::test_derivable_executor_bulk_post_impl(std::shared_ptr executor, std::counting_semaphore<>& semaphore) { - result_producer int_fun[2] = {{false, &semaphore}, {true, &semaphore}}; - executor->bulk_post(std::span> {int_fun}); - - result_producer str_fun[2] = {{false, &semaphore}, {true, &semaphore}}; - executor->bulk_post(std::span> {str_fun}); - - result_producer void_fun[2] = {{false, &semaphore}, {true, &semaphore}}; - executor->bulk_post(std::span> {void_fun}); - - result_producer int_ref_fun[2] = {{false, &semaphore}, {true, &semaphore}}; - executor->bulk_post(std::span> {int_ref_fun}); - - result_producer str_ref_fun[2] = {{false, &semaphore}, {true, &semaphore}}; - executor->bulk_post(std::span> {str_ref_fun}); -} - -void 
concurrencpp::tests::test_derivable_executor_bulk_post() { - concurrencpp::runtime runtime; - std::counting_semaphore<> semaphore(0); - auto worker_thread_executor = runtime.make_worker_thread_executor(); - - test_derivable_executor_bulk_post_impl(runtime.inline_executor(), semaphore); - test_derivable_executor_bulk_post_impl(runtime.thread_executor(), semaphore); - test_derivable_executor_bulk_post_impl(runtime.thread_pool_executor(), semaphore); - test_derivable_executor_bulk_post_impl(worker_thread_executor, semaphore); - - for (size_t i = 0; i < 40; i++) { - assert_true(semaphore.try_acquire_for(std::chrono::seconds(10))); - } -} - -template -void concurrencpp::tests::test_derivable_executor_bulk_submit_impl(std::shared_ptr executor) { - result_producer int_fun[2] = {{false}, {true}}; - auto int_res_vec = executor->bulk_submit(std::span> {int_fun}); - - static_assert(std::is_same_v>>); - - auto& int_res_val = int_res_vec[0]; - auto& int_res_ex = int_res_vec[1]; - - result_producer str_fun[2] = {{false}, {true}}; - auto str_res_vec = executor->bulk_submit(std::span> {str_fun}); - - static_assert(std::is_same_v>>); - - auto& str_res_val = str_res_vec[0]; - auto& str_res_ex = str_res_vec[1]; - - result_producer void_fun[2] = {{false}, {true}}; - auto void_res_vec = executor->bulk_submit(std::span> {void_fun}); - - static_assert(std::is_same_v>>); - - auto& void_res_val = void_res_vec[0]; - auto& void_res_ex = void_res_vec[1]; - - result_producer int_ref_fun[2] = {{false}, {true}}; - auto int_ref_res_vec = executor->bulk_submit(std::span> {int_ref_fun}); - - static_assert(std::is_same_v>>); - - auto& int_ref_res_val = int_ref_res_vec[0]; - auto& int_ref_res_ex = int_ref_res_vec[1]; - - result_producer str_ref_fun[2] = {{false}, {true}}; - auto str_ref_res_vec = executor->bulk_submit(std::span> {str_ref_fun}); - - static_assert(std::is_same_v>>); - - auto& str_ref_res_val = str_ref_res_vec[0]; - auto& str_ref_res_ex = str_ref_res_vec[1]; - - assert_equal(int_res_val.get(), result_factory::get()); - assert_equal(str_res_val.get(), result_factory::get()); - - void_res_val.get(); - - assert_equal(&int_ref_res_val.get(), &result_factory::get()); - assert_equal(&str_ref_res_val.get(), &result_factory::get()); - - assert_throws([&] { - int_res_ex.get(); - }); - - assert_throws([&] { - str_res_ex.get(); - }); - - assert_throws([&] { - void_res_ex.get(); - }); - - assert_throws([&] { - int_ref_res_ex.get(); - }); - - assert_throws([&] { - str_ref_res_ex.get(); - }); -} - -void concurrencpp::tests::test_derivable_executor_bulk_submit() { - concurrencpp::runtime runtime; - auto worker_thread_executor = runtime.make_worker_thread_executor(); - - test_derivable_executor_bulk_submit_impl(runtime.inline_executor()); - test_derivable_executor_bulk_submit_impl(runtime.thread_executor()); - test_derivable_executor_bulk_submit_impl(runtime.thread_pool_executor()); - test_derivable_executor_bulk_submit_impl(worker_thread_executor); -} - -void concurrencpp::tests::test_derivable_executor() { - tester tester("derivable_executor test"); - - tester.add_step("post", test_derivable_executor_post); - tester.add_step("submit", test_derivable_executor_submit); - tester.add_step("bulk_post", test_derivable_executor_bulk_post); - tester.add_step("bulk_submit", test_derivable_executor_bulk_submit); - - tester.launch_test(); -} diff --git a/test/source/tests/executor_tests/inline_executor_tests.cpp b/test/source/tests/executor_tests/inline_executor_tests.cpp index fbef15ab..78a9b500 100644 --- 
a/test/source/tests/executor_tests/inline_executor_tests.cpp +++ b/test/source/tests/executor_tests/inline_executor_tests.cpp @@ -123,14 +123,14 @@ void concurrencpp::tests::test_inline_executor_submit_foreign() { results[i] = executor->submit(observer.get_testing_stub(i)); } - assert_equal(observer.get_execution_count(), task_count); - assert_equal(observer.get_destruction_count(), task_count); - assert_executed_inline(observer.get_execution_map()); - for (size_t i = 0; i < task_count; i++) { assert_equal(results[i].status(), result_status::value); assert_equal(results[i].get(), i); } + + assert_equal(observer.get_execution_count(), task_count); + assert_equal(observer.get_destruction_count(), task_count); + assert_executed_inline(observer.get_execution_map()); } void concurrencpp::tests::test_inline_executor_submit_inline() { @@ -142,6 +142,7 @@ void concurrencpp::tests::test_inline_executor_submit_inline() { auto results_res = executor->submit([executor, &observer] { std::vector> results; results.resize(task_count); + for (size_t i = 0; i < task_count; i++) { results[i] = executor->submit(observer.get_testing_stub(i)); } @@ -149,20 +150,20 @@ void concurrencpp::tests::test_inline_executor_submit_inline() { return results; }); - assert_equal(observer.get_execution_count(), task_count); - assert_equal(observer.get_destruction_count(), task_count); - assert_executed_inline(observer.get_execution_map()); - auto results = results_res.get(); for (size_t i = 0; i < task_count; i++) { assert_equal(results[i].status(), result_status::value); assert_equal(results[i].get(), i); } + + assert_equal(observer.get_execution_count(), task_count); + assert_equal(observer.get_destruction_count(), task_count); + assert_executed_inline(observer.get_execution_map()); } void concurrencpp::tests::test_inline_executor_submit() { - test_inline_executor_submit_inline(); test_inline_executor_submit_foreign(); + test_inline_executor_submit_inline(); } void concurrencpp::tests::test_inline_executor_bulk_post_foreign() { @@ -227,15 +228,14 @@ void concurrencpp::tests::test_inline_executor_bulk_submit_foreign() { } auto results = executor->bulk_submit(stubs); - - assert_equal(observer.get_execution_count(), task_count); - assert_equal(observer.get_destruction_count(), task_count); - assert_executed_inline(observer.get_execution_map()); - for (size_t i = 0; i < task_count; i++) { assert_equal(results[i].status(), result_status::value); assert_equal(results[i].get(), i); } + + assert_equal(observer.get_execution_count(), task_count); + assert_equal(observer.get_destruction_count(), task_count); + assert_executed_inline(observer.get_execution_map()); } void concurrencpp::tests::test_inline_executor_bulk_submit_inline() { @@ -255,15 +255,15 @@ void concurrencpp::tests::test_inline_executor_bulk_submit_inline() { return executor->bulk_submit(stubs); }); - assert_equal(observer.get_execution_count(), task_count); - assert_equal(observer.get_destruction_count(), task_count); - assert_executed_inline(observer.get_execution_map()); - auto results = results_res.get(); for (size_t i = 0; i < task_count; i++) { assert_equal(results[i].status(), result_status::value); assert_equal(results[i].get(), i); } + + assert_equal(observer.get_execution_count(), task_count); + assert_equal(observer.get_destruction_count(), task_count); + assert_executed_inline(observer.get_execution_map()); } void concurrencpp::tests::test_inline_executor_bulk_submit() { diff --git a/test/source/tests/executor_tests/manual_executor_tests.cpp 
b/test/source/tests/executor_tests/manual_executor_tests.cpp index b3a14528..e736c9e4 100644 --- a/test/source/tests/executor_tests/manual_executor_tests.cpp +++ b/test/source/tests/executor_tests/manual_executor_tests.cpp @@ -244,13 +244,12 @@ void concurrencpp::tests::test_manual_executor_submit_foreign() { for (size_t i = 0; i < task_count / 2; i++) { assert_true(executor->loop_once()); + assert_equal(results[i].get(), i); assert_equal(observer.get_execution_count(), i + 1); assert_equal(observer.get_destruction_count(), i + 1); - assert_equal(results[i].get(), i); } executor->shutdown(); - assert_equal(observer.get_destruction_count(), task_count); assert_executed_locally(observer.get_execution_map()); @@ -259,6 +258,8 @@ void concurrencpp::tests::test_manual_executor_submit_foreign() { results[i].get(); }); } + + assert_equal(observer.get_destruction_count(), task_count); } void concurrencpp::tests::test_manual_executor_submit_inline() { @@ -295,13 +296,12 @@ void concurrencpp::tests::test_manual_executor_submit_inline() { for (size_t i = 0; i < task_count / 2; i++) { assert_true(executor->loop_once()); + assert_equal(results[i].get(), i); assert_equal(observer.get_execution_count(), i + 1); assert_equal(observer.get_destruction_count(), i + 1); - assert_equal(results[i].get(), i); } executor->shutdown(); - assert_equal(observer.get_destruction_count(), task_count); assert_executed_locally(observer.get_execution_map()); @@ -310,6 +310,8 @@ void concurrencpp::tests::test_manual_executor_submit_inline() { results[i].get(); }); } + + assert_equal(observer.get_destruction_count(), task_count); } void concurrencpp::tests::test_manual_executor_submit() { @@ -417,13 +419,12 @@ void concurrencpp::tests::test_manual_executor_bulk_submit_foreign() { for (size_t i = 0; i < task_count / 2; i++) { assert_true(executor->loop_once()); + assert_equal(results[i].get(), i); assert_equal(observer.get_execution_count(), i + 1); assert_equal(observer.get_destruction_count(), i + 1); - assert_equal(results[i].get(), i); } executor->shutdown(); - assert_equal(observer.get_destruction_count(), task_count); assert_executed_locally(observer.get_execution_map()); @@ -432,6 +433,8 @@ void concurrencpp::tests::test_manual_executor_bulk_submit_foreign() { results[i].get(); }); } + + assert_equal(observer.get_destruction_count(), task_count); } void concurrencpp::tests::test_manual_executor_bulk_submit_inline() { @@ -467,13 +470,12 @@ void concurrencpp::tests::test_manual_executor_bulk_submit_inline() { for (size_t i = 0; i < task_count / 2; i++) { assert_true(executor->loop_once()); + assert_equal(results[i].get(), i); assert_equal(observer.get_execution_count(), i + 1); assert_equal(observer.get_destruction_count(), i + 1); - assert_equal(results[i].get(), i); } executor->shutdown(); - assert_equal(observer.get_destruction_count(), task_count); assert_executed_locally(observer.get_execution_map()); @@ -482,6 +484,8 @@ void concurrencpp::tests::test_manual_executor_bulk_submit_inline() { results[i].get(); }); } + + assert_equal(observer.get_destruction_count(), task_count); } void concurrencpp::tests::test_manual_executor_bulk_submit() { @@ -514,10 +518,10 @@ void concurrencpp::tests::test_manual_executor_loop_once() { for (size_t i = 0; i < task_count; i++) { assert_true(executor->loop_once()); + assert_equal(results[i].get(), i); assert_equal(observer.get_execution_count(), i + 1); assert_equal(observer.get_destruction_count(), i + 1); assert_equal(executor->size(), task_count - (i + 1)); - 
assert_equal(results[i].get(), i); } assert_executed_locally(observer.get_execution_map()); @@ -720,7 +724,6 @@ void concurrencpp::tests::test_manual_executor_loop() { const auto total_executed = (i + 1) * chunk_size; assert_equal(observer.get_execution_count(), total_executed); - assert_equal(observer.get_destruction_count(), total_executed); assert_equal(executor->size(), task_count - total_executed); } @@ -729,7 +732,6 @@ void concurrencpp::tests::test_manual_executor_loop() { assert_equal(executed, remained); assert_equal(observer.get_execution_count(), task_count); - assert_equal(observer.get_destruction_count(), task_count); assert_true(executor->empty()); assert_equal(executor->size(), static_cast(0)); @@ -741,6 +743,8 @@ void concurrencpp::tests::test_manual_executor_loop() { for (size_t i = 0; i < task_count; i++) { assert_equal(results[i].get(), i); } + + assert_equal(observer.get_destruction_count(), task_count); } void concurrencpp::tests::test_manual_executor_loop_for() { @@ -942,7 +946,6 @@ void concurrencpp::tests::test_manual_executor_clear() { assert_true(executor->empty()); assert_equal(executor->size(), static_cast(0)); assert_equal(observer.get_execution_count(), static_cast(0)); - assert_equal(observer.get_destruction_count(), task_count); for (auto& result : results) { assert_throws([&result]() mutable { @@ -950,6 +953,8 @@ void concurrencpp::tests::test_manual_executor_clear() { }); } + assert_equal(observer.get_destruction_count(), task_count); + assert_equal(executor->clear(), static_cast(0)); } diff --git a/test/source/tests/executor_tests/thread_executor_tests.cpp b/test/source/tests/executor_tests/thread_executor_tests.cpp index 19f508ab..a08d0614 100644 --- a/test/source/tests/executor_tests/thread_executor_tests.cpp +++ b/test/source/tests/executor_tests/thread_executor_tests.cpp @@ -159,14 +159,14 @@ void concurrencpp::tests::test_thread_executor_submit_foreign() { results[i] = executor->submit(observer.get_testing_stub(i)); } + for (size_t i = 0; i < task_count; i++) { + assert_equal(results[i].get(), i); + } + assert_true(observer.wait_execution_count(task_count, std::chrono::minutes(1))); assert_true(observer.wait_destruction_count(task_count, std::chrono::minutes(1))); assert_unique_execution_threads(observer.get_execution_map(), task_count); - - for (size_t i = 0; i < task_count; i++) { - assert_equal(results[i].get(), i); - } } void concurrencpp::tests::test_thread_executor_submit_inline() { @@ -185,15 +185,15 @@ void concurrencpp::tests::test_thread_executor_submit_inline() { return results; }); - assert_true(observer.wait_execution_count(task_count, std::chrono::minutes(1))); - assert_true(observer.wait_destruction_count(task_count, std::chrono::minutes(1))); - - assert_unique_execution_threads(observer.get_execution_map(), task_count); - auto results = results_res.get(); for (size_t i = 0; i < task_count; i++) { assert_equal(results[i].get(), i); } + + assert_true(observer.wait_execution_count(task_count, std::chrono::minutes(1))); + assert_true(observer.wait_destruction_count(task_count, std::chrono::minutes(1))); + + assert_unique_execution_threads(observer.get_execution_map(), task_count); } void concurrencpp::tests::test_thread_executor_submit() { @@ -265,14 +265,14 @@ void concurrencpp::tests::test_thread_executor_bulk_submit_foreign() { auto results = executor->bulk_submit(stubs); + for (size_t i = 0; i < task_count; i++) { + assert_equal(results[i].get(), i); + } + assert_true(observer.wait_execution_count(task_count, 
std::chrono::minutes(1))); assert_true(observer.wait_destruction_count(task_count, std::chrono::minutes(1))); assert_unique_execution_threads(observer.get_execution_map(), task_count); - - for (size_t i = 0; i < task_count; i++) { - assert_equal(results[i].get(), i); - } } void concurrencpp::tests::test_thread_executor_bulk_submit_inline() { @@ -292,15 +292,15 @@ void concurrencpp::tests::test_thread_executor_bulk_submit_inline() { return executor->bulk_submit(stubs); }); - assert_true(observer.wait_execution_count(task_count, std::chrono::minutes(1))); - assert_true(observer.wait_destruction_count(task_count, std::chrono::minutes(1))); - - assert_unique_execution_threads(observer.get_execution_map(), task_count); - auto results = results_res.get(); for (size_t i = 0; i < task_count; i++) { assert_equal(results[i].get(), i); } + + assert_true(observer.wait_execution_count(task_count, std::chrono::minutes(1))); + assert_true(observer.wait_destruction_count(task_count, std::chrono::minutes(1))); + + assert_unique_execution_threads(observer.get_execution_map(), task_count); } void concurrencpp::tests::test_thread_executor_bulk_submit() { diff --git a/test/source/tests/executor_tests/thread_pool_executor_tests.cpp b/test/source/tests/executor_tests/thread_pool_executor_tests.cpp index 64023780..fa16d52d 100644 --- a/test/source/tests/executor_tests/thread_pool_executor_tests.cpp +++ b/test/source/tests/executor_tests/thread_pool_executor_tests.cpp @@ -165,12 +165,12 @@ void concurrencpp::tests::test_thread_pool_executor_submit_foreign() { results[i] = executor->submit(observer.get_testing_stub(i)); } - assert_true(observer.wait_execution_count(task_count, std::chrono::minutes(2))); - assert_true(observer.wait_destruction_count(task_count, std::chrono::minutes(2))); - for (size_t i = 0; i < task_count; i++) { assert_equal(results[i].get(), i); } + + assert_true(observer.wait_execution_count(task_count, std::chrono::minutes(2))); + assert_true(observer.wait_destruction_count(task_count, std::chrono::minutes(2))); } void concurrencpp::tests::test_thread_pool_executor_submit_inline() { @@ -191,13 +191,13 @@ void concurrencpp::tests::test_thread_pool_executor_submit_inline() { return results; }); - assert_true(observer.wait_execution_count(task_count, std::chrono::minutes(2))); - assert_true(observer.wait_destruction_count(task_count, std::chrono::minutes(2))); - auto results = results_res.get(); for (size_t i = 0; i < task_count; i++) { assert_equal(results[i].get(), static_cast(i)); } + + assert_true(observer.wait_execution_count(task_count, std::chrono::minutes(2))); + assert_true(observer.wait_destruction_count(task_count, std::chrono::minutes(2))); } void concurrencpp::tests::test_thread_pool_executor_submit() { @@ -271,12 +271,12 @@ void concurrencpp::tests::test_thread_pool_executor_bulk_submit_foreign() { auto results = executor->bulk_submit(stubs); - assert_true(observer.wait_execution_count(task_count, std::chrono::minutes(2))); - assert_true(observer.wait_destruction_count(task_count, std::chrono::minutes(2))); - for (size_t i = 0; i < task_count; i++) { assert_equal(results[i].get(), i); } + + assert_true(observer.wait_execution_count(task_count, std::chrono::minutes(2))); + assert_true(observer.wait_destruction_count(task_count, std::chrono::minutes(2))); } void concurrencpp::tests::test_thread_pool_executor_bulk_submit_inline() { @@ -298,13 +298,13 @@ void concurrencpp::tests::test_thread_pool_executor_bulk_submit_inline() { return executor->bulk_submit(stubs); }); - 
assert_true(observer.wait_execution_count(task_count, std::chrono::minutes(2))); - assert_true(observer.wait_destruction_count(task_count, std::chrono::minutes(2))); - auto results = results_res.get(); for (size_t i = 0; i < task_count; i++) { assert_equal(results[i].get(), i); } + + assert_true(observer.wait_execution_count(task_count, std::chrono::minutes(2))); + assert_true(observer.wait_destruction_count(task_count, std::chrono::minutes(2))); } void concurrencpp::tests::test_thread_pool_executor_bulk_submit() { diff --git a/test/source/tests/executor_tests/worker_thread_executor_tests.cpp b/test/source/tests/executor_tests/worker_thread_executor_tests.cpp index 729adac7..f357cd04 100644 --- a/test/source/tests/executor_tests/worker_thread_executor_tests.cpp +++ b/test/source/tests/executor_tests/worker_thread_executor_tests.cpp @@ -152,13 +152,13 @@ void concurrencpp::tests::test_worker_thread_executor_submit_foreign() { results[i] = executor->submit(observer.get_testing_stub(i)); } - assert_true(observer.wait_execution_count(task_count, std::chrono::minutes(1))); - assert_true(observer.wait_destruction_count(task_count, std::chrono::minutes(1))); - assert_unique_execution_thread(observer.get_execution_map()); - for (size_t i = 0; i < task_count; i++) { assert_equal(results[i].get(), i); } + + assert_true(observer.wait_execution_count(task_count, std::chrono::minutes(1))); + assert_true(observer.wait_destruction_count(task_count, std::chrono::minutes(1))); + assert_unique_execution_thread(observer.get_execution_map()); } void concurrencpp::tests::test_worker_thread_executor_submit_inline() { @@ -177,14 +177,14 @@ void concurrencpp::tests::test_worker_thread_executor_submit_inline() { return results; }); - assert_true(observer.wait_execution_count(task_count, std::chrono::minutes(1))); - assert_true(observer.wait_destruction_count(task_count, std::chrono::minutes(1))); - assert_unique_execution_thread(observer.get_execution_map()); - auto results = results_res.get(); for (size_t i = 0; i < task_count; i++) { assert_equal(results[i].get(), i); } + + assert_true(observer.wait_execution_count(task_count, std::chrono::minutes(1))); + assert_true(observer.wait_destruction_count(task_count, std::chrono::minutes(1))); + assert_unique_execution_thread(observer.get_execution_map()); } void concurrencpp::tests::test_worker_thread_executor_submit() { @@ -253,14 +253,13 @@ void concurrencpp::tests::test_worker_thread_executor_bulk_submit_foreign() { } auto results = executor->bulk_submit(stubs); + for (size_t i = 0; i < task_count; i++) { + assert_equal(results[i].get(), i); + } assert_true(observer.wait_execution_count(task_count, std::chrono::minutes(1))); assert_true(observer.wait_destruction_count(task_count, std::chrono::minutes(1))); assert_unique_execution_thread(observer.get_execution_map()); - - for (size_t i = 0; i < task_count; i++) { - assert_equal(results[i].get(), i); - } } void concurrencpp::tests::test_worker_thread_executor_bulk_submit_inline() { @@ -280,14 +279,14 @@ void concurrencpp::tests::test_worker_thread_executor_bulk_submit_inline() { return executor->bulk_submit(stubs); }); - assert_true(observer.wait_execution_count(task_count, std::chrono::minutes(1))); - assert_true(observer.wait_destruction_count(task_count, std::chrono::minutes(1))); - assert_unique_execution_thread(observer.get_execution_map()); - auto results = results_res.get(); for (size_t i = 0; i < task_count; i++) { assert_equal(results[i].get(), i); } + + assert_true(observer.wait_execution_count(task_count, 
std::chrono::minutes(1))); + assert_true(observer.wait_destruction_count(task_count, std::chrono::minutes(1))); + assert_unique_execution_thread(observer.get_execution_map()); } void concurrencpp::tests::test_worker_thread_executor_bulk_submit() { diff --git a/test/source/tests/result_tests/result_await_tests.cpp b/test/source/tests/result_tests/result_await_tests.cpp index b0fdaf99..7f7c3684 100644 --- a/test/source/tests/result_tests/result_await_tests.cpp +++ b/test/source/tests/result_tests/result_await_tests.cpp @@ -193,10 +193,11 @@ template void concurrencpp::tests::test_result_await_impl() { // empty result throws { - assert_throws([] { - result result; - result.resolve(); - }); + assert_throws_with_error_message( + [] { + result().operator co_await(); + }, + concurrencpp::details::consts::k_result_operator_co_await_error_msg); } auto thread_executor = std::make_shared(); @@ -674,11 +675,12 @@ namespace concurrencpp::tests { template void concurrencpp::tests::test_result_await_via_impl() { // empty result throws - assert_throws([] { - result result; - auto executor = std::make_shared(); - result.resolve_via(executor); - }); + assert_throws_with_error_message( + [] { + auto executor = std::make_shared(); + result().await_via(executor); + }, + concurrencpp::details::consts::k_result_await_via_error_msg); // null executor throws assert_throws_with_error_message( diff --git a/test/source/tests/result_tests/result_resolve_tests.cpp b/test/source/tests/result_tests/result_resolve_tests.cpp index df523e43..b66bda97 100644 --- a/test/source/tests/result_tests/result_resolve_tests.cpp +++ b/test/source/tests/result_tests/result_resolve_tests.cpp @@ -163,10 +163,11 @@ template void concurrencpp::tests::test_result_resolve_impl() { // empty result throws { - assert_throws([] { - result result; - result.resolve(); - }); + assert_throws_with_error_message( + [] { + result().resolve(); + }, + concurrencpp::details::consts::k_result_resolve_error_msg); } auto thread_executor = std::make_shared(); @@ -568,11 +569,12 @@ namespace concurrencpp::tests { template void concurrencpp::tests::test_result_resolve_via_impl() { // empty result throws - assert_throws([] { - result result; - auto executor = std::make_shared(); - result.resolve_via(executor); - }); + assert_throws_with_error_message( + [] { + auto executor = std::make_shared(); + result().resolve_via(executor); + }, + concurrencpp::details::consts::k_result_resolve_via_error_msg); // null executor throws assert_throws_with_error_message( diff --git a/test/source/tests/result_tests/result_tests.cpp b/test/source/tests/result_tests/result_tests.cpp index 7d08d447..90e6b24b 100644 --- a/test/source/tests/result_tests/result_tests.cpp +++ b/test/source/tests/result_tests/result_tests.cpp @@ -81,9 +81,11 @@ template void concurrencpp::tests::test_result_status_impl() { // empty result throws { - assert_throws([] { - result().status(); - }); + assert_throws_with_error_message( + [] { + result().status(); + }, + concurrencpp::details::consts::k_result_status_error_msg); } // idle result @@ -140,9 +142,11 @@ template void concurrencpp::tests::test_result_get_impl() { // empty result throws { - assert_throws([] { - result().get(); - }); + assert_throws_with_error_message( + [] { + result().get(); + }, + concurrencpp::details::consts::k_result_get_error_msg); } // get blocks until value is present and empties the result @@ -236,9 +240,11 @@ template void concurrencpp::tests::test_result_wait_impl() { // empty result throws { - assert_throws([] { - 
result().wait(); - }); + assert_throws_with_error_message( + [] { + result().wait(); + }, + concurrencpp::details::consts::k_result_wait_error_msg); } // wait blocks until value is present @@ -348,9 +354,11 @@ template void concurrencpp::tests::test_result_wait_for_impl() { // empty result throws { - assert_throws([] { - result().wait_for(seconds(1)); - }); + assert_throws_with_error_message( + [] { + result().wait_for(seconds(1)); + }, + concurrencpp::details::consts::k_result_wait_for_error_msg); } // if the result is ready by value, don't block and return status::value @@ -476,10 +484,12 @@ template void concurrencpp::tests::test_result_wait_until_impl() { // empty result throws { - assert_throws([] { - const auto later = high_resolution_clock::now() + seconds(10); - result().wait_until(later); - }); + assert_throws_with_error_message( + [] { + const auto later = high_resolution_clock::now() + seconds(10); + result().wait_until(later); + }, + concurrencpp::details::consts::k_result_wait_until_error_msg); } // if time_point <= now, the function is equivalent to result::status diff --git a/test/source/tests/result_tests/shared_result_await_tests.cpp b/test/source/tests/result_tests/shared_result_await_tests.cpp new file mode 100644 index 00000000..8a25e99d --- /dev/null +++ b/test/source/tests/result_tests/shared_result_await_tests.cpp @@ -0,0 +1,769 @@ +#include "concurrencpp/concurrencpp.h" +#include "tests/all_tests.h" + +#include "tests/test_utils/result_factory.h" +#include "tests/test_utils/test_ready_result.h" +#include "tests/test_utils/throwing_executor.h" +#include "tests/test_utils/executor_shutdowner.h" + +#include "tester/tester.h" +#include "helpers/assertions.h" +#include "helpers/random.h" + +namespace concurrencpp::tests { + template + void test_shared_result_await_impl(); + void test_shared_result_await(); + + template + void test_shared_result_await_via_impl(); + void test_shared_result_await_via(); + +} // namespace concurrencpp::tests + +using concurrencpp::result; +using concurrencpp::details::thread; + +/* + * In this test suit, we need to check all the possible scenarios that "await" can have. + * Our tests are split into 2 branches: when the result is already ready at the moment of resolving and + * when it's not. + * + * If the result is already ready, the test matrix looks like this: + * status[value, exception] + * Overall 2 scenarios + * + * If the result is not ready, the test matrix looks like this: + * status[value, exception] + * Overall 2 scenarios + * + * These tests are almost identical to result::resolve(_via) tests. If we got here, + * that means that result::resolve(_via) works correctly. so we modify the resolving tests to use regular + * "await" and then continue as a regular resolving test. 
If any assert fails, it's result::await(_via) fault and not + * result:resolve(_via) + */ + +namespace concurrencpp::tests { + template + struct test_await_ready_result { + result operator()(); + }; + + template + struct test_await_ready_result { + + private: + uintptr_t m_thread_id_0 = 0; + + result proxy_task() { + auto result = result_factory::make_ready(); + concurrencpp::shared_result sr(std::move(result)); + + m_thread_id_0 = thread::get_current_virtual_id(); + + co_return co_await sr; + } + + public: + result operator()() { + auto done_result = co_await proxy_task().resolve(); + + const auto thread_id_1 = thread::get_current_virtual_id(); + + assert_equal(m_thread_id_0, thread_id_1); + test_ready_result(std::move(done_result)); + } + }; + + template + struct test_await_ready_result { + + private: + uintptr_t m_thread_id_0 = 0; + + result proxy_task(const size_t id) { + auto result = make_exceptional_result(costume_exception(id)); + concurrencpp::shared_result sr(std::move(result)); + + m_thread_id_0 = thread::get_current_virtual_id(); + + co_return co_await sr; + } + + public: + result operator()() { + const auto id = 1234567; + auto done_result = co_await proxy_task(id).resolve(); + + const auto thread_id_1 = thread::get_current_virtual_id(); + ; + assert_equal(m_thread_id_0, thread_id_1); + test_ready_result_costume_exception(std::move(done_result), id); + } + }; + + template + struct test_await_not_ready_result { + result operator()(std::shared_ptr executor); + }; + + template + struct test_await_not_ready_result { + + private: + uintptr_t m_setting_thread_id = 0; + uintptr_t m_resuming_thread_id = 0; + + result proxy_task(std::shared_ptr manual_executor) { + auto result = manual_executor->submit([]() -> decltype(auto) { + return result_factory::get(); + }); + concurrencpp::shared_result sr(std::move(result)); + + co_return co_await sr; + } + + result inner_task(std::shared_ptr manual_executor) { + auto done_result = co_await proxy_task(manual_executor).resolve(); + + m_resuming_thread_id = thread::get_current_virtual_id(); + + test_ready_result(std::move(done_result)); + } + + public: + result operator()(std::shared_ptr manual_executor, std::shared_ptr thread_executor) { + assert_true(manual_executor->empty()); + + auto result = inner_task(manual_executor); + + co_await thread_executor->submit([this, manual_executor] { + m_setting_thread_id = concurrencpp::details::thread::get_current_virtual_id(); + assert_true(manual_executor->loop_once()); + }); + + co_await result; + + assert_equal(m_setting_thread_id, m_resuming_thread_id); + } + }; + + template + struct test_await_not_ready_result { + + private: + uintptr_t m_setting_thread_id = 0; + uintptr_t m_resuming_thread_id = 0; + + result proxy_task(std::shared_ptr manual_executor, const size_t id) { + auto result = manual_executor->submit([id]() -> decltype(auto) { + throw costume_exception(id); + return result_factory::get(); + }); + concurrencpp::shared_result sr(std::move(result)); + + co_return co_await sr; + } + + result inner_task(std::shared_ptr manual_executor) { + const auto id = 1234567; + auto done_result = co_await proxy_task(manual_executor, id).resolve(); + + m_resuming_thread_id = concurrencpp::details::thread::get_current_virtual_id(); + + test_ready_result_costume_exception(std::move(done_result), id); + } + + public: + result operator()(std::shared_ptr manual_executor, std::shared_ptr thread_executor) { + assert_true(manual_executor->empty()); + + auto result = inner_task(manual_executor); + + co_await 
thread_executor->submit([this, manual_executor] { + m_setting_thread_id = concurrencpp::details::thread::get_current_virtual_id(); + assert_true(manual_executor->loop_once()); + }); + + co_await result; + + assert_equal(m_setting_thread_id, m_resuming_thread_id); + } + }; + +} // namespace concurrencpp::tests + +template +void concurrencpp::tests::test_shared_result_await_impl() { + // empty result throws + { + assert_throws_with_error_message( + [] { + shared_result().operator co_await(); + }, + concurrencpp::details::consts::k_shared_result_operator_co_await_error_msg); + } + + // await can be called multiple times + { + shared_result sr(result_factory::make_ready()); + + for (size_t i = 0; i < 6; i++) { + sr.operator co_await(); + assert_true(sr); + } + } + + auto thread_executor = std::make_shared(); + auto manual_executor = std::make_shared(); + executor_shutdowner es0(thread_executor), es1(manual_executor); + + test_await_ready_result()().get(); + test_await_ready_result()().get(); + test_await_not_ready_result()(manual_executor, thread_executor).get(); + test_await_not_ready_result()(manual_executor, thread_executor).get(); +} + +void concurrencpp::tests::test_shared_result_await() { + test_shared_result_await_impl(); + test_shared_result_await_impl(); + test_shared_result_await_impl(); + test_shared_result_await_impl(); + test_shared_result_await_impl(); +} + +/* + * In this test suit, we need to check all the possible scenarios that "await_via" can have. + * Our tests are split into 2 branches: when the result is already ready at the moment of resolving and + * when it's not. + * + * If the result is already ready, the test matrix looks like this: + * status[value, exception] x force_rescheduling[true, false] x executor throws[true, false] + * Overall 8 scenarios + * + * If the result is not ready, the test matrix looks like this: + * status[value, exception] x executor throws [true, false] + * Overall 4 scenarios + */ + +namespace concurrencpp::tests { + struct thread_id_setter { + + std::uintptr_t& dest; + + thread_id_setter(std::uintptr_t& dest) noexcept : dest(dest) {} + + ~thread_id_setter() noexcept { + dest = thread::get_current_virtual_id(); + } + }; + + template + struct test_await_via_ready_result { + result operator()(std::shared_ptr executor); + }; + + template + struct test_await_via_ready_result { + + private: + uintptr_t m_thread_id_0 = 0; + uintptr_t m_thread_id_1 = 0; + + result proxy_task(std::shared_ptr executor) { + auto result = result_factory::make_ready(); + concurrencpp::shared_result sr(std::move(result)); + + m_thread_id_0 = thread::get_current_virtual_id(); + + thread_id_setter setter(m_thread_id_1); // sets thread id after await returns + + co_return co_await sr.await_via(executor, false); + } + + public: + result operator()(std::shared_ptr executor) { + // result is ready + force_rescheduling = false + executor doesn't throw + // = inline execution, result is returned + + auto done_result = co_await proxy_task(executor).resolve(); + + assert_equal(m_thread_id_0, m_thread_id_1); + test_ready_result(std::move(done_result)); + } + }; + + template + struct test_await_via_ready_result { + + private: + uintptr_t m_thread_id_0 = 0; + uintptr_t m_thread_id_1 = 0; + + result proxy_task(std::shared_ptr executor) { + auto result = result_factory::make_ready(); + concurrencpp::shared_result sr(std::move(result)); + + m_thread_id_0 = thread::get_current_virtual_id(); + + thread_id_setter setter(m_thread_id_1); // sets thread id after await returns + + co_return 
co_await sr.await_via(executor, true); + } + + public: + result operator()(std::shared_ptr executor) { + // result is ready + force_rescheduling = true + executor doesn't throw + // = rescheduling, result is returned + + auto done_result = co_await proxy_task(executor).resolve(); + + assert_not_equal(m_thread_id_0, m_thread_id_1); + test_ready_result(std::move(done_result)); + } + }; + + template + struct test_await_via_ready_result { + + private: + uintptr_t m_thread_id_0 = 0; + uintptr_t m_thread_id_1 = 0; + + result proxy_task(std::shared_ptr executor) { + auto result = result_factory::make_ready(); + concurrencpp::shared_result sr(std::move(result)); + + m_thread_id_0 = thread::get_current_virtual_id(); + + thread_id_setter setter(m_thread_id_1); // sets thread id after await returns + + co_return co_await sr.await_via(executor, false); + } + + public: + result operator()(std::shared_ptr executor) { + // result is ready + force_rescheduling = false + executor throws + // = inline execution, result is returned (executor doesn't have the chance to throw) + auto done_result = co_await proxy_task(executor).resolve(); + + assert_equal(m_thread_id_0, m_thread_id_1); + test_ready_result(std::move(done_result)); + } + }; + + template + struct test_await_via_ready_result { + + result operator()(std::shared_ptr executor) { + // result is ready + force_rescheduling = true + executor throws + // = inline execution, errors::broken_task exception is thrown + auto result = result_factory::make_ready(); + concurrencpp::shared_result sr(std::move(result)); + + const auto thread_id_0 = thread::get_current_virtual_id(); + + try { + co_await sr.await_via(executor, true); + } catch (const errors::broken_task&) { + const auto thread_id_1 = thread::get_current_virtual_id(); + + assert_false(static_cast(result)); + assert_equal(thread_id_0, thread_id_1); + co_return; + } catch (...) 
{ + } + + assert_false(true); + } + }; + + template + struct test_await_via_ready_result { + + private: + uintptr_t m_thread_id_0 = 0; + uintptr_t m_thread_id_1 = 0; + + result proxy_task(std::shared_ptr executor, const size_t id) { + auto result = make_exceptional_result(costume_exception(id)); + concurrencpp::shared_result sr(std::move(result)); + + m_thread_id_0 = thread::get_current_virtual_id(); + + thread_id_setter setter(m_thread_id_1); // sets thread id after await returns + + co_return co_await sr.await_via(executor, false); + } + + public: + result operator()(std::shared_ptr executor) { + // result is ready (exception) + force_rescheduling = false + executor doesn't throw + // = inline execution, asynchronous exception is thrown + const auto id = 1234567; + + auto done_result = co_await proxy_task(executor, id).resolve(); + + assert_equal(m_thread_id_0, m_thread_id_1); + test_ready_result_costume_exception(std::move(done_result), id); + } + }; + + template + struct test_await_via_ready_result { + + private: + uintptr_t m_thread_id_0 = 0; + uintptr_t m_thread_id_1 = 0; + + result proxy_task(std::shared_ptr executor, const size_t id) { + auto result = make_exceptional_result(costume_exception(id)); + concurrencpp::shared_result sr(std::move(result)); + + m_thread_id_0 = thread::get_current_virtual_id(); + + thread_id_setter setter(m_thread_id_1); // sets thread id after await returns + + co_return co_await sr.await_via(executor, true); + } + + public: + result operator()(std::shared_ptr executor) { + // result is ready (exception) + force_rescheduling = true + executor doesn't throw + // = rescheduling, asynchronous exception is thrown + const auto id = 1234567; + auto done_result = co_await proxy_task(executor, id).resolve(); + + assert_not_equal(m_thread_id_0, m_thread_id_1); + test_ready_result_costume_exception(std::move(done_result), id); + } + }; + + template + struct test_await_via_ready_result { + + private: + uintptr_t m_thread_id_0 = 0; + uintptr_t m_thread_id_1 = 0; + + result proxy_task(std::shared_ptr executor, const size_t id) { + auto result = make_exceptional_result(costume_exception(id)); + concurrencpp::shared_result sr(std::move(result)); + + m_thread_id_0 = thread::get_current_virtual_id(); + + thread_id_setter setter(m_thread_id_1); // sets thread id after await returns + + co_return co_await sr.await_via(executor, false); + } + + public: + result operator()(std::shared_ptr executor) { + // result is ready (exception) + force_rescheduling = false + executor throws + // = inline execution, asynchronous exception is thrown (executor doesn't have the chance to throw itself) + + const auto id = 1234567; + auto done_result = co_await proxy_task(executor, id).resolve(); + + assert_equal(m_thread_id_0, m_thread_id_1); + test_ready_result_costume_exception(std::move(done_result), id); + } + }; + + template + struct test_await_via_ready_result { + result operator()(std::shared_ptr executor) { + // result is ready (exception) + force_rescheduling = true + executor throws + // = inline execution, errors::broken_task exception is thrown + auto result = result_factory::make_exceptional(); + concurrencpp::shared_result sr(std::move(result)); + + const auto thread_id_0 = thread::get_current_virtual_id(); + + try { + co_await sr.await_via(executor, true); + } catch (const errors::broken_task&) { + const auto thread_id_1 = thread::get_current_virtual_id(); + + assert_equal(thread_id_0, thread_id_1); + assert_false(static_cast(result)); + co_return; + } catch (...) 
{ + } + + assert_false(true); + } + }; + + template + struct test_await_via_not_ready_result { + result operator()(std::shared_ptr manual_executor, std::shared_ptr thread_executor); + }; + + template + struct test_await_via_not_ready_result { + // result is not ready (completes with a value) + executor doesn't throw + // = rescheduling, result is returned + + private: + uintptr_t m_launcher_thread_id = 0; + uintptr_t m_setting_thread_id = 0; + uintptr_t m_resuming_thread_id = 0; + + result proxy_task(std::shared_ptr manual_executor, std::shared_ptr thread_executor) { + m_launcher_thread_id = concurrencpp::details::thread::get_current_virtual_id(); + + auto result = manual_executor->submit([]() -> decltype(auto) { + return result_factory::get(); + }); + concurrencpp::shared_result sr(std::move(result)); + + co_return co_await sr.await_via(thread_executor, true); + } + + result inner_task(std::shared_ptr manual_executor, std::shared_ptr thread_executor) { + auto done_result = co_await proxy_task(manual_executor, thread_executor).resolve(); + + m_resuming_thread_id = concurrencpp::details::thread::get_current_virtual_id(); + test_ready_result(std::move(done_result)); + } + + public: + result operator()(std::shared_ptr manual_executor, std::shared_ptr thread_executor) { + assert_true(manual_executor->empty()); + + auto result = inner_task(manual_executor, thread_executor); + + co_await thread_executor->submit([this, manual_executor]() mutable { + m_setting_thread_id = thread::get_current_virtual_id(); + assert_true(manual_executor->loop_once()); + }); + + co_await result; + + assert_not_equal(m_launcher_thread_id, m_setting_thread_id); + assert_not_equal(m_launcher_thread_id, m_resuming_thread_id); + assert_not_equal(m_setting_thread_id, m_resuming_thread_id); + } + }; + + template + struct test_await_via_not_ready_result { + // result is not ready (completes with a value) + executor throws + // = resumed inline in the setting thread, errors::broken_task is thrown. + + private: + uintptr_t m_launcher_thread_id = 0; + uintptr_t m_setting_thread_id = 0; + uintptr_t m_resuming_thread_id = 0; + + result inner_task(std::shared_ptr manual_executor, std::shared_ptr throwing_executor) { + m_launcher_thread_id = thread::get_current_virtual_id(); + + auto result = manual_executor->submit([]() -> decltype(auto) { + return result_factory::get(); + }); + concurrencpp::shared_result sr(std::move(result)); + + try { + co_await sr.await_via(throwing_executor, true); + } catch (const errors::broken_task&) { + m_resuming_thread_id = thread::get_current_virtual_id(); + co_return; + } catch (...) 
{ + } + + assert_false(true); + } + + public: + result operator()(std::shared_ptr manual_executor, + std::shared_ptr throwing_executor, + std::shared_ptr thread_executor) { + assert_true(manual_executor->empty()); + + auto result = inner_task(manual_executor, throwing_executor); + + co_await thread_executor->submit([this, manual_executor]() mutable { + m_setting_thread_id = concurrencpp::details::thread::get_current_virtual_id(); + assert_true(manual_executor->loop_once()); + }); + + co_await result; + + assert_not_equal(m_launcher_thread_id, m_setting_thread_id); + assert_not_equal(m_launcher_thread_id, m_resuming_thread_id); + assert_equal(m_setting_thread_id, m_resuming_thread_id); + } + }; + + template + struct test_await_via_not_ready_result { + // result is not ready (completes with an exception) + executor doesn't throw + // = rescheduling, asynchronous exception is thrown + + private: + uintptr_t m_launcher_thread_id = 0; + uintptr_t m_setting_thread_id = 0; + uintptr_t m_resuming_thread_id = 0; + + result proxy_task(std::shared_ptr manual_executor, std::shared_ptr thread_executor, const size_t id) { + m_launcher_thread_id = thread::get_current_virtual_id(); + + auto result = manual_executor->submit([id]() -> decltype(auto) { + throw costume_exception(id); + return result_factory::get(); + }); + concurrencpp::shared_result sr(std::move(result)); + + co_return co_await sr.await_via(thread_executor, true); + } + + result inner_task(std::shared_ptr manual_executor, std::shared_ptr thread_executor) { + const size_t id = 1234567; + auto done_result = co_await proxy_task(manual_executor, thread_executor, id).resolve(); + + m_resuming_thread_id = thread::get_current_virtual_id(); + test_ready_result_costume_exception(std::move(done_result), id); + } + + public: + result operator()(std::shared_ptr manual_executor, std::shared_ptr thread_executor) { + assert_true(manual_executor->empty()); + + auto result = inner_task(manual_executor, thread_executor); + + co_await thread_executor->submit([this, manual_executor]() mutable { + m_setting_thread_id = thread::get_current_virtual_id(); + assert_true(manual_executor->loop_once()); + }); + + co_await result; + + assert_not_equal(m_launcher_thread_id, m_setting_thread_id); + assert_not_equal(m_launcher_thread_id, m_resuming_thread_id); + assert_not_equal(m_setting_thread_id, m_resuming_thread_id); + } + }; + + template + struct test_await_via_not_ready_result { + // result is not ready (completes with an exception) + executor throws + // = resumed inline in the setting thread, errors::broken_task is thrown. + + private: + uintptr_t m_launcher_thread_id = 0; + uintptr_t m_setting_thread_id = 0; + uintptr_t m_resuming_thread_id = 0; + + result inner_task(std::shared_ptr manual_executor, std::shared_ptr throwing_executor) { + m_launcher_thread_id = thread::get_current_virtual_id(); + + auto result = manual_executor->submit([]() -> decltype(auto) { + return result_factory::throw_ex(); + }); + concurrencpp::shared_result sr(std::move(result)); + + try { + co_await sr.await_via(throwing_executor, true); + } catch (const errors::broken_task&) { + m_resuming_thread_id = thread::get_current_virtual_id(); + co_return; + } catch (...) 
{ + } + + assert_false(true); + } + + public: + result operator()(std::shared_ptr manual_executor, + std::shared_ptr throwing_executor, + std::shared_ptr thread_executor) { + assert_true(manual_executor->empty()); + + auto result = inner_task(manual_executor, throwing_executor); + + co_await thread_executor->submit([this, manual_executor]() mutable { + m_setting_thread_id = concurrencpp::details::thread::get_current_virtual_id(); + assert_true(manual_executor->loop_once()); + }); + + co_await result; + + assert_not_equal(m_launcher_thread_id, m_setting_thread_id); + assert_not_equal(m_launcher_thread_id, m_resuming_thread_id); + assert_equal(m_setting_thread_id, m_resuming_thread_id); + } + }; + +} // namespace concurrencpp::tests + +template +void concurrencpp::tests::test_shared_result_await_via_impl() { + // empty result throws + assert_throws_with_error_message( + [] { + auto executor = std::make_shared(); + shared_result().await_via(executor); + }, + concurrencpp::details::consts::k_shared_result_await_via_error_msg); + + // null executor throws + assert_throws_with_error_message( + [] { + auto result = result_factory::make_ready(); + concurrencpp::shared_result sr(std::move(result)); + sr.await_via({}, true); + }, + concurrencpp::details::consts::k_shared_result_await_via_executor_null_error_msg); + + // await_via can be called multiple times + { + shared_result sr(result_factory::make_ready()); + auto executor = std::make_shared(); + + for (size_t i = 0; i < 6; i++) { + sr.await_via(executor); + assert_true(sr); + } + } + + auto thread_executor = std::make_shared(); + auto throwing_executor = std::make_shared(); + auto manual_executor = std::make_shared(); + + executor_shutdowner es0(thread_executor), es1(throwing_executor), es2(manual_executor); + + test_await_via_ready_result()(thread_executor).get(); + test_await_via_ready_result()(throwing_executor).get(); + test_await_via_ready_result()(thread_executor).get(); + test_await_via_ready_result()(throwing_executor).get(); + + test_await_via_ready_result()(thread_executor).get(); + test_await_via_ready_result()(throwing_executor).get(); + test_await_via_ready_result()(thread_executor).get(); + test_await_via_ready_result()(throwing_executor).get(); + + test_await_via_not_ready_result()(manual_executor, thread_executor).get(); + test_await_via_not_ready_result()(manual_executor, throwing_executor, thread_executor).get(); + + test_await_via_not_ready_result()(manual_executor, thread_executor).get(); + test_await_via_not_ready_result()(manual_executor, throwing_executor, thread_executor).get(); +} + +void concurrencpp::tests::test_shared_result_await_via() { + test_shared_result_await_via_impl(); + test_shared_result_await_via_impl(); + test_shared_result_await_via_impl(); + test_shared_result_await_via_impl(); + test_shared_result_await_via_impl(); +} + +void concurrencpp::tests::test_shared_result_await_all() { + tester tester("shared_result::await, shared_result::await_via test"); + + tester.add_step("await", test_shared_result_await); + tester.add_step("await_via", test_shared_result_await_via); + + tester.launch_test(); +} diff --git a/test/source/tests/result_tests/shared_result_resolve_tests.cpp b/test/source/tests/result_tests/shared_result_resolve_tests.cpp new file mode 100644 index 00000000..b3029018 --- /dev/null +++ b/test/source/tests/result_tests/shared_result_resolve_tests.cpp @@ -0,0 +1,660 @@ +#include "concurrencpp/concurrencpp.h" +#include "tests/all_tests.h" + +#include "tests/test_utils/result_factory.h" +#include 
"tests/test_utils/test_ready_result.h" +#include "tests/test_utils/throwing_executor.h" +#include "tests/test_utils/executor_shutdowner.h" + +#include "tester/tester.h" +#include "helpers/assertions.h" + +namespace concurrencpp::tests { + template + void test_shared_result_resolve_impl(); + void test_shared_result_resolve(); + + template + void test_shared_result_resolve_via_impl(); + void test_shared_result_resolve_via(); + +} // namespace concurrencpp::tests + +using concurrencpp::result; +using concurrencpp::details::thread; + +/* + * In this test suit, we need to check all the possible scenarios that result::resolve can have. + * Our tests are split into 2 branches: when the result is already ready at the moment of resolving and + * when it's not. + * + * If the result is already ready, the test matrix looks like this: + * status[value, exception] + * Overall 2 scenarios + * + * If the result is not ready, the test matrix looks like this: + * status[value, exception] + * Overall 2 scenarios + */ + +namespace concurrencpp::tests { + template + struct test_await_ready_result { + result operator()(); + }; + + template + struct test_await_ready_result { + result operator()() { + auto result = result_factory::make_ready(); + concurrencpp::shared_result sr(std::move(result)); + + const auto thread_id_0 = thread::get_current_virtual_id(); + + auto done_result = co_await sr.resolve(); + + const auto thread_id_1 = thread::get_current_virtual_id(); + + assert_false(static_cast(result)); + assert_equal(thread_id_0, thread_id_1); + test_ready_result(std::move(done_result)); + } + }; + + template + struct test_await_ready_result { + result operator()() { + const auto id = 1234567; + auto result = make_exceptional_result(costume_exception(id)); + concurrencpp::shared_result sr(std::move(result)); + + const auto thread_id_0 = concurrencpp::details::thread::get_current_virtual_id(); + + auto done_result = co_await sr.resolve(); + + const auto thread_id_1 = concurrencpp::details::thread::get_current_virtual_id(); + + assert_false(static_cast(result)); + assert_equal(thread_id_0, thread_id_1); + test_ready_result_costume_exception(std::move(done_result), id); + } + }; + + template + struct test_await_not_ready_result { + result operator()(std::shared_ptr executor); + }; + + template + struct test_await_not_ready_result { + + private: + uintptr_t m_setting_thread_id = 0; + uintptr_t m_resuming_thread_id = 0; + + result inner_task(std::shared_ptr manual_executor) { + auto result = manual_executor->submit([]() -> decltype(auto) { + return result_factory::get(); + }); + concurrencpp::shared_result sr(std::move(result)); + + auto done_result = co_await sr.resolve(); + + m_resuming_thread_id = thread::get_current_virtual_id(); + + test_ready_result(std::move(done_result)); + } + + public: + result operator()(std::shared_ptr manual_executor, std::shared_ptr thread_executor) { + assert_true(manual_executor->empty()); + + auto result = inner_task(manual_executor); + + co_await thread_executor->submit([this, manual_executor] { + m_setting_thread_id = thread::get_current_virtual_id(); + assert_true(manual_executor->loop_once()); + }); + + co_await result; + + assert_equal(m_setting_thread_id, m_resuming_thread_id); + } + }; + + template + struct test_await_not_ready_result { + + private: + uintptr_t m_setting_thread_id = 0; + uintptr_t m_resuming_thread_id = 0; + + result inner_task(std::shared_ptr manual_executor) { + const auto id = 1234567; + auto result = manual_executor->submit([id]() -> decltype(auto) { + 
throw costume_exception(id); + return result_factory::get(); + }); + concurrencpp::shared_result sr(std::move(result)); + + auto done_result = co_await sr.resolve(); + + m_resuming_thread_id = thread::get_current_virtual_id(); + + test_ready_result_costume_exception(std::move(done_result), id); + } + + public: + result operator()(std::shared_ptr manual_executor, std::shared_ptr thread_executor) { + assert_true(manual_executor->empty()); + + auto result = inner_task(manual_executor); + + co_await thread_executor->submit([this, manual_executor] { + m_setting_thread_id = concurrencpp::details::thread::get_current_virtual_id(); + assert_true(manual_executor->loop_once()); + }); + + co_await result; + + assert_equal(m_setting_thread_id, m_resuming_thread_id); + } + }; + +} // namespace concurrencpp::tests + +template +void concurrencpp::tests::test_shared_result_resolve_impl() { + // empty result throws + { + assert_throws_with_error_message( + [] { + shared_result().resolve(); + }, + concurrencpp::details::consts::k_shared_result_resolve_error_msg); + } + + // resolve can be called multiple times + { + shared_result sr(result_factory::make_ready()); + + for (size_t i = 0; i < 6; i++) { + sr.resolve(); + assert_true(sr); + } + } + + auto thread_executor = std::make_shared(); + auto manual_executor = std::make_shared(); + executor_shutdowner es0(thread_executor), es1(manual_executor); + + test_await_ready_result()().get(); + test_await_ready_result()().get(); + test_await_not_ready_result()(manual_executor, thread_executor).get(); + test_await_not_ready_result()(manual_executor, thread_executor).get(); +} + +void concurrencpp::tests::test_shared_result_resolve() { + test_shared_result_resolve_impl(); + test_shared_result_resolve_impl(); + test_shared_result_resolve_impl(); + test_shared_result_resolve_impl(); + test_shared_result_resolve_impl(); +} + +/* + * In this test suit, we need to check all the possible scenarios that result::resolve_via can have. + * Our tests are split into 2 branches: when the result is already ready at the moment of resolving and + * when it's not. 
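+ *
+ * As a rough sketch (illustrative only, not part of this patch; `sr`, `executor` and `some_task()`
+ * are placeholders), resolving means awaiting readiness without unwrapping the value:
+ *
+ *     concurrencpp::shared_result<int> sr(some_task());
+ *     auto ready = co_await sr.resolve();                    // yields a ready shared_result<int>
+ *     auto ready2 = co_await sr.resolve_via(executor, true); // same, but resumed through `executor`
+ *
+ * which is why the tests below inspect the returned ready shared_result rather than a plain value.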
+ * + * If the result is already ready, the test matrix looks like this: + * status[value, exception] x force_rescheduling[true, false] x executor throws[true, false] + * Overall 8 scenarios + * + * If the result is not ready, the test matrix looks like this: + * status[value, exception] x executor throws [true, false] + * Overall 4 scenarios + */ + +namespace concurrencpp::tests { + template + struct test_resolve_via_ready_result { + result operator()(std::shared_ptr executor); + }; + + template + struct test_resolve_via_ready_result { + result operator()(std::shared_ptr executor) { + // result is ready + force_rescheduling = false + executor doesn't throw + // = inline execution, result is returned + + auto result = result_factory::make_ready(); + concurrencpp::shared_result sr(std::move(result)); + + const auto thread_id_0 = thread::get_current_virtual_id(); + + auto done_result = co_await sr.resolve_via(executor, false); + + const auto thread_id_1 = thread::get_current_virtual_id(); + + assert_false(static_cast(result)); + assert_equal(thread_id_0, thread_id_1); + test_ready_result(std::move(done_result)); + } + }; + + template + struct test_resolve_via_ready_result { + result operator()(std::shared_ptr executor) { + // result is ready + force_rescheduling = true + executor doesn't throw + // = rescheduling, result is returned + auto result = result_factory::make_ready(); + concurrencpp::shared_result sr(std::move(result)); + + const auto thread_id_0 = thread::get_current_virtual_id(); + + auto done_result = co_await sr.resolve_via(executor, true); + + const auto thread_id_1 = thread::get_current_virtual_id(); + + assert_false(static_cast(result)); + assert_not_equal(thread_id_0, thread_id_1); + test_ready_result(std::move(done_result)); + } + }; + + template + struct test_resolve_via_ready_result { + result operator()(std::shared_ptr executor) { + // result is ready + force_rescheduling = false + executor throws + // = inline execution, result is returned (executor doesn't have the chance to throw) + auto result = result_factory::make_ready(); + concurrencpp::shared_result sr(std::move(result)); + + const auto thread_id_0 = thread::get_current_virtual_id(); + + auto done_result = co_await sr.resolve_via(executor, false); + + const auto thread_id_1 = thread::get_current_virtual_id(); + + assert_false(static_cast(result)); + assert_equal(thread_id_0, thread_id_1); + test_ready_result(std::move(done_result)); + } + }; + + template + struct test_resolve_via_ready_result { + result operator()(std::shared_ptr executor) { + // result is ready + force_rescheduling = true + executor throws + // = inline execution, broken_task is thrown + auto result = result_factory::make_ready(); + concurrencpp::shared_result sr(std::move(result)); + + const auto thread_id_0 = thread::get_current_virtual_id(); + + try { + co_await sr.resolve_via(executor, true); + } catch (const errors::broken_task&) { + const auto thread_id_1 = thread::get_current_virtual_id(); + + assert_false(static_cast(result)); + assert_equal(thread_id_0, thread_id_1); + co_return; + } catch (...) 
{ + } + + assert_false(true); + } + }; + + template + struct test_resolve_via_ready_result { + result operator()(std::shared_ptr executor) { + // result is ready (exception) + force_rescheduling = false + executor doesn't throw + // = inline execution, asynchronous exception is returned + const auto id = 1234567; + auto result = make_exceptional_result(costume_exception(id)); + concurrencpp::shared_result sr(std::move(result)); + + const auto thread_id_0 = thread::get_current_virtual_id(); + + auto done_result = co_await sr.resolve_via(executor, false); + + const auto thread_id_1 = thread::get_current_virtual_id(); + + assert_false(static_cast(result)); + assert_equal(thread_id_0, thread_id_1); + test_ready_result_costume_exception(std::move(done_result), id); + } + }; + + template + struct test_resolve_via_ready_result { + result operator()(std::shared_ptr executor) { + // result is ready (exception) + force_rescheduling = true + executor doesn't throw + // = rescheduling, asynchronous exception is returned + auto id = 1234567; + auto result = make_exceptional_result(costume_exception(id)); + concurrencpp::shared_result sr(std::move(result)); + + const auto thread_id_0 = thread::get_current_virtual_id(); + + auto done_result = co_await sr.resolve_via(executor, true); + + const auto thread_id_1 = thread::get_current_virtual_id(); + + assert_false(static_cast(result)); + assert_not_equal(thread_id_0, thread_id_1); + test_ready_result_costume_exception(std::move(done_result), id); + } + }; + + template + struct test_resolve_via_ready_result { + result operator()(std::shared_ptr executor) { + // result is ready (exception) + force_rescheduling = false + executor throws + // = inline execution, asynchronous exception is returned (executor doesn't have the chance to throw itself) + const auto id = 1234567; + auto result = make_exceptional_result(costume_exception(id)); + concurrencpp::shared_result sr(std::move(result)); + + const auto thread_id_0 = thread::get_current_virtual_id(); + + auto done_result = co_await sr.resolve_via(executor, false); + + const auto thread_id_1 = thread::get_current_virtual_id(); + + assert_false(static_cast(result)); + assert_equal(thread_id_0, thread_id_1); + test_ready_result_costume_exception(std::move(done_result), id); + } + }; + + template + struct test_resolve_via_ready_result { + result operator()(std::shared_ptr executor) { + // result is ready (exception) + force_rescheduling = true + executor throws + // = inline execution, broken_task exception is thrown + auto result = result_factory::make_exceptional(); + concurrencpp::shared_result sr(std::move(result)); + + const auto thread_id_0 = thread::get_current_virtual_id(); + + try { + auto done_result = co_await sr.resolve_via(executor, true); + } catch (const errors::broken_task&) { + const auto thread_id_1 = thread::get_current_virtual_id(); + + assert_equal(thread_id_0, thread_id_1); + assert_false(static_cast(result)); + co_return; + } catch (...) 
{ + } + + assert_false(true); + } + }; + + template + struct test_resolve_via_not_ready_result { + result operator()(std::shared_ptr manual_executor, std::shared_ptr thread_executor); + }; + + template + struct test_resolve_via_not_ready_result { + // result is not ready (completes with a value) + executor doesn't throw + // = rescheduling, result is returned + + private: + uintptr_t m_launcher_thread_id = 0; + uintptr_t m_setting_thread_id = 0; + uintptr_t m_resuming_thread_id = 0; + + result inner_task(std::shared_ptr manual_executor, std::shared_ptr thread_executor) { + m_launcher_thread_id = thread::get_current_virtual_id(); + + auto result = manual_executor->submit([]() -> decltype(auto) { + return result_factory::get(); + }); + concurrencpp::shared_result sr(std::move(result)); + + auto done_result = co_await sr.resolve_via(thread_executor, true); + + m_resuming_thread_id = thread::get_current_virtual_id(); + test_ready_result(std::move(done_result)); + } + + public: + result operator()(std::shared_ptr manual_executor, std::shared_ptr thread_executor) { + assert_true(manual_executor->empty()); + + auto result = inner_task(manual_executor, thread_executor); + + co_await thread_executor->submit([this, manual_executor]() mutable { + m_setting_thread_id = thread::get_current_virtual_id(); + assert_true(manual_executor->loop_once()); + }); + + co_await result; + + assert_not_equal(m_launcher_thread_id, m_setting_thread_id); + assert_not_equal(m_launcher_thread_id, m_resuming_thread_id); + assert_not_equal(m_setting_thread_id, m_resuming_thread_id); + } + }; + + template + struct test_resolve_via_not_ready_result { + // result is not ready (completes with a value) + executor throws + // = resumed inline in the setting thread, errors::broken_task is thrown. + + private: + uintptr_t m_launcher_thread_id = 0; + uintptr_t m_setting_thread_id = 0; + uintptr_t m_resuming_thread_id = 0; + + result inner_task(std::shared_ptr manual_executor, std::shared_ptr throwing_executor) { + m_launcher_thread_id = thread::get_current_virtual_id(); + + auto result = manual_executor->submit([]() -> decltype(auto) { + return result_factory::get(); + }); + concurrencpp::shared_result sr(std::move(result)); + + try { + auto done_result = co_await sr.resolve_via(throwing_executor, true); + } catch (const errors::broken_task&) { + m_resuming_thread_id = thread::get_current_virtual_id(); + co_return; + } catch (...) 
{ + } + + assert_false(true); + } + + public: + result operator()(std::shared_ptr manual_executor, + std::shared_ptr throwing_executor, + std::shared_ptr thread_executor) { + assert_true(manual_executor->empty()); + + auto result = inner_task(manual_executor, throwing_executor); + + co_await thread_executor->submit([this, manual_executor]() mutable { + m_setting_thread_id = concurrencpp::details::thread::get_current_virtual_id(); + assert_true(manual_executor->loop_once()); + }); + + co_await result; + + assert_not_equal(m_launcher_thread_id, m_setting_thread_id); + assert_not_equal(m_launcher_thread_id, m_resuming_thread_id); + assert_equal(m_setting_thread_id, m_resuming_thread_id); + } + }; + + template + struct test_resolve_via_not_ready_result { + // result is not ready (completes with an exception) + executor doesn't throw + // = rescheduling, asynchronous exception is returned + + private: + uintptr_t m_launcher_thread_id = 0; + uintptr_t m_setting_thread_id = 0; + uintptr_t m_resuming_thread_id = 0; + + result inner_task(std::shared_ptr manual_executor, std::shared_ptr thread_executor) { + m_launcher_thread_id = thread::get_current_virtual_id(); + + const size_t id = 1234567; + auto result = manual_executor->submit([id]() -> decltype(auto) { + throw costume_exception(id); + return result_factory::get(); + }); + concurrencpp::shared_result sr(std::move(result)); + + auto done_result = co_await sr.resolve_via(thread_executor, true); + + m_resuming_thread_id = thread::get_current_virtual_id(); + test_ready_result_costume_exception(std::move(done_result), id); + } + + public: + result operator()(std::shared_ptr manual_executor, std::shared_ptr thread_executor) { + assert_true(manual_executor->empty()); + + auto result = inner_task(manual_executor, thread_executor); + + co_await thread_executor->submit([this, manual_executor]() mutable { + m_setting_thread_id = concurrencpp::details::thread::get_current_virtual_id(); + assert_true(manual_executor->loop_once()); + }); + + co_await result; + + assert_not_equal(m_launcher_thread_id, m_setting_thread_id); + assert_not_equal(m_launcher_thread_id, m_resuming_thread_id); + assert_not_equal(m_setting_thread_id, m_resuming_thread_id); + } + }; + + template + struct test_resolve_via_not_ready_result { + // result is not ready (completes with an exception) + executor throws + // = resumed inline in the setting thread, errors::broken_task is thrown. + + private: + uintptr_t m_launcher_thread_id = 0; + uintptr_t m_setting_thread_id = 0; + uintptr_t m_resuming_thread_id = 0; + + result inner_task(std::shared_ptr manual_executor, std::shared_ptr throwing_executor) { + m_launcher_thread_id = thread::get_current_virtual_id(); + + auto result = manual_executor->submit([]() -> decltype(auto) { + return result_factory::throw_ex(); + }); + concurrencpp::shared_result sr(std::move(result)); + + try { + co_await sr.resolve_via(throwing_executor, true); + } catch (const errors::broken_task&) { + m_resuming_thread_id = thread::get_current_virtual_id(); + co_return; + } catch (...) 
{
+            }
+
+            assert_false(true);
+        }
+
+    public:
+        result operator()(std::shared_ptr manual_executor,
+                          std::shared_ptr throwing_executor,
+                          std::shared_ptr thread_executor) {
+            assert_true(manual_executor->empty());
+
+            auto result = inner_task(manual_executor, throwing_executor);
+
+            co_await thread_executor->submit([this, manual_executor]() mutable {
+                m_setting_thread_id = thread::get_current_virtual_id();
+                assert_true(manual_executor->loop_once());
+            });
+
+            co_await result;
+
+            assert_not_equal(m_launcher_thread_id, m_setting_thread_id);
+            assert_not_equal(m_launcher_thread_id, m_resuming_thread_id);
+            assert_equal(m_setting_thread_id, m_resuming_thread_id);
+        }
+    };
+
+} // namespace concurrencpp::tests
+
+template<class type>
+void concurrencpp::tests::test_shared_result_resolve_via_impl() {
+    // empty result throws
+    assert_throws_with_error_message(
+        [] {
+            auto executor = std::make_shared();
+            shared_result().resolve_via(executor);
+        },
+        concurrencpp::details::consts::k_shared_result_resolve_via_error_msg);
+
+    // null executor throws
+    assert_throws_with_error_message(
+        [] {
+            auto result = result_factory::make_ready();
+            shared_result(std::move(result)).resolve_via({});
+        },
+        concurrencpp::details::consts::k_shared_result_resolve_via_executor_null_error_msg);
+
+    // resolve_via can be called multiple times
+    {
+        shared_result sr(result_factory::make_ready());
+        auto executor = std::make_shared();
+
+        for (size_t i = 0; i < 6; i++) {
+            sr.resolve_via(executor);
+            assert_true(sr);
+        }
+    }
+
+    auto thread_executor = std::make_shared();
+    auto throwing_executor = std::make_shared();
+    auto manual_executor = std::make_shared();
+
+    executor_shutdowner es0(thread_executor), es1(throwing_executor), es2(manual_executor);
+
+    test_resolve_via_ready_result()(thread_executor).get();
+    test_resolve_via_ready_result()(throwing_executor).get();
+    test_resolve_via_ready_result()(thread_executor).get();
+    test_resolve_via_ready_result()(throwing_executor).get();
+
+    test_resolve_via_ready_result()(thread_executor).get();
+    test_resolve_via_ready_result()(throwing_executor).get();
+    test_resolve_via_ready_result()(thread_executor).get();
+    test_resolve_via_ready_result()(throwing_executor).get();
+
+    test_resolve_via_not_ready_result()(manual_executor, thread_executor).get();
+    test_resolve_via_not_ready_result()(manual_executor, throwing_executor, thread_executor).get();
+    test_resolve_via_not_ready_result()(manual_executor, thread_executor).get();
+    test_resolve_via_not_ready_result()(manual_executor, throwing_executor, thread_executor).get();
+}
+
+void concurrencpp::tests::test_shared_result_resolve_via() {
+    test_shared_result_resolve_via_impl();
+    test_shared_result_resolve_via_impl();
+    test_shared_result_resolve_via_impl();
+    test_shared_result_resolve_via_impl();
+    test_shared_result_resolve_via_impl();
+}
+
+void concurrencpp::tests::test_shared_result_resolve_all() {
+    tester tester("shared_result::resolve, shared_result::resolve_via test");
+
+    tester.add_step("resolve", test_shared_result_resolve);
+    tester.add_step("resolve_via", test_shared_result_resolve_via);
+
+    tester.launch_test();
+}
diff --git a/test/source/tests/result_tests/shared_result_tests.cpp b/test/source/tests/result_tests/shared_result_tests.cpp
new file mode 100644
index 00000000..ded8e75c
--- /dev/null
+++ b/test/source/tests/result_tests/shared_result_tests.cpp
@@ -0,0 +1,895 @@
+#include "concurrencpp/concurrencpp.h"
+#include "tests/all_tests.h"
+
+#include "tests/test_utils/test_ready_result.h"
+#include
"tests/test_utils/result_factory.h" + +#include "tester/tester.h" +#include "helpers/assertions.h" +#include "helpers/random.h" + +namespace concurrencpp::tests { + template + void test_shared_result_constructor_impl(); + void test_shared_result_constructor(); + + template + void test_shared_result_status_impl(); + void test_shared_result_status(); + + template + void test_shared_result_get_impl(); + void test_shared_result_get(); + + template + void test_shared_result_wait_impl(); + void test_shared_result_wait(); + + template + void test_shared_result_wait_for_impl(); + void test_shared_result_wait_for(); + + template + void test_shared_result_wait_until_impl(); + void test_shared_result_wait_until(); + + template + void test_shared_result_assignment_operator_empty_to_empty_move(); + template + void test_shared_result_assignment_operator_non_empty_to_non_empty_move(); + template + void test_shared_result_assignment_operator_empty_to_non_empty_move(); + template + void test_shared_result_assignment_operator_non_empty_to_empty_move(); + template + void test_shared_result_assignment_operator_assign_to_self_move(); + + template + void test_shared_result_assignment_operator_empty_to_empty_copy(); + template + void test_shared_result_assignment_operator_non_empty_to_non_empty_copy(); + template + void test_shared_result_assignment_operator_empty_to_non_empty_copy(); + template + void test_shared_result_assignment_operator_non_empty_to_empty_copy(); + template + void test_shared_result_assignment_operator_assign_to_self_copy(); + + template + void test_shared_result_assignment_operator_impl(); + void test_shared_result_assignment_operator(); +} // namespace concurrencpp::tests + +using concurrencpp::result; +using concurrencpp::result_promise; +using namespace std::chrono; +using namespace concurrencpp::tests; + +template +void concurrencpp::tests::test_shared_result_constructor_impl() { + shared_result default_constructed_result; + assert_false(static_cast(default_constructed_result)); + + // from result + result_promise rp; + auto result = rp.get_result(); + shared_result sr(std::move(result)); + + assert_true(static_cast(sr)); + assert_equal(sr.status(), result_status::idle); + + // copy + auto copy_result = sr; + assert_true(static_cast(copy_result)); + assert_equal(sr.status(), result_status::idle); + + // move + auto new_result = std::move(sr); + assert_false(static_cast(sr)); + assert_true(static_cast(new_result)); + assert_equal(new_result.status(), result_status::idle); +} + +void concurrencpp::tests::test_shared_result_constructor() { + test_shared_result_constructor_impl(); + test_shared_result_constructor_impl(); + test_shared_result_constructor_impl(); + test_shared_result_constructor_impl(); + test_shared_result_constructor_impl(); +} + +template +void concurrencpp::tests::test_shared_result_status_impl() { + // empty result throws + { + assert_throws_with_error_message( + [] { + shared_result().status(); + }, + concurrencpp::details::consts::k_shared_result_status_error_msg); + } + + // idle result + { + result_promise rp; + auto result = rp.get_result(); + shared_result sr(std::move(result)); + assert_equal(sr.status(), result_status::idle); + } + + // ready by value + { + result_promise rp; + auto result = rp.get_result(); + shared_result sr(std::move(result)); + rp.set_from_function(result_factory::get); + assert_equal(sr.status(), result_status::value); + } + + // exception result + { + result_promise rp; + auto result = rp.get_result(); + shared_result sr(std::move(result)); 
+ rp.set_from_function(result_factory::throw_ex); + assert_equal(sr.status(), result_status::exception); + } + + // multiple calls of status are ok + { + result_promise rp; + auto result = rp.get_result(); + shared_result sr(std::move(result)); + rp.set_from_function(result_factory::get); + + for (size_t i = 0; i < 10; i++) { + assert_equal(sr.status(), result_status::value); + } + } +} + +void concurrencpp::tests::test_shared_result_status() { + test_shared_result_status_impl(); + test_shared_result_status_impl(); + test_shared_result_status_impl(); + test_shared_result_status_impl(); + test_shared_result_status_impl(); +} + +template +void concurrencpp::tests::test_shared_result_get_impl() { + // empty result throws + { + assert_throws_with_error_message( + [] { + shared_result().get(); + }, + concurrencpp::details::consts::k_shared_result_get_error_msg); + } + + // get blocks until value is present + { + result_promise rp; + auto result = rp.get_result(); + shared_result sr(std::move(result)); + + const auto unblocking_time = high_resolution_clock::now() + milliseconds(150); + + std::thread thread([rp = std::move(rp), unblocking_time]() mutable { + std::this_thread::sleep_until(unblocking_time); + rp.set_from_function(result_factory::get); + }); + + sr.get(); + const auto now = high_resolution_clock::now(); + + assert_false(static_cast(result)); + assert_bigger_equal(now, unblocking_time); + assert_smaller(now, unblocking_time + seconds(1)); + + test_ready_result(std::move(sr)); + thread.join(); + } + + // get blocks until exception is present and empties the result + { + result_promise rp; + auto result = rp.get_result(); + shared_result sr(std::move(result)); + const auto id = 12345689; + const auto unblocking_time = high_resolution_clock::now() + milliseconds(150); + + std::thread thread([rp = std::move(rp), id, unblocking_time]() mutable { + std::this_thread::sleep_until(unblocking_time); + rp.set_exception(std::make_exception_ptr(costume_exception(id))); + }); + + try { + sr.get(); + } catch (...) { + } + + const auto now = high_resolution_clock::now(); + + assert_false(static_cast(result)); + assert_bigger_equal(now, unblocking_time); + assert_smaller(now, unblocking_time + seconds(1)); + test_ready_result_costume_exception(std::move(sr), id); + + thread.join(); + } + + // if result is ready with value, get returns immediately + { + result_promise rp; + auto result = rp.get_result(); + shared_result sr(std::move(result)); + + rp.set_from_function(result_factory::get); + + const auto time_before = high_resolution_clock::now(); + + sr.get(); + + const auto time_after = high_resolution_clock::now(); + const auto total_blocking_time = duration_cast(time_after - time_before).count(); + assert_false(static_cast(result)); + assert_smaller_equal(total_blocking_time, 10); + test_ready_result(std::move(sr)); + } + + // if result is ready with exception, get returns immediately + { + result_promise rp; + auto result = rp.get_result(); + shared_result sr(std::move(result)); + + const auto id = 123456789; + + rp.set_exception(std::make_exception_ptr(costume_exception(id))); + + const auto time_before = high_resolution_clock::now(); + + try { + sr.get(); + } catch (...) 
{ + } + + const auto time_after = high_resolution_clock::now(); + const auto total_blocking_time = duration_cast(time_after - time_before).count(); + + assert_smaller_equal(total_blocking_time, 10); + assert_false(static_cast(result)); + test_ready_result_costume_exception(std::move(sr), id); + } + + // get can be called multiple times + { + shared_result sr_val(result_factory::make_ready()); + + for (size_t i = 0; i < 6; i++) { + sr_val.get(); + assert_true(static_cast(sr_val)); + } + + shared_result sr_ex(result_factory::make_exceptional()); + + for (size_t i = 0; i < 6; i++) { + try { + sr_ex.get(); + } catch (...) { + } + assert_true(static_cast(sr_ex)); + } + } +} + +void concurrencpp::tests::test_shared_result_get() { + test_shared_result_get_impl(); + test_shared_result_get_impl(); + test_shared_result_get_impl(); + test_shared_result_get_impl(); + test_shared_result_get_impl(); +} + +template +void concurrencpp::tests::test_shared_result_wait_impl() { + // empty result throws + { + assert_throws_with_error_message( + [] { + shared_result().wait(); + }, + concurrencpp::details::consts::k_shared_result_wait_error_msg); + } + + // wait blocks until value is present + { + result_promise rp; + auto result = rp.get_result(); + shared_result sr(std::move(result)); + const auto unblocking_time = high_resolution_clock::now() + milliseconds(150); + + std::thread thread([rp = std::move(rp), unblocking_time]() mutable { + std::this_thread::sleep_until(unblocking_time); + rp.set_from_function(result_factory::get); + }); + + sr.wait(); + const auto now = high_resolution_clock::now(); + + assert_bigger_equal(now, unblocking_time); + assert_smaller(now, unblocking_time + seconds(1)); + + test_ready_result(std::move(sr)); + thread.join(); + } + + // wait blocks until exception is present + { + result_promise rp; + auto result = rp.get_result(); + shared_result sr(std::move(result)); + const auto id = 123456789; + const auto unblocking_time = high_resolution_clock::now() + milliseconds(150); + + std::thread thread([rp = std::move(rp), id, unblocking_time]() mutable { + std::this_thread::sleep_until(unblocking_time); + rp.set_exception(std::make_exception_ptr(costume_exception(id))); + }); + + sr.wait(); + const auto now = high_resolution_clock::now(); + + assert_bigger_equal(now, unblocking_time); + assert_smaller(now, unblocking_time + seconds(1)); + + test_ready_result_costume_exception(std::move(sr), id); + thread.join(); + } + + // if result is ready with value, wait returns immediately + { + result_promise rp; + auto result = rp.get_result(); + shared_result sr(std::move(result)); + rp.set_from_function(result_factory::get); + + const auto time_before = high_resolution_clock::now(); + sr.wait(); + const auto time_after = high_resolution_clock::now(); + const auto total_blocking_time = duration_cast(time_after - time_before).count(); + + assert_smaller_equal(total_blocking_time, 5); + test_ready_result(std::move(sr)); + } + + // if result is ready with exception, wait returns immediately + { + result_promise rp; + auto result = rp.get_result(); + shared_result sr(std::move(result)); + + const auto id = 123456789; + + rp.set_exception(std::make_exception_ptr(costume_exception(id))); + + const auto time_before = high_resolution_clock::now(); + sr.wait(); + const auto time_after = high_resolution_clock::now(); + const auto total_blocking_time = duration_cast(time_after - time_before).count(); + + assert_smaller_equal(total_blocking_time, 5); + test_ready_result_costume_exception(std::move(sr), id); 
+ } + + // multiple calls to wait are ok + { + result_promise rp; + auto result = rp.get_result(); + shared_result sr(std::move(result)); + + const auto unblocking_time = high_resolution_clock::now() + milliseconds(50); + + std::thread thread([rp = std::move(rp), unblocking_time]() mutable { + std::this_thread::sleep_until(unblocking_time); + rp.set_from_function(result_factory::get); + }); + + for (size_t i = 0; i < 10; i++) { + sr.wait(); + } + + test_ready_result(std::move(sr)); + thread.join(); + } +} + +void concurrencpp::tests::test_shared_result_wait() { + test_shared_result_wait_impl(); + test_shared_result_wait_impl(); + test_shared_result_wait_impl(); + test_shared_result_wait_impl(); + test_shared_result_wait_impl(); +} + +template +void concurrencpp::tests::test_shared_result_wait_for_impl() { + // empty result throws + { + assert_throws_with_error_message( + [] { + shared_result().wait_for(seconds(1)); + }, + concurrencpp::details::consts::k_shared_result_wait_for_error_msg); + } + + // if the result is ready by value, don't block and return status::value + { + result_promise rp; + auto result = rp.get_result(); + shared_result sr(std::move(result)); + + rp.set_from_function(result_factory::get); + + const auto before = high_resolution_clock::now(); + const auto status = sr.wait_for(seconds(10)); + const auto after = high_resolution_clock::now(); + const auto time = duration_cast(after - before).count(); + + assert_smaller_equal(time, 20); + assert_equal(status, result_status::value); + test_ready_result(std::move(sr)); + } + + // if the result is ready by exception, don't block and return status::exception + { + result_promise rp; + auto result = rp.get_result(); + shared_result sr(std::move(result)); + + const size_t id = 123456789; + + rp.set_exception(std::make_exception_ptr(costume_exception(id))); + + const auto before = high_resolution_clock::now(); + const auto status = sr.wait_for(seconds(10)); + const auto after = high_resolution_clock::now(); + const auto time = duration_cast(after - before).count(); + + assert_smaller_equal(time, 20); + assert_equal(status, result_status::exception); + test_ready_result_costume_exception(std::move(sr), id); + } + + // if timeout reaches and no value/exception - return status::idle + { + result_promise rp; + auto result = rp.get_result(); + shared_result sr(std::move(result)); + + const auto waiting_time = milliseconds(50); + const auto before = high_resolution_clock::now(); + const auto status = sr.wait_for(waiting_time); + const auto after = high_resolution_clock::now(); + const auto time = duration_cast(after - before); + + assert_equal(status, result_status::idle); + assert_bigger_equal(time, waiting_time); + } + + // if result is set before timeout, unblock, and return status::value + { + result_promise rp; + auto result = rp.get_result(); + shared_result sr(std::move(result)); + + const auto unblocking_time = high_resolution_clock::now() + milliseconds(150); + + std::thread thread([rp = std::move(rp), unblocking_time]() mutable { + std::this_thread::sleep_until(unblocking_time); + rp.set_from_function(result_factory::get); + }); + + sr.wait_for(seconds(10)); + const auto now = high_resolution_clock::now(); + + test_ready_result(std::move(sr)); + assert_bigger_equal(now, unblocking_time); + assert_smaller(now, unblocking_time + seconds(1)); + thread.join(); + } + + // if exception is set before timeout, unblock, and return status::exception + { + result_promise rp; + auto result = rp.get_result(); + shared_result 
sr(std::move(result)); + const auto id = 123456789; + const auto unblocking_time = high_resolution_clock::now() + milliseconds(150); + + std::thread thread([rp = std::move(rp), unblocking_time, id]() mutable { + std::this_thread::sleep_until(unblocking_time); + rp.set_exception(std::make_exception_ptr(costume_exception(id))); + }); + + sr.wait_for(seconds(10)); + const auto now = high_resolution_clock::now(); + + test_ready_result_costume_exception(std::move(sr), id); + assert_bigger_equal(now, unblocking_time); + assert_smaller(now, unblocking_time + seconds(1)); + thread.join(); + } + + // multiple calls of wait_for are ok + { + result_promise rp; + auto result = rp.get_result(); + shared_result sr(std::move(result)); + + const auto unblocking_time = high_resolution_clock::now() + milliseconds(150); + + std::thread thread([rp = std::move(rp), unblocking_time]() mutable { + std::this_thread::sleep_until(unblocking_time); + rp.set_from_function(result_factory::get); + }); + + for (size_t i = 0; i < 10; i++) { + sr.wait_for(milliseconds(10)); + } + + thread.join(); + } +} + +void concurrencpp::tests::test_shared_result_wait_for() { + test_shared_result_wait_for_impl(); + test_shared_result_wait_for_impl(); + test_shared_result_wait_for_impl(); + test_shared_result_wait_for_impl(); + test_shared_result_wait_for_impl(); +} + +template +void concurrencpp::tests::test_shared_result_wait_until_impl() { + // empty result throws + { + assert_throws_with_error_message( + [] { + const auto later = high_resolution_clock::now() + seconds(10); + shared_result().wait_until(later); + }, + concurrencpp::details::consts::k_shared_result_wait_until_error_msg); + } + + // if time_point <= now, the function is equivalent to result::status + { + result_promise rp_idle, rp_val, rp_err; + result idle_result = rp_idle.get_result(), value_result = rp_val.get_result(), err_result = rp_err.get_result(); + shared_result shared_idle_result(std::move(idle_result)), shared_value_result(std::move(value_result)), shared_err_result(std::move(err_result)); + + rp_val.set_from_function(result_factory::get); + rp_err.set_from_function(result_factory::throw_ex); + + const auto now = high_resolution_clock::now(); + std::this_thread::sleep_for(std::chrono::milliseconds(5)); + + assert_equal(shared_idle_result.wait_until(now), concurrencpp::result_status::idle); + assert_equal(shared_value_result.wait_until(now), concurrencpp::result_status::value); + assert_equal(shared_err_result.wait_until(now), concurrencpp::result_status::exception); + } + + // if the result is ready by value, don't block and return status::value + { + result_promise rp; + auto result = rp.get_result(); + shared_result sr(std::move(result)); + + rp.set_from_function(result_factory::get); + + const auto later = high_resolution_clock::now() + seconds(10); + + const auto before = high_resolution_clock::now(); + const auto status = sr.wait_until(later); + const auto after = high_resolution_clock::now(); + + const auto ms = duration_cast(after - before).count(); + + assert_smaller_equal(ms, 20); + assert_equal(status, result_status::value); + test_ready_result(std::move(sr)); + } + + // if the result is ready by exception, don't block and return status::exception + { + result_promise rp; + auto result = rp.get_result(); + shared_result sr(std::move(result)); + const size_t id = 123456789; + + rp.set_exception(std::make_exception_ptr(costume_exception(id))); + + const auto later = high_resolution_clock::now() + seconds(10); + + const auto before = 
high_resolution_clock::now(); + const auto status = sr.wait_until(later); + const auto after = high_resolution_clock::now(); + + const auto time = duration_cast(after - before).count(); + + assert_smaller_equal(time, 20); + assert_equal(status, result_status::exception); + test_ready_result_costume_exception(std::move(sr), id); + } + + // if timeout reaches and no value/exception - return status::idle + { + result_promise rp; + auto result = rp.get_result(); + shared_result sr(std::move(result)); + + const auto later = high_resolution_clock::now() + milliseconds(150); + const auto status = sr.wait_until(later); + const auto now = high_resolution_clock::now(); + + const auto ms = duration_cast(now - later).count(); + assert_equal(status, result_status::idle); + assert_bigger_equal(now, later); + } + + // if result is set before timeout, unblock, and return status::value + { + result_promise rp; + auto result = rp.get_result(); + shared_result sr(std::move(result)); + + const auto unblocking_time = high_resolution_clock::now() + milliseconds(150); + const auto later = high_resolution_clock::now() + seconds(10); + + std::thread thread([rp = std::move(rp), unblocking_time]() mutable { + std::this_thread::sleep_until(unblocking_time); + rp.set_from_function(result_factory::get); + }); + + sr.wait_until(later); + const auto now = high_resolution_clock::now(); + + test_ready_result(std::move(sr)); + assert_bigger_equal(now, unblocking_time); + assert_smaller(now, unblocking_time + seconds(1)); + thread.join(); + } + + // if exception is set before timeout, unblock, and return status::exception + { + result_promise rp; + auto result = rp.get_result(); + shared_result sr(std::move(result)); + const auto id = 123456789; + + const auto unblocking_time = high_resolution_clock::now() + milliseconds(150); + const auto later = high_resolution_clock::now() + seconds(10); + + std::thread thread([rp = std::move(rp), unblocking_time, id]() mutable { + std::this_thread::sleep_until(unblocking_time); + rp.set_exception(std::make_exception_ptr(costume_exception(id))); + }); + + sr.wait_until(later); + const auto now = high_resolution_clock::now(); + + test_ready_result_costume_exception(std::move(sr), id); + assert_bigger_equal(now, unblocking_time); + assert_smaller_equal(now, unblocking_time + seconds(1)); + thread.join(); + } + + // multiple calls to wait_until are ok + { + result_promise rp; + auto result = rp.get_result(); + shared_result sr(std::move(result)); + + const auto unblocking_time = high_resolution_clock::now() + milliseconds(150); + + std::thread thread([rp = std::move(rp), unblocking_time]() mutable { + std::this_thread::sleep_until(unblocking_time); + rp.set_from_function(result_factory::get); + }); + + for (size_t i = 0; i < 10; i++) { + const auto later = high_resolution_clock::now() + milliseconds(50); + sr.wait_until(later); + } + + thread.join(); + } +} + +void concurrencpp::tests::test_shared_result_wait_until() { + test_shared_result_wait_until_impl(); + test_shared_result_wait_until_impl(); + test_shared_result_wait_until_impl(); + test_shared_result_wait_until_impl(); + test_shared_result_wait_until_impl(); +} + +template +void concurrencpp::tests::test_shared_result_assignment_operator_empty_to_empty_move() { + shared_result result_0, result_1; + result_0 = std::move(result_1); + assert_false(static_cast(result_0)); + assert_false(static_cast(result_1)); +} + +template +void concurrencpp::tests::test_shared_result_assignment_operator_non_empty_to_non_empty_move() { + result_promise 
rp_0, rp_1; + result result_0 = rp_0.get_result(), result_1 = rp_1.get_result(); + shared_result sr0(std::move(result_0)), sr1(std::move(result_1)); + + sr0 = std::move(sr1); + + assert_false(static_cast(sr1)); + assert_true(static_cast(sr0)); + + rp_0.set_from_function(result_factory::get); + assert_equal(sr0.status(), result_status::idle); + + rp_1.set_from_function(result_factory::get); + test_ready_result(std::move(sr0)); +} + +template +void concurrencpp::tests::test_shared_result_assignment_operator_empty_to_non_empty_move() { + result_promise rp_0; + result result_0 = rp_0.get_result(), result_1; + shared_result sr0(std::move(result_0)), sr1(std::move(result_1)); + + sr0 = std::move(sr1); + assert_false(static_cast(sr0)); + assert_false(static_cast(sr1)); +} + +template +void concurrencpp::tests::test_shared_result_assignment_operator_non_empty_to_empty_move() { + result_promise rp_1; + result result_0, result_1 = rp_1.get_result(); + shared_result sr0(std::move(result_0)), sr1(std::move(result_1)); + + sr0 = std::move(sr1); + assert_true(static_cast(sr0)); + assert_false(static_cast(sr1)); + + rp_1.set_from_function(result_factory::get); + test_ready_result(std::move(sr0)); +} + +template +void concurrencpp::tests::test_shared_result_assignment_operator_assign_to_self_move() { + shared_result empty; + + empty = std::move(empty); + assert_false(static_cast(empty)); + + result_promise rp_1; + auto res1 = rp_1.get_result(); + shared_result non_empty(std::move(res1)); + + non_empty = std::move(non_empty); + assert_true(static_cast(non_empty)); + + auto copy = non_empty; + copy = std::move(non_empty); + assert_true(static_cast(copy)); + assert_true(static_cast(non_empty)); +} + +template +void concurrencpp::tests::test_shared_result_assignment_operator_empty_to_empty_copy() { + shared_result result_0, result_1; + result_0 = result_1; + assert_false(static_cast(result_0)); + assert_false(static_cast(result_1)); +} + +template +void concurrencpp::tests::test_shared_result_assignment_operator_non_empty_to_non_empty_copy() { + result_promise rp_0, rp_1; + result result_0 = rp_0.get_result(), result_1 = rp_1.get_result(); + shared_result sr0(std::move(result_0)), sr1(std::move(result_1)); + + sr0 = sr1; + + assert_true(static_cast(sr1)); + assert_true(static_cast(sr0)); + + rp_0.set_from_function(result_factory::get); + assert_equal(sr0.status(), result_status::idle); + + rp_1.set_from_function(result_factory::get); + test_ready_result(std::move(sr0)); +} + +template +void concurrencpp::tests::test_shared_result_assignment_operator_empty_to_non_empty_copy() { + result_promise rp_0; + result result_0 = rp_0.get_result(), result_1; + shared_result sr0(std::move(result_0)), sr1(std::move(result_1)); + + sr0 = sr1; + assert_false(static_cast(sr0)); + assert_false(static_cast(sr1)); +} + +template +void concurrencpp::tests::test_shared_result_assignment_operator_non_empty_to_empty_copy() { + result_promise rp_1; + result result_0, result_1 = rp_1.get_result(); + shared_result sr0(std::move(result_0)), sr1(std::move(result_1)); + + sr0 = sr1; + assert_true(static_cast(sr0)); + assert_true(static_cast(sr1)); + + rp_1.set_from_function(result_factory::get); + test_ready_result(std::move(sr0)); +} + +template +void concurrencpp::tests::test_shared_result_assignment_operator_assign_to_self_copy() { + shared_result empty; + + empty = empty; + assert_false(static_cast(empty)); + + result_promise rp_1; + auto res1 = rp_1.get_result(); + shared_result non_empty(std::move(res1)); + + non_empty = non_empty; 
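+        // descriptive note (added): self copy-assignment must leave the shared_result engaged, as asserted below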
+ assert_true(static_cast(non_empty)); + + auto copy = non_empty; + copy = non_empty; + assert_true(static_cast(copy)); + assert_true(static_cast(non_empty)); +} + +template +void concurrencpp::tests::test_shared_result_assignment_operator_impl() { + test_shared_result_assignment_operator_empty_to_empty_move(); + test_shared_result_assignment_operator_non_empty_to_empty_move(); + test_shared_result_assignment_operator_empty_to_non_empty_move(); + test_shared_result_assignment_operator_non_empty_to_non_empty_move(); + test_shared_result_assignment_operator_assign_to_self_move(); + + test_shared_result_assignment_operator_empty_to_empty_copy(); + test_shared_result_assignment_operator_non_empty_to_empty_copy(); + test_shared_result_assignment_operator_empty_to_non_empty_copy(); + test_shared_result_assignment_operator_non_empty_to_non_empty_copy(); + test_shared_result_assignment_operator_assign_to_self_copy(); +} + +void concurrencpp::tests::test_shared_result_assignment_operator() { + test_shared_result_assignment_operator_impl(); + test_shared_result_assignment_operator_impl(); + test_shared_result_assignment_operator_impl(); + test_shared_result_assignment_operator_impl(); + test_shared_result_assignment_operator_impl(); +} + +void concurrencpp::tests::test_shared_result() { + tester tester("shared_result test"); + + tester.add_step("constructor", test_shared_result_constructor); + tester.add_step("status", test_shared_result_status); + tester.add_step("get", test_shared_result_get); + tester.add_step("wait", test_shared_result_wait); + tester.add_step("wait_for", test_shared_result_wait_for); + tester.add_step("wait_until", test_shared_result_wait_until); + tester.add_step("operator =", test_shared_result_assignment_operator); + + tester.launch_test(); +} diff --git a/test/source/thread_sanitizer/executors.cpp b/test/source/thread_sanitizer/executors.cpp index ffee63bf..e99ea6bd 100644 --- a/test/source/thread_sanitizer/executors.cpp +++ b/test/source/thread_sanitizer/executors.cpp @@ -1,174 +1,588 @@ #include "concurrencpp/concurrencpp.h" +#include "tests/test_utils/executor_shutdowner.h" +#include +#include #include -void test_worker_thread_executor(); -void test_thread_pool_executor(); -void test_thread_executor(); -void test_manual_executor(); +void test_executor_post(std::shared_ptr executor, size_t tasks_per_thread = 100'000); +void test_executor_submit(std::shared_ptr executor, size_t tasks_per_thread = 100'000); +void test_executor_bulk_post(std::shared_ptr executor, size_t tasks_per_thread = 100'000); +void test_executor_bulk_submit(std::shared_ptr executor, size_t tasks_per_thread = 100'000); + +void test_manual_executor_wait_for_task(std::shared_ptr executor); +void test_manual_executor_wait_for_task_for(std::shared_ptr executor); +void test_manual_executor_wait_for_tasks(std::shared_ptr executor); +void test_manual_executor_wait_for_tasks_for(std::shared_ptr executor); +void test_manual_executor_loop_once(std::shared_ptr executor); +void test_manual_executor_loop_once_for(std::shared_ptr executor); +void test_manual_executor_loop(std::shared_ptr executor); +void test_manual_executor_loop_for(std::shared_ptr executor); +void test_manual_executor_clear(std::shared_ptr executor); int main() { - std::cout << "concurrencpp::worker_thread_executor" << std::endl; - test_worker_thread_executor(); - std::cout << "====================================" << std::endl; - std::cout << "concurrencpp::thread_pool_executor" << std::endl; - test_thread_pool_executor(); - std::cout << 
"====================================" << std::endl; + concurrencpp::runtime runtime; + + { + std::cout << "\nStarting concurrencpp::inline_executor test.\n" << std::endl; + + auto ie = runtime.inline_executor(); + + test_executor_post(ie); + test_executor_submit(ie); + test_executor_bulk_post(ie); + test_executor_bulk_submit(ie); + } + + { + std::cout << "\nStarting concurrencpp::thread_pool_executor test.\n" << std::endl; + + auto tpe = runtime.thread_pool_executor(); + + test_executor_post(tpe); + test_executor_submit(tpe); + test_executor_bulk_post(tpe); + test_executor_bulk_submit(tpe); + } + + { + std::cout << "\nStarting concurrencpp::worker_thread_executor test.\n" << std::endl; + + test_executor_post(runtime.make_worker_thread_executor()); + test_executor_submit(runtime.make_worker_thread_executor()); + test_executor_bulk_post(runtime.make_worker_thread_executor()); + test_executor_bulk_submit(runtime.make_worker_thread_executor()); + } + + { + std::cout << "\nStarting concurrencpp::thread_executor test.\n" << std::endl; - std::cout << "concurrencpp::thread_executor" << std::endl; - test_thread_executor(); - std::cout << "====================================" << std::endl; + auto te = runtime.thread_executor(); - std::cout << "concurrencpp::manual_executor" << std::endl; - test_manual_executor(); - std::cout << "====================================" << std::endl; + test_executor_post(te, 216); + test_executor_submit(te, 216); + test_executor_bulk_post(te, 216); + test_executor_bulk_submit(te, 216); + } + + { + std::cout << "\nStarting concurrencpp::manual_executor test.\n" << std::endl; + + test_executor_post(runtime.make_manual_executor()); + test_executor_submit(runtime.make_manual_executor()); + test_executor_bulk_post(runtime.make_manual_executor()); + test_executor_bulk_submit(runtime.make_manual_executor()); + test_manual_executor_wait_for_task(runtime.make_manual_executor()); + test_manual_executor_wait_for_task_for(runtime.make_manual_executor()); + test_manual_executor_wait_for_tasks(runtime.make_manual_executor()); + test_manual_executor_wait_for_tasks_for(runtime.make_manual_executor()); + test_manual_executor_loop_once(runtime.make_manual_executor()); + test_manual_executor_loop_once_for(runtime.make_manual_executor()); + test_manual_executor_loop(runtime.make_manual_executor()); + test_manual_executor_loop_for(runtime.make_manual_executor()); + test_manual_executor_clear(runtime.make_manual_executor()); + } } using namespace concurrencpp; +using namespace std::chrono; + +/* + * When we test manual_executor, we need to inject to the test a group of threads that act + * as the consumers, otherwise no-one will execute the test tasks. 
+ */ +std::vector maybe_inject_consumer_threads(std::shared_ptr executor, + time_point production_tp, + const size_t num_of_threads, + const size_t tasks_per_thread) { + std::vector maybe_consumer_threads; + const auto maybe_manual_executor = std::dynamic_pointer_cast(executor); + + if (!static_cast(maybe_manual_executor)) { + return maybe_consumer_threads; + } + + maybe_consumer_threads.reserve(num_of_threads); + + for (size_t i = 0; i < num_of_threads; i++) { + maybe_consumer_threads.emplace_back([=] { + std::this_thread::sleep_until(production_tp - milliseconds(1)); + maybe_manual_executor->loop_for(tasks_per_thread, minutes(10)); + }); + } + + return maybe_consumer_threads; +} + +void test_executor_post(std::shared_ptr executor, size_t tasks_per_thread) { + std::cout << executor->name << "::post" << std::endl; + + const auto post_tp = system_clock::now() + milliseconds(200); + const auto num_of_threads = std::thread::hardware_concurrency() * 4; + const auto total_task_count = tasks_per_thread * num_of_threads; + + std::latch latch(total_task_count); -void worker_thread_task(std::shared_ptr (&executors)[16], - std::atomic_size_t& counter, - std::shared_ptr wc) { - const auto c = counter.fetch_add(1, std::memory_order_relaxed); + std::vector poster_threads; + poster_threads.resize(num_of_threads); - if (c >= 10'000'000) { - if (c == 10'000'000) { - wc->notify(); - } + for (auto& thread : poster_threads) { + thread = std::thread([=, &latch] { + std::this_thread::sleep_until(post_tp); - return; + for (size_t i = 0; i < tasks_per_thread; i++) { + executor->post([&latch]() mutable { + latch.count_down(); + }); + } + }); } - const auto worker_pos = ::rand() % std::size(executors); - auto& executor = executors[worker_pos]; + auto maybe_consumer_threads = maybe_inject_consumer_threads(executor, post_tp, num_of_threads, tasks_per_thread); - try { - executor->post(worker_thread_task, std::ref(executors), std::ref(counter), wc); - } catch (const concurrencpp::errors::executor_shutdown&) { - return; + latch.wait(); + + for (auto& thread : poster_threads) { + thread.join(); + } + + for (auto& thread : maybe_consumer_threads) { + thread.join(); } + + std::cout << "===================================" << std::endl; } -void test_worker_thread_executor() { - concurrencpp::runtime runtime; +void test_executor_submit(std::shared_ptr executor, size_t tasks_per_thread) { + std::cout << executor->name << "::submit" << std::endl; + + const auto submit_tp = system_clock::now() + milliseconds(200); + const auto num_of_threads = std::thread::hardware_concurrency() * 4; - std::srand(::time(nullptr)); - std::shared_ptr executors[16]; - std::atomic_size_t counter = 0; - auto wc = std::make_shared(); + std::vector submitter_threads; + submitter_threads.resize(num_of_threads); - for (auto& executor : executors) { - executor = runtime.make_worker_thread_executor(); + for (auto& thread : submitter_threads) { + thread = std::thread([=]() mutable { + std::vector> results; + results.reserve(tasks_per_thread); + + std::this_thread::sleep_until(submit_tp); + + for (size_t i = 0; i < tasks_per_thread; i++) { + results.emplace_back(executor->submit([i] { + return i; + })); + } + + for (size_t i = 0; i < tasks_per_thread; i++) { + const auto val = results[i].get(); + if (val != i) { + std::cerr << "submit test failed, submitted " << i << " got " << val << std::endl; + std::abort(); + } + } + }); + } + + auto maybe_consumer_threads = maybe_inject_consumer_threads(executor, submit_tp, num_of_threads, tasks_per_thread); + + for 
(auto& thread : submitter_threads) { + thread.join(); } - for (size_t i = 0; i < 16; i++) { - executors[i]->post(worker_thread_task, std::ref(executors), std::ref(counter), wc); + for (auto& thread : maybe_consumer_threads) { + thread.join(); } - wc->wait(); + std::cout << "===================================" << std::endl; } -void thread_pool_task(std::shared_ptr tpe, std::atomic_size_t& counter, std::shared_ptr wc) { - const auto c = counter.fetch_add(1, std::memory_order_relaxed); +void test_executor_bulk_post(std::shared_ptr executor, size_t tasks_per_thread) { + std::cout << executor->name << "::bulk_post" << std::endl; + + const auto post_tp = system_clock::now() + milliseconds(250); + const auto num_of_threads = std::thread::hardware_concurrency() * 4; + const auto total_task_count = tasks_per_thread * num_of_threads; + + std::latch latch(total_task_count); + + std::vector poster_threads; + poster_threads.resize(num_of_threads); + + for (auto& thread : poster_threads) { + thread = std::thread([=, &latch] { + auto task = [&latch]() mutable { + latch.count_down(); + }; - if (c >= 10'000'000) { - if (c == 10'000'000) { - wc->notify(); - } + std::vector tasks; + tasks.reserve(tasks_per_thread); - return; + for (size_t i = 0; i < tasks_per_thread; i++) { + tasks.emplace_back(task); + } + + std::this_thread::sleep_until(post_tp); + + executor->bulk_post(tasks); + }); } - try { - tpe->post(thread_pool_task, tpe, std::ref(counter), wc); - } catch (const concurrencpp::errors::executor_shutdown&) { - return; + auto maybe_consumer_threads = maybe_inject_consumer_threads(executor, post_tp, num_of_threads, tasks_per_thread); + + latch.wait(); + + for (auto& thread : poster_threads) { + thread.join(); } + + for (auto& thread : maybe_consumer_threads) { + thread.join(); + } + + std::cout << "===================================" << std::endl; } -void test_thread_pool_executor() { - concurrencpp::runtime runtime; - auto tpe = runtime.thread_pool_executor(); - std::atomic_size_t counter = 0; - auto wc = std::make_shared(); - const auto max_concurrency_level = tpe->max_concurrency_level(); +struct val_returner { + const size_t val; + + val_returner(size_t val) noexcept : val(val) {} + val_returner(const val_returner&) noexcept = default; + + size_t operator()() const noexcept { + return val; + } +}; + +void test_executor_bulk_submit(std::shared_ptr executor, size_t tasks_per_thread) { + std::cout << executor->name << "::bulk_submit" << std::endl; + + const auto submit_tp = system_clock::now() + milliseconds(250); + const auto num_of_threads = std::thread::hardware_concurrency() * 4; + + std::vector submitter_threads; + submitter_threads.resize(num_of_threads); + + for (auto& thread : submitter_threads) { + thread = std::thread([=]() mutable { + std::vector tasks; + tasks.reserve(tasks_per_thread); + + for (size_t i = 0; i < tasks_per_thread; i++) { + tasks.emplace_back(i); + } + + std::this_thread::sleep_until(submit_tp); + + auto results = executor->bulk_submit(tasks); + + for (size_t i = 0; i < tasks_per_thread; i++) { + const auto val = results[i].get(); + if (val != i) { + std::cerr << "bulk_submit test failed, submitted " << i << " got " << val << std::endl; + std::abort(); + } + } + }); + } - for (size_t i = 0; i < max_concurrency_level; i++) { - tpe->post(thread_pool_task, tpe, std::ref(counter), wc); + auto maybe_consumer_threads = maybe_inject_consumer_threads(executor, submit_tp, num_of_threads, tasks_per_thread); + + for (auto& thread : submitter_threads) { + thread.join(); + } + + for 
(auto& thread : maybe_consumer_threads) { + thread.join(); } - wc->wait(); + std::cout << "===================================" << std::endl; } -void thread_task(std::shared_ptr tp, std::atomic_size_t& counter, std::shared_ptr wc) { - const auto c = counter.fetch_add(1, std::memory_order_relaxed); - if (c >= 1'024 * 4) { - if (c == 1'024 * 4) { - wc->notify(); - } +void test_manual_executor_wait_for_task(std::shared_ptr executor) { + std::cout << "manual_executor::wait_for_task" << std::endl; - return; + const auto num_of_threads = std::thread::hardware_concurrency() * 10; + + std::vector waiter_threads; + waiter_threads.resize(num_of_threads); + + for (auto& thread : waiter_threads) { + thread = std::thread([executor]() mutable { + executor->wait_for_task(); + }); } - try { - tp->post(thread_task, tp, std::ref(counter), wc); - } catch (const concurrencpp::errors::executor_shutdown&) { - return; + std::this_thread::sleep_for(milliseconds(200)); + + executor->post([] { + }); + + for (auto& thread : waiter_threads) { + thread.join(); } + + std::cout << "===================================" << std::endl; } -void test_thread_executor() { - concurrencpp::runtime runtime; - auto tp = runtime.thread_executor(); - std::atomic_size_t counter = 0; - auto wc = std::make_shared(); +void test_manual_executor_wait_for_task_for(std::shared_ptr executor) { + std::cout << "manual_executor::wait_for_task_for" << std::endl; + + const auto num_of_threads = std::thread::hardware_concurrency() * 10; + + std::vector waiter_threads; + waiter_threads.resize(num_of_threads); + size_t time_to_sleep_counter = 1; + + for (auto& thread : waiter_threads) { + thread = std::thread([executor, time_to_sleep_counter]() mutable { + const auto time = milliseconds(time_to_sleep_counter * 10); + executor->wait_for_task_for(time); + }); - for (size_t i = 0; i < 4; i++) { - tp->post(thread_task, tp, std::ref(counter), wc); + ++time_to_sleep_counter; } - wc->wait(); + std::this_thread::sleep_for(milliseconds(time_to_sleep_counter * 5)); + + executor->post([] { + }); + + for (auto& thread : waiter_threads) { + thread.join(); + } + + std::cout << "===================================" << std::endl; } -void manual_executor_work_loop(std::shared_ptr (&executors)[16], std::atomic_size_t& counter, const size_t worker_index) { - try { - while (true) { - const auto c = counter.fetch_add(1, std::memory_order_relaxed); +void test_manual_executor_wait_for_tasks(std::shared_ptr executor) { + std::cout << "manual_executor::wait_for_tasks" << std::endl; - if (c >= 10'000'000) { - return; - } + const auto num_of_threads = std::thread::hardware_concurrency() * 10; - const auto worker_pos = ::rand() % std::size(executors); - auto& executor = executors[worker_pos]; - executor->post([] { - }); + std::vector waiter_threads; + waiter_threads.resize(num_of_threads); + size_t task_count_counter = 1; - executors[worker_index]->loop(16); - } - } catch (const concurrencpp::errors::executor_shutdown&) { - return; + for (auto& thread : waiter_threads) { + thread = std::thread([executor, task_count_counter]() mutable { + executor->wait_for_tasks(task_count_counter * 10); + }); + + ++task_count_counter; } + + std::this_thread::sleep_for(milliseconds(250)); + + for (size_t i = 0; i < task_count_counter * 10; i++) { + executor->post([] { + }); + } + + for (auto& thread : waiter_threads) { + thread.join(); + } + + std::cout << "===================================" << std::endl; } -void test_manual_executor() { - concurrencpp::runtime runtime; - std::atomic_size_t 
counter = 0; - std::shared_ptr executors[16]; - std::thread threads[16]; +void test_manual_executor_wait_for_tasks_for(std::shared_ptr executor) { + std::cout << "manual_executor::wait_for_tasks_for" << std::endl; + + const auto num_of_threads = std::thread::hardware_concurrency() * 10; + + std::vector waiter_threads; + waiter_threads.resize(num_of_threads); + size_t counter = 1; + + for (auto& thread : waiter_threads) { + thread = std::thread([executor, counter]() mutable { + const auto time = milliseconds(counter * 10); + executor->wait_for_tasks_for(counter * 10, time); + }); + + ++counter; + } + + std::this_thread::sleep_for(milliseconds(counter * 5)); + + for (size_t i = 0; i < counter * 10; i++) { + executor->post([] { + }); + } + + for (auto& thread : waiter_threads) { + thread.join(); + } + + std::cout << "===================================" << std::endl; +} + +void test_manual_executor_loop_once(std::shared_ptr executor) { + std::cout << "manual_executor::loop_once" << std::endl; + + const auto num_of_threads = std::thread::hardware_concurrency() * 10; + const auto looping_tp = system_clock::now() + milliseconds(250); + + std::vector looping_threads; + looping_threads.resize(num_of_threads); + + for (auto& thread : looping_threads) { + thread = std::thread([executor, looping_tp]() mutable { + std::this_thread::sleep_until(looping_tp); + executor->loop_once(); + }); + } + + std::this_thread::sleep_until(looping_tp); + + for (size_t i = 0; i < num_of_threads; i++) { + executor->post([] { + }); + } + + for (auto& thread : looping_threads) { + thread.join(); + } + + std::cout << "===================================" << std::endl; +} + +void test_manual_executor_loop_once_for(std::shared_ptr executor) { + std::cout << "manual_executor::loop_once_for" << std::endl; + + const auto num_of_threads = std::thread::hardware_concurrency() * 10; + + std::vector waiter_threads; + waiter_threads.resize(num_of_threads); + size_t time_to_sleep_counter = 1; + + for (auto& thread : waiter_threads) { + thread = std::thread([executor, time_to_sleep_counter]() mutable { + const auto time = milliseconds(time_to_sleep_counter * 10); + executor->loop_once_for(time); + }); + + ++time_to_sleep_counter; + } + + std::this_thread::sleep_for(milliseconds(time_to_sleep_counter * 5)); + + for (size_t i = 0; i < num_of_threads; i++) { + executor->post([] { + }); + } + + for (auto& thread : waiter_threads) { + thread.join(); + } + + std::cout << "===================================" << std::endl; +} + +void test_manual_executor_loop(std::shared_ptr executor) { + std::cout << "manual_executor::loop" << std::endl; + + const auto num_of_threads = std::thread::hardware_concurrency() * 10; + const auto looping_tp = system_clock::now() + milliseconds(250); + + std::vector waiter_threads; + waiter_threads.resize(num_of_threads); + size_t task_count_counter = 1; + + for (auto& thread : waiter_threads) { + thread = std::thread([executor, task_count_counter, looping_tp]() mutable { + std::this_thread::sleep_until(looping_tp); + executor->loop(task_count_counter * 10); + }); + + ++task_count_counter; + } + + std::this_thread::sleep_until(looping_tp); + + for (size_t i = 0; i < task_count_counter * 100; i++) { + executor->post([] { + }); + } + + for (auto& thread : waiter_threads) { + thread.join(); + } + + std::cout << "===================================" << std::endl; +} + +void test_manual_executor_loop_for(std::shared_ptr executor) { + std::cout << "manual_executor::loop_for" << std::endl; + + const auto num_of_threads = 
std::thread::hardware_concurrency() * 10; - for (auto& executor : executors) { - executor = runtime.make_manual_executor(); + std::vector waiter_threads; + waiter_threads.resize(num_of_threads); + size_t counter = 1; + + for (auto& thread : waiter_threads) { + thread = std::thread([executor, counter]() mutable { + const auto time = milliseconds(counter * 10); + executor->loop_for(counter * 10, time); + }); + + ++counter; } - for (size_t i = 0; i < std::size(executors); i++) { - threads[i] = std::thread([&, i] { - manual_executor_work_loop(executors, std::ref(counter), i); + std::this_thread::sleep_for(milliseconds(counter * 5)); + + for (size_t i = 0; i < counter * 100; i++) { + executor->post([] { + }); + } + + for (auto& thread : waiter_threads) { + thread.join(); + } + + std::cout << "===================================" << std::endl; +} + +void test_manual_executor_clear(std::shared_ptr executor) { + std::cout << "manual_executor::clear" << std::endl; + + const auto num_of_threads = std::thread::hardware_concurrency() * 10; + + std::vector threads; + threads.reserve(num_of_threads); + + const auto clear_tp = high_resolution_clock::now() + milliseconds(250); + + for (size_t i = 0; i < num_of_threads / 2; i++) { + threads.emplace_back([executor, clear_tp]() mutable { + std::this_thread::sleep_until(clear_tp); + + for (size_t j = 0; j < 100; j++) { + executor->clear(); + } + }); + } + + for (size_t i = 0; i < num_of_threads / 2; i++) { + threads.emplace_back([executor, clear_tp]() mutable { + std::this_thread::sleep_until(clear_tp - milliseconds(1)); + + for (size_t i = 0; i < 10'000; i++) { + executor->post([] { + }); + + std::this_thread::yield(); + } }); } for (auto& thread : threads) { thread.join(); } + + std::cout << "===================================" << std::endl; } diff --git a/test/source/thread_sanitizer/fibbonacci.cpp b/test/source/thread_sanitizer/fibonacci.cpp similarity index 54% rename from test/source/thread_sanitizer/fibbonacci.cpp rename to test/source/thread_sanitizer/fibonacci.cpp index 0e757f47..3aaeaffe 100644 --- a/test/source/thread_sanitizer/fibbonacci.cpp +++ b/test/source/thread_sanitizer/fibonacci.cpp @@ -3,32 +3,29 @@ #include concurrencpp::result fibbonacci(concurrencpp::executor_tag, std::shared_ptr tpe, int curr); -int fibbonacci_sync(int i); +int fibbonacci_sync(int i) noexcept; int main() { + std::cout << "Starting parallel Fibonacci test" << std::endl; + + const auto fibb_sync = fibbonacci_sync(32); + concurrencpp::runtime_options opts; - opts.max_cpu_threads = 24; + opts.max_cpu_threads = std::thread::hardware_concurrency() * 8; concurrencpp::runtime runtime(opts); - auto fibb = fibbonacci(concurrencpp::executor_tag {}, runtime.thread_pool_executor(), 32).get(); - auto fibb_sync = fibbonacci_sync(32); + const auto fibb = fibbonacci({}, runtime.thread_pool_executor(), 32).get(); if (fibb != fibb_sync) { - std::cerr << "fibonnacci test failed. expected " << fibb_sync << " got " << fibb << std::endl; + std::cerr << "fibonacci test failed. 
expected " << fibb_sync << " got " << fibb << std::endl; std::abort(); } - std::cout << fibb << std::endl; + std::cout << "fibonacci(32) = " << fibb << std::endl; + std::cout << "================================" << std::endl; } using namespace concurrencpp; -result fibbonacci_split(std::shared_ptr tpe, const int curr) { - auto fib_1 = fibbonacci(executor_tag {}, tpe, curr - 1); - auto fib_2 = fibbonacci(executor_tag {}, tpe, curr - 2); - - co_return co_await fib_1 + co_await fib_2; -} - result fibbonacci(executor_tag, std::shared_ptr tpe, const int curr) { if (curr == 1) { co_return 1; @@ -38,10 +35,13 @@ result fibbonacci(executor_tag, std::shared_ptr tpe, co_return 0; } - co_return co_await fibbonacci_split(tpe, curr); + auto fib_1 = fibbonacci({}, tpe, curr - 1); + auto fib_2 = fibbonacci({}, tpe, curr - 2); + + co_return co_await fib_1 + co_await fib_2; } -int fibbonacci_sync(int i) { +int fibbonacci_sync(int i) noexcept { if (i == 0) { return 0; } diff --git a/test/source/thread_sanitizer/matrix_multiplication.cpp b/test/source/thread_sanitizer/matrix_multiplication.cpp index 4aa6ba68..dac3b26e 100644 --- a/test/source/thread_sanitizer/matrix_multiplication.cpp +++ b/test/source/thread_sanitizer/matrix_multiplication.cpp @@ -1,15 +1,19 @@ #include "concurrencpp/concurrencpp.h" -#include #include #include +#include #include concurrencpp::result test_matrix_multiplication(std::shared_ptr ex); int main() { + std::cout << "Testing parallel matrix multiplication" << std::endl; + concurrencpp::runtime runtime; test_matrix_multiplication(runtime.thread_pool_executor()).get(); + + std::cout << "================================" << std::endl; } using namespace concurrencpp; @@ -18,7 +22,7 @@ using matrix = std::array, 1024>; std::unique_ptr make_matrix() { std::default_random_engine generator; - std::uniform_real_distribution distribution(-1000.0, 1000.0); + std::uniform_real_distribution distribution(-5000.0, 5000.0); auto mtx_ptr = std::make_unique(); auto& mtx = *mtx_ptr; @@ -35,29 +39,26 @@ void test_matrix(const matrix& mtx0, const matrix& mtx1, const matrix& mtx2) { for (size_t i = 0; i < 1024; i++) { for (size_t j = 0; j < 1024; j++) { - double res = 0.0; + auto res = 0.0; for (size_t k = 0; k < 1024; k++) { res += mtx0[i][k] * mtx1[k][j]; } if (mtx2[i][j] != res) { - std::cerr << "matrix multiplication test failed. expected " << res << " got " << mtx2[i][j] << "at matix position[" << i << "," << j << std::endl; + std::cerr << "matrix multiplication test failed. 
expected " << res << " got: " << mtx2[i][j] << "at matrix position[" << i << "," << j << std::endl; } } } } -result -do_multiply(executor_tag, std::shared_ptr executor, const matrix& mtx0, const matrix& mtx1, matrix& mtx2, size_t line, size_t col) { - - double res = 0.0; +result do_multiply(executor_tag, std::shared_ptr executor, const matrix& mtx0, const matrix& mtx1, size_t line, size_t col) { + auto res = 0.0; for (size_t i = 0; i < 1024; i++) { res += mtx0[line][i] * mtx1[i][col]; } - mtx2[line][col] = res; - co_return; + co_return res; }; result test_matrix_multiplication(std::shared_ptr ex) { @@ -69,17 +70,19 @@ result test_matrix_multiplication(std::shared_ptr ex matrix& mtx1 = *mtx1_ptr; matrix& mtx2 = *mtx2_ptr; - std::vector> results; + std::vector> results; results.reserve(1024 * 1024); for (size_t i = 0; i < 1024; i++) { for (size_t j = 0; j < 1024; j++) { - results.emplace_back(do_multiply({}, ex, mtx0, mtx1, mtx2, i, j)); + results.emplace_back(do_multiply({}, ex, mtx0, mtx1, i, j)); } } - for (auto& result : results) { - co_await result; + for (size_t i = 0; i < 1024; i++) { + for (size_t j = 0; j < 1024; j++) { + mtx2[i][j] = co_await results[i * 1024 + j]; + } } test_matrix(mtx0, mtx1, mtx2); diff --git a/test/source/thread_sanitizer/quick_sort.cpp b/test/source/thread_sanitizer/quick_sort.cpp index 458736bb..0630762a 100644 --- a/test/source/thread_sanitizer/quick_sort.cpp +++ b/test/source/thread_sanitizer/quick_sort.cpp @@ -6,23 +6,28 @@ concurrencpp::result quick_sort(concurrencpp::executor_tag, std::shared_ptr tp, int* a, int lo, int hi); int main() { + std::cout << "Starting parallel quick sort test" << std::endl; + concurrencpp::runtime_options opts; - opts.max_cpu_threads = 24; + opts.max_cpu_threads = std::thread::hardware_concurrency() * 8; concurrencpp::runtime runtime(opts); ::srand(::time(nullptr)); - std::vector array(8 * 1'000'000); + std::vector array(8'000'000); for (auto& i : array) { - i = rand() % 10 * 10'000; + i = ::rand() % 100'000; } quick_sort({}, runtime.thread_pool_executor(), array.data(), 0, array.size() - 1).get(); const auto is_sorted = std::is_sorted(array.begin(), array.end()); if (!is_sorted) { - std::cerr << "array is not sorted." << std::endl; + std::cerr << "Quick sort test failed: array is not sorted." 
<< std::endl; + std::abort(); } + + std::cout << "================================" << std::endl; } using namespace concurrencpp; @@ -81,8 +86,8 @@ result quick_sort(executor_tag, std::shared_ptr tp, } const auto p = partition(a, lo, hi); - auto res0 = quick_sort(executor_tag {}, tp, a, lo, p); - auto res1 = quick_sort(executor_tag {}, tp, a, p + 1, hi); + auto res0 = quick_sort({}, tp, a, lo, p); + auto res1 = quick_sort({}, tp, a, p + 1, hi); co_await res0; co_await res1; diff --git a/test/source/thread_sanitizer/result.cpp b/test/source/thread_sanitizer/result.cpp index 8b734594..89e28dcd 100644 --- a/test/source/thread_sanitizer/result.cpp +++ b/test/source/thread_sanitizer/result.cpp @@ -2,145 +2,226 @@ #include -void result_get(std::shared_ptr tp); -void result_wait(std::shared_ptr tp); -void result_wait_for(std::shared_ptr tp); -concurrencpp::result result_await(std::shared_ptr tp); -concurrencpp::result result_await_via(std::shared_ptr tp); +void test_result_get(std::shared_ptr te); +void test_result_wait(std::shared_ptr te); +void test_result_wait_for(std::shared_ptr te); +void test_result_await(std::shared_ptr te); +void test_result_await_via(std::shared_ptr te); +void test_result_resolve(std::shared_ptr te); +void test_result_resolve_via(std::shared_ptr te); int main() { - concurrencpp::runtime_options opts; - opts.max_cpu_threads = 24; - concurrencpp::runtime runtime(opts); + std::cout << "Starting concurrencpp::result test" << std::endl; + + concurrencpp::runtime runtime; + const auto thread_executor = runtime.thread_executor(); + + test_result_get(thread_executor); + test_result_wait(thread_executor); + test_result_wait_for(thread_executor); + test_result_await(thread_executor); + test_result_await_via(thread_executor); + test_result_resolve(thread_executor); + test_result_resolve_via(thread_executor); +} - std::cout << "result::get" << std::endl; - result_get(runtime.thread_pool_executor()); - std::cout << "================================" << std::endl; +#include "tests/test_utils/make_result_array.h" - std::cout << "result::wait" << std::endl; - result_wait(runtime.thread_pool_executor()); - std::cout << "================================" << std::endl; - - std::cout << "result::wait_for" << std::endl; - result_wait_for(runtime.thread_pool_executor()); - std::cout << "================================" << std::endl; +using namespace concurrencpp; +using namespace std::chrono; - std::cout << "result::await" << std::endl; - result_await(runtime.thread_pool_executor()).get(); - std::cout << "================================" << std::endl; +namespace concurrencpp::tests { + template + void test_result_method_val(std::shared_ptr te, method_functor&& tested_method, converter_type converter) { + const auto tp = system_clock::now() + seconds(2); + auto results = make_result_array(1024, tp, te, converter); - std::cout << "result::await_via" << std::endl; - result_await_via(runtime.thread_pool_executor()).get(); - std::cout << "================================" << std::endl; -} + std::this_thread::sleep_until(tp); -using namespace concurrencpp; + test_result_array(std::move(results), tested_method, converter); + } -void result_get(std::shared_ptr tp) { - const size_t task_count = 8'000'000; + template + void test_result_method_ex(std::shared_ptr te, method_functor&& tested_method, converter_type converter) { + const auto tp = system_clock::now() + seconds(2); + auto results = make_exceptional_array(1024, tp, te, converter); - std::vector> results; - results.reserve(task_count); + 
std::this_thread::sleep_until(tp); - for (size_t i = 0; i < task_count; i++) { - results.emplace_back(tp->submit([i] { - return int(i); - })); + test_exceptional_array(std::move(results), tested_method); } +} // namespace concurrencpp::tests - for (size_t i = 0; i < task_count; i++) { - auto res = results[i].get(); - if (res != i) { - std::cerr << "submit + get, expected " << i << " and got " << res << std::endl; - std::abort(); +namespace concurrencpp::tests { + struct get_method { + template + result operator()(result res) { + co_return res.get(); } - } -} + }; -void result_wait(std::shared_ptr tp) { - const size_t task_count = 8'000'000; + struct wait_method { + template + result operator()(result res) { + res.wait(); + return std::move(res); + } + }; - std::vector> results; - results.reserve(task_count); + struct wait_for_method { + template + result operator()(result res) { + while (res.wait_for(milliseconds(5)) == result_status::idle) { + // do nothing. + } - for (size_t i = 0; i < task_count; i++) { - results.emplace_back(tp->submit([i] { - return int(i); - })); - } + return std::move(res); + } + }; + + class await_method { - for (size_t i = 0; i < task_count; i++) { - results[i].wait(); - auto res = results[i].get(); - if (res != i) { - std::cerr << "submit + get, expected " << i << " and got " << res << std::endl; - std::abort(); + private: + template + result await_task(result res) { + co_return co_await res; } - } -} -void result_wait_for(std::shared_ptr tp) { - const size_t task_count = 8'000'000; + public: + template + result operator()(result res) { + auto wrapper_res = await_task(std::move(res)); + wrapper_res.wait(); + return std::move(wrapper_res); + } + }; - std::vector> results; - results.reserve(task_count); + class await_via_method { - for (size_t i = 0; i < task_count; i++) { - results.emplace_back(tp->submit([i] { - return int(i); - })); - } + private: + std::shared_ptr m_executor; - for (size_t i = 0; i < task_count; i++) { - while (results[i].wait_for(std::chrono::milliseconds(5)) == result_status::idle) - ; + template + result await_task(result res) { + co_return co_await res.await_via(m_executor, true); + } + + public: + await_via_method(std::shared_ptr executor) noexcept : m_executor(std::move(executor)) {} - auto res = results[i].get(); - if (res != i) { - std::cerr << "submit + get, expected " << i << " and got " << res << std::endl; - std::abort(); + template + result operator()(result res) { + auto wrapper_res = await_task(std::move(res)); + wrapper_res.wait(); + return std::move(wrapper_res); } - } -} + }; -result result_await(std::shared_ptr tp) { - const size_t task_count = 8'000'000; + class resolve_method { - std::vector> results; - results.reserve(task_count); + private: + template + result> await_task(result res) { + co_return co_await res.resolve(); + } - for (size_t i = 0; i < task_count; i++) { - results.emplace_back(tp->submit([i] { - return int(i); - })); - } + public: + template + result operator()(result res) { + auto wrapper_res = await_task(std::move(res)); + return wrapper_res.get(); + } + }; + + class resolve_via_method { + + private: + std::shared_ptr m_executor; + + template + result> await_task(result res) { + co_return co_await res.resolve_via(m_executor, true); + } + + public: + resolve_via_method(std::shared_ptr executor) noexcept : m_executor(std::move(executor)) {} - for (size_t i = 0; i < task_count; i++) { - auto res = co_await results[i]; - if (res != i) { - std::cerr << "submit + await, expected " << i << " and got " << res << 
std::endl; - std::abort(); + template + result operator()(result res) { + auto wrapper_res = await_task(std::move(res)); + return wrapper_res.get(); } + }; + + template + void test_result_method(std::shared_ptr te, tested_method&& method) { + tests::test_result_method_val(te, method, converter {}); + tests::test_result_method_val(te, method, converter {}); + tests::test_result_method_val(te, method, converter {}); + tests::test_result_method_val(te, method, converter {}); + tests::test_result_method_val(te, method, converter {}); + + tests::test_result_method_ex(te, method, converter {}); + tests::test_result_method_ex(te, method, converter {}); + tests::test_result_method_ex(te, method, converter {}); + tests::test_result_method_ex(te, method, converter {}); + tests::test_result_method_ex(te, method, converter {}); } + +} // namespace concurrencpp::tests + +void test_result_get(std::shared_ptr te) { + std::cout << "Testing result::get()" << std::endl; + + tests::test_result_method(te, tests::get_method {}); + + std::cout << "================================" << std::endl; } -result result_await_via(std::shared_ptr tp) { - const size_t task_count = 8'000'000; +void test_result_wait(std::shared_ptr te) { + std::cout << "Testing result::wait()" << std::endl; - std::vector> results; - results.reserve(task_count); + tests::test_result_method(te, tests::wait_method {}); - for (size_t i = 0; i < task_count; i++) { - results.emplace_back(tp->submit([i] { - return int(i); - })); - } + std::cout << "================================" << std::endl; +} - for (size_t i = 0; i < task_count; i++) { - auto res = co_await results[i].await_via(tp); - if (res != i) { - std::cerr << "submit + await, expected " << i << " and got " << res << std::endl; - std::abort(); - } - } +void test_result_wait_for(std::shared_ptr te) { + std::cout << "Testing result::wait_for()" << std::endl; + + tests::test_result_method(te, tests::wait_for_method {}); + + std::cout << "================================" << std::endl; +} + +void test_result_await(std::shared_ptr te) { + std::cout << "Testing result::await()" << std::endl; + + tests::test_result_method(te, tests::await_method {}); + + std::cout << "================================" << std::endl; +} + +void test_result_await_via(std::shared_ptr te) { + std::cout << "Testing result::await_via()" << std::endl; + + tests::test_result_method(te, tests::await_via_method {te}); + + std::cout << "================================" << std::endl; +} + +void test_result_resolve(std::shared_ptr te) { + std::cout << "Testing result::resolve()" << std::endl; + + tests::test_result_method(te, tests::resolve_method {}); + + std::cout << "================================" << std::endl; } + +void test_result_resolve_via(std::shared_ptr te) { + std::cout << "Testing result::resolve_via()" << std::endl; + + tests::test_result_method(te, tests::resolve_via_method {te}); + + std::cout << "================================" << std::endl; +} \ No newline at end of file diff --git a/test/source/thread_sanitizer/shared_result.cpp b/test/source/thread_sanitizer/shared_result.cpp new file mode 100644 index 00000000..fa869a4d --- /dev/null +++ b/test/source/thread_sanitizer/shared_result.cpp @@ -0,0 +1,258 @@ +#include "concurrencpp/concurrencpp.h" + +#include + +void test_shared_result_get(std::shared_ptr te); +void test_shared_result_wait(std::shared_ptr te); +void test_shared_result_wait_for(std::shared_ptr te); +void test_shared_result_await(std::shared_ptr te); +void 
test_shared_result_await_via(std::shared_ptr te); +void test_shared_result_resolve(std::shared_ptr te); +void test_shared_result_resolve_via(std::shared_ptr te); + +int main() { + std::cout << "Starting concurrencpp::shared_result test" << std::endl; + + concurrencpp::runtime runtime; + const auto thread_executor = runtime.thread_executor(); + + test_shared_result_get(thread_executor); + test_shared_result_wait(thread_executor); + test_shared_result_wait_for(thread_executor); + test_shared_result_await(thread_executor); + test_shared_result_await_via(thread_executor); + test_shared_result_resolve(thread_executor); + test_shared_result_resolve_via(thread_executor); +} + +#include "tests/test_utils/make_result_array.h" + +using namespace concurrencpp; +using namespace std::chrono; + +template +std::vector> to_shared_results(std::vector> results) { + std::vector> shared_results; + shared_results.reserve(results.size()); + std::for_each(results.begin(), results.end(), [&shared_results](auto& result) { + shared_results.emplace_back(std::move(result)); + }); + + return shared_results; +} + +namespace concurrencpp::tests { + template + void test_shared_result_method_val(std::shared_ptr te, method_functor&& tested_method, converter_type&& converter) { + const auto tp = system_clock::now() + seconds(2); + auto results = make_result_array(1024, tp, te, converter); + auto shared_results = to_shared_results(std::move(results)); + + std::thread consumers[8]; + + for (auto& thread : consumers) { + thread = std::thread([=]() mutable { + std::this_thread::sleep_until(tp - milliseconds(1)); + test_shared_result_array(std::move(shared_results), tested_method, converter); + }); + } + + for (auto& thread : consumers) { + thread.join(); + } + } + + template + void test_shared_result_method_ex(std::shared_ptr te, method_functor&& tested_method, converter_type&& converter) { + const auto tp = system_clock::now() + seconds(1); + auto results = make_exceptional_array(1024, tp, te, converter); + auto shared_results = to_shared_results(std::move(results)); + + std::thread consumers[8]; + + for (auto& thread : consumers) { + thread = std::thread([=]() mutable { + std::this_thread::sleep_until(tp - milliseconds(1)); + test_shared_result_exceptional_array(std::move(shared_results), tested_method); + }); + } + + for (auto& thread : consumers) { + thread.join(); + } + } +} // namespace concurrencpp::tests + +namespace concurrencpp::tests { + struct get_method { + template + result operator()(shared_result res) { + co_return res.get(); + } + }; + + struct wait_method { + template + result operator()(shared_result res) { + res.wait(); + co_return res.get(); + } + }; + + struct wait_for_method { + template + result operator()(shared_result res) { + while (res.wait_for(milliseconds(5)) == result_status::idle) { + // do nothing. 
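+                // descriptive note (added): wait_for keeps returning result_status::idle until the result is ready, so this busy-polls in 5ms slices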
+ } + + co_return res.get(); + } + }; + + class await_method { + + private: + template + result await_task(shared_result res) { + co_return co_await res; + } + + public: + template + result operator()(shared_result res) { + auto wrapper_res = await_task(std::move(res)); + wrapper_res.wait(); + return std::move(wrapper_res); + } + }; + + class await_via_method { + + private: + std::shared_ptr m_executor; + + template + result await_task(shared_result res) { + co_return co_await res.await_via(m_executor, true); + } + + public: + await_via_method(std::shared_ptr executor) noexcept : m_executor(std::move(executor)) {} + + template + result operator()(shared_result res) { + auto wrapper_res = await_task(std::move(res)); + wrapper_res.wait(); + return std::move(wrapper_res); + } + }; + + class resolve_method { + + private: + template + result await_task(shared_result res) { + co_return co_await co_await res.resolve(); + } + + public: + template + result operator()(shared_result res) { + auto wrapper_res = await_task(std::move(res)); + co_return wrapper_res.get(); + } + }; + + class resolve_via_method { + + private: + std::shared_ptr m_executor; + + template + result await_task(shared_result res) { + co_return co_await co_await res.resolve_via(m_executor, true); + } + + public: + resolve_via_method(std::shared_ptr executor) noexcept : m_executor(std::move(executor)) {} + + template + result operator()(shared_result res) { + auto wrapper_res = await_task(std::move(res)); + co_return wrapper_res.get(); + } + }; + + template + void test_shared_result_method(std::shared_ptr te, tested_method&& method) { + tests::test_shared_result_method_val(te, method, converter {}); + tests::test_shared_result_method_val(te, method, converter {}); + tests::test_shared_result_method_val(te, method, converter {}); + tests::test_shared_result_method_val(te, method, converter {}); + tests::test_shared_result_method_val(te, method, converter {}); + + tests::test_shared_result_method_ex(te, method, converter {}); + tests::test_shared_result_method_ex(te, method, converter {}); + tests::test_shared_result_method_ex(te, method, converter {}); + tests::test_shared_result_method_ex(te, method, converter {}); + tests::test_shared_result_method_ex(te, method, converter {}); + } + +} // namespace concurrencpp::tests + +void test_shared_result_get(std::shared_ptr te) { + std::cout << "Testing shared_result::get()" << std::endl; + + tests::test_shared_result_method(te, tests::get_method {}); + + std::cout << "================================" << std::endl; +} + +void test_shared_result_wait(std::shared_ptr te) { + std::cout << "Testing shared_result::wait()" << std::endl; + + tests::test_shared_result_method(te, tests::wait_method {}); + + std::cout << "================================" << std::endl; +} + +void test_shared_result_wait_for(std::shared_ptr te) { + std::cout << "Testing shared_result::wait_for()" << std::endl; + + tests::test_shared_result_method(te, tests::wait_for_method {}); + + std::cout << "================================" << std::endl; +} + +void test_shared_result_await(std::shared_ptr te) { + std::cout << "Testing shared_result::await()" << std::endl; + + tests::test_shared_result_method(te, tests::await_method {}); + + std::cout << "================================" << std::endl; +} + +void test_shared_result_await_via(std::shared_ptr te) { + std::cout << "Testing shared_result::await_via()" << std::endl; + + tests::test_shared_result_method(te, tests::await_via_method {te}); + + std::cout << 
"================================" << std::endl; +} + +void test_shared_result_resolve(std::shared_ptr te) { + std::cout << "Testing shared_result::resolve()" << std::endl; + + tests::test_shared_result_method(te, tests::resolve_method {}); + + std::cout << "================================" << std::endl; +} + +void test_shared_result_resolve_via(std::shared_ptr te) { + std::cout << "Testing shared_result::resolve_via()" << std::endl; + + tests::test_shared_result_method(te, tests::resolve_via_method {te}); + + std::cout << "================================" << std::endl; +} \ No newline at end of file diff --git a/test/source/thread_sanitizer/when_all.cpp b/test/source/thread_sanitizer/when_all.cpp index 42cc24bb..6dc5aca8 100644 --- a/test/source/thread_sanitizer/when_all.cpp +++ b/test/source/thread_sanitizer/when_all.cpp @@ -2,64 +2,332 @@ #include -concurrencpp::result when_all_test(std::shared_ptr tpe); +concurrencpp::result test_when_all_tuple(std::shared_ptr te); +void test_when_all_vector(std::shared_ptr te); int main() { concurrencpp::runtime runtime; - when_all_test(runtime.thread_executor()).get(); + test_when_all_tuple(runtime.thread_executor()).get(); + test_when_all_vector(runtime.thread_executor()); return 0; } +#include "tests/test_utils/make_result_array.h" + using namespace concurrencpp; +using namespace std::chrono; -std::vector> run_loop_once(std::shared_ptr tpe) { - std::vector> results; - results.reserve(1'024); +result test_when_all_tuple(std::shared_ptr te) { + std::cout << "Testing when_all(result_type&& ... )" << std::endl; - for (size_t i = 0; i < 1'024; i++) { - results.emplace_back(tpe->submit([] { - std::this_thread::yield(); - return 0; - })); - } + const auto tp = system_clock::now() + milliseconds(250); + + // int + auto int_val_res_0 = te->submit([tp] { + std::this_thread::sleep_until(tp); + return 0; + }); + + auto int_val_res_1 = te->submit([tp] { + std::this_thread::sleep_until(tp); + return 1; + }); + + auto int_val_res_2 = te->submit([tp] { + std::this_thread::sleep_until(tp); + return 2; + }); + + auto int_ex_res_0 = te->submit([tp] { + std::this_thread::sleep_until(tp); + throw tests::costume_exception(0); + return int(); + }); + + auto int_ex_res_1 = te->submit([tp] { + std::this_thread::sleep_until(tp); + throw tests::costume_exception(1); + return int(); + }); + + auto int_ex_res_2 = te->submit([tp] { + std::this_thread::sleep_until(tp); + throw tests::costume_exception(2); + return int(); + }); + + // std::string + auto str_val_res_0 = te->submit([tp]() -> std::string { + std::this_thread::sleep_until(tp); + return "0"; + }); + + auto str_val_res_1 = te->submit([tp]() -> std::string { + std::this_thread::sleep_until(tp); + return "1"; + }); + + auto str_val_res_2 = te->submit([tp]() -> std::string { + std::this_thread::sleep_until(tp); + return "2"; + }); + + auto str_ex_res_0 = te->submit([tp]() -> std::string { + std::this_thread::sleep_until(tp); + throw tests::costume_exception(0); + return std::string(); + }); + + auto str_ex_res_1 = te->submit([tp]() -> std::string { + std::this_thread::sleep_until(tp); + throw tests::costume_exception(1); + return std::string(); + }); + + auto str_ex_res_2 = te->submit([tp]() -> std::string { + std::this_thread::sleep_until(tp); + throw tests::costume_exception(2); + return std::string(); + }); + + // void + auto void_val_res_0 = te->submit([tp] { + std::this_thread::sleep_until(tp); + }); + + auto void_val_res_1 = te->submit([tp] { + std::this_thread::sleep_until(tp); + }); + + auto void_val_res_2 = 
te->submit([tp] { + std::this_thread::sleep_until(tp); + }); + + auto void_ex_res_0 = te->submit([tp] { + std::this_thread::sleep_until(tp); + throw tests::costume_exception(0); + }); + + auto void_ex_res_1 = te->submit([tp] { + std::this_thread::sleep_until(tp); + throw tests::costume_exception(1); + }); + + auto void_ex_res_2 = te->submit([tp] { + std::this_thread::sleep_until(tp); + throw tests::costume_exception(2); + }); + + // int& + int integer0 = 1234567; + int integer1 = 7654321; + int integer2 = 10203040; + + auto int_ref_val_res_0 = te->submit([tp, &integer0]() -> int& { + std::this_thread::sleep_until(tp); + return integer0; + }); + + auto int_ref_val_res_1 = te->submit([tp, &integer1]() -> int& { + std::this_thread::sleep_until(tp); + return integer1; + }); + + auto int_ref_val_res_2 = te->submit([tp, &integer2]() -> int& { + std::this_thread::sleep_until(tp); + return integer2; + }); + + auto int_ref_ex_res_0 = te->submit([tp, &integer0]() -> int& { + std::this_thread::sleep_until(tp); + throw tests::costume_exception(0); + return integer0; + }); + + auto int_ref_ex_res_1 = te->submit([tp, &integer1]() -> int& { + std::this_thread::sleep_until(tp); + throw tests::costume_exception(1); + return integer1; + }); + + auto int_ref_ex_res_2 = te->submit([tp, &integer2]() -> int& { + std::this_thread::sleep_until(tp); + throw tests::costume_exception(2); + return integer2; + }); + + // std::string& + std::string string0 = "0"; + std::string string1 = "1"; + std::string string2 = "0"; + + auto str_ref_val_res_0 = te->submit([tp, &string0]() -> std::string& { + std::this_thread::sleep_until(tp); + return string0; + }); - return results; + auto str_ref_val_res_1 = te->submit([tp, &string1]() -> std::string& { + std::this_thread::sleep_until(tp); + return string1; + }); + + auto str_ref_val_res_2 = te->submit([tp, &string2]() -> std::string& { + std::this_thread::sleep_until(tp); + return string2; + }); + + auto str_ref_ex_res_0 = te->submit([tp, &string0]() -> std::string& { + std::this_thread::sleep_until(tp); + throw tests::costume_exception(0); + return string0; + }); + + auto str_ref_ex_res_1 = te->submit([tp, &string1]() -> std::string& { + std::this_thread::sleep_until(tp); + throw tests::costume_exception(1); + return string1; + }); + + auto str_ref_ex_res_2 = te->submit([tp, &string2]() -> std::string& { + std::this_thread::sleep_until(tp); + throw tests::costume_exception(2); + return string2; + }); + + std::this_thread::sleep_until(tp); + + auto when_all_result = co_await when_all(std::move(int_val_res_0), + std::move(int_val_res_1), + std::move(int_val_res_2), + + std::move(int_ex_res_0), + std::move(int_ex_res_1), + std::move(int_ex_res_2), + + std::move(str_val_res_0), + std::move(str_val_res_1), + std::move(str_val_res_2), + + std::move(str_ex_res_0), + std::move(str_ex_res_1), + std::move(str_ex_res_2), + + std::move(void_val_res_0), + std::move(void_val_res_1), + std::move(void_val_res_2), + + std::move(void_ex_res_0), + std::move(void_ex_res_1), + std::move(void_ex_res_2), + + std::move(int_ref_val_res_0), + std::move(int_ref_val_res_1), + std::move(int_ref_val_res_2), + + std::move(int_ref_ex_res_0), + std::move(int_ref_ex_res_1), + std::move(int_ref_ex_res_2), + + std::move(str_ref_val_res_0), + std::move(str_ref_val_res_1), + std::move(str_ref_val_res_2), + + std::move(str_ref_ex_res_0), + std::move(str_ref_ex_res_1), + std::move(str_ref_ex_res_2)); + + // int + tests::test_ready_result(std::move(std::get<0>(when_all_result)), 0); + 
tests::test_ready_result(std::move(std::get<1>(when_all_result)), 1);
+    tests::test_ready_result(std::move(std::get<2>(when_all_result)), 2);
+    tests::test_ready_result_costume_exception(std::move(std::get<3>(when_all_result)), 0);
+    tests::test_ready_result_costume_exception(std::move(std::get<4>(when_all_result)), 1);
+    tests::test_ready_result_costume_exception(std::move(std::get<5>(when_all_result)), 2);
+
+    // std::string
+    tests::test_ready_result(std::move(std::get<6>(when_all_result)), std::string("0"));
+    tests::test_ready_result(std::move(std::get<7>(when_all_result)), std::string("1"));
+    tests::test_ready_result(std::move(std::get<8>(when_all_result)), std::string("2"));
+    tests::test_ready_result_costume_exception(std::move(std::get<9>(when_all_result)), 0);
+    tests::test_ready_result_costume_exception(std::move(std::get<10>(when_all_result)), 1);
+    tests::test_ready_result_costume_exception(std::move(std::get<11>(when_all_result)), 2);
+
+    // void
+    tests::test_ready_result(std::move(std::get<12>(when_all_result)));
+    tests::test_ready_result(std::move(std::get<13>(when_all_result)));
+    tests::test_ready_result(std::move(std::get<14>(when_all_result)));
+    tests::test_ready_result_costume_exception(std::move(std::get<15>(when_all_result)), 0);
+    tests::test_ready_result_costume_exception(std::move(std::get<16>(when_all_result)), 1);
+    tests::test_ready_result_costume_exception(std::move(std::get<17>(when_all_result)), 2);
+
+    // int&
+    tests::test_ready_result(std::move(std::get<18>(when_all_result)), std::ref(integer0));
+    tests::test_ready_result(std::move(std::get<19>(when_all_result)), std::ref(integer1));
+    tests::test_ready_result(std::move(std::get<20>(when_all_result)), std::ref(integer2));
+    tests::test_ready_result_costume_exception(std::move(std::get<21>(when_all_result)), 0);
+    tests::test_ready_result_costume_exception(std::move(std::get<22>(when_all_result)), 1);
+    tests::test_ready_result_costume_exception(std::move(std::get<23>(when_all_result)), 2);
+
+    // std::string&
+    tests::test_ready_result(std::move(std::get<24>(when_all_result)), std::ref(string0));
+    tests::test_ready_result(std::move(std::get<25>(when_all_result)), std::ref(string1));
+    tests::test_ready_result(std::move(std::get<26>(when_all_result)), std::ref(string2));
+    tests::test_ready_result_costume_exception(std::move(std::get<27>(when_all_result)), 0);
+    tests::test_ready_result_costume_exception(std::move(std::get<28>(when_all_result)), 1);
+    tests::test_ready_result_costume_exception(std::move(std::get<29>(when_all_result)), 2);
+
+    std::cout << "================================" << std::endl;
 }
 
-void assert_loop(result<std::vector<result<int>>> result_list) {
-    if (result_list.status() != result_status::value) {
-        std::abort();
+struct get_method {
+    template<class type>
+    result<type> operator()(result<type> res) {
+        co_return res.get();
     }
+};
 
-    auto results = result_list.get();
+template<class type>
+result<void> test_when_all_vector_val_impl(std::shared_ptr<thread_executor> te) {
+    const auto tp = system_clock::now() + seconds(2);
 
-    for (auto& result : results) {
-        if (result.get() != 0) {
-            std::abort();
-        }
-    }
+    tests::converter<type> converter;
+    auto results = tests::make_result_array(1024, tp, te, converter);
+
+    std::this_thread::sleep_until(tp);
+
+    auto when_all_done = co_await when_all(results.begin(), results.end());
+
+    tests::test_result_array(std::move(when_all_done), get_method {}, converter);
 }
 
-result<void> when_all_test(std::shared_ptr<thread_pool_executor> tpe) {
-    auto loop_0 = run_loop_once(tpe);
-    auto loop_1 = run_loop_once(tpe);
-    auto loop_2 = run_loop_once(tpe);
-    auto loop_3 = run_loop_once(tpe);
-    auto loop_4 = run_loop_once(tpe);
+template<class type>
+result<void> test_when_all_vector_ex_impl(std::shared_ptr<thread_executor> te) {
+    const auto tp = system_clock::now() + seconds(2);
 
-    auto loop_0_all = when_all(loop_0.begin(), loop_0.end());
-    auto loop_1_all = when_all(loop_1.begin(), loop_1.end());
-    auto loop_2_all = when_all(loop_2.begin(), loop_2.end());
-    auto loop_3_all = when_all(loop_3.begin(), loop_3.end());
-    auto loop_4_all = when_all(loop_4.begin(), loop_4.end());
+    tests::converter<type> converter;
+    auto results = tests::make_exceptional_array(1024, tp, te, converter);
 
-    auto all = when_all(std::move(loop_0_all), std::move(loop_1_all), std::move(loop_2_all), std::move(loop_3_all), std::move(loop_4_all));
+    std::this_thread::sleep_until(tp);
 
-    auto all_done = co_await all;
+    auto when_all_done = co_await when_all(results.begin(), results.end());
 
-    assert_loop(std::move(std::get<0>(all_done)));
-    assert_loop(std::move(std::get<1>(all_done)));
-    assert_loop(std::move(std::get<2>(all_done)));
-    assert_loop(std::move(std::get<3>(all_done)));
-    assert_loop(std::move(std::get<4>(all_done)));
+    tests::test_exceptional_array(std::move(when_all_done), get_method {});
 }
+
+void test_when_all_vector(std::shared_ptr<thread_executor> te) {
+    std::cout << "Testing when_all(begin, end)" << std::endl;
+
+    test_when_all_vector_val_impl<int>(te).get();
+    test_when_all_vector_val_impl<std::string>(te).get();
+    test_when_all_vector_val_impl<void>(te).get();
+    test_when_all_vector_val_impl<int&>(te).get();
+    test_when_all_vector_val_impl<std::string&>(te).get();
+
+    test_when_all_vector_ex_impl<int>(te).get();
+    test_when_all_vector_ex_impl<std::string>(te).get();
+    test_when_all_vector_ex_impl<void>(te).get();
+    test_when_all_vector_ex_impl<int&>(te).get();
+    test_when_all_vector_ex_impl<std::string&>(te).get();
+
+    std::cout << "================================" << std::endl;
+}
\ No newline at end of file
diff --git a/test/source/thread_sanitizer/when_any.cpp b/test/source/thread_sanitizer/when_any.cpp
index 62eb0c01..d5f87afd 100644
--- a/test/source/thread_sanitizer/when_any.cpp
+++ b/test/source/thread_sanitizer/when_any.cpp
@@ -3,116 +3,452 @@
 #include 
 #include 
 
-concurrencpp::result<void> when_any_vector_test(std::shared_ptr<concurrencpp::thread_executor> te);
-concurrencpp::result<void> when_any_tuple_test(std::shared_ptr<concurrencpp::thread_executor> te);
+concurrencpp::result<void> test_when_any_tuple(std::shared_ptr<concurrencpp::thread_executor> te);
+void test_when_any_vector(std::shared_ptr<concurrencpp::thread_executor> te);
 
 int main() {
     concurrencpp::runtime runtime;
-    when_any_vector_test(runtime.thread_executor()).get();
-    std::cout << "=========================================" << std::endl;
 
-    when_any_tuple_test(runtime.thread_executor()).get();
-    std::cout << "=========================================" << std::endl;
+    test_when_any_tuple(runtime.thread_executor()).get();
+    test_when_any_vector(runtime.thread_executor());
     return 0;
 }
 
+#include "tests/test_utils/make_result_array.h"
+
 using namespace concurrencpp;
+using namespace std::chrono;
 
-struct random_ctx {
-    std::random_device rd;
-    std::mt19937 mt;
-    std::uniform_int_distribution<size_t> dist;
+const int integer0 = 1234567;
+const int integer1 = 7654321;
+const int integer2 = 10203040;
 
-    random_ctx() : mt(rd()), dist(1, 5 * 1000) {}
+const std::string string0 = "0";
+const std::string string1 = "1";
+const std::string string2 = "0";
 
-    size_t operator()() noexcept {
-        return dist(mt);
-    }
-};
-
-std::vector<result<int>> run_loop_once(std::shared_ptr<thread_executor> te, random_ctx& r, size_t count) {
-    std::vector<result<int>> results;
-    results.reserve(count);
-
-    for (size_t i = 0; i < count; i++) {
-        const auto sleeping_time = r();
-        results.emplace_back(te->submit([sleeping_time] {
-
std::this_thread::sleep_for(std::chrono::milliseconds(sleeping_time)); - return 0; - })); - } +result test_when_any_tuple(std::shared_ptr te) { + std::cout << "Testing when_any(result_type&& ... )" << std::endl; - return results; -} + const auto tp = system_clock::now() + milliseconds(250); + + // int + auto int_val_res_0 = te->submit([tp] { + std::this_thread::sleep_until(tp); + return 0; + }); + + auto int_val_res_1 = te->submit([tp] { + std::this_thread::sleep_until(tp); + return 1; + }); + + auto int_val_res_2 = te->submit([tp] { + std::this_thread::sleep_until(tp); + return 2; + }); + + auto int_ex_res_0 = te->submit([tp] { + std::this_thread::sleep_until(tp); + throw tests::costume_exception(0); + return int(); + }); + + auto int_ex_res_1 = te->submit([tp] { + std::this_thread::sleep_until(tp); + throw tests::costume_exception(1); + return int(); + }); + + auto int_ex_res_2 = te->submit([tp] { + std::this_thread::sleep_until(tp); + throw tests::costume_exception(2); + return int(); + }); + + // std::string + auto str_val_res_0 = te->submit([tp]() -> std::string { + std::this_thread::sleep_until(tp); + return "0"; + }); + + auto str_val_res_1 = te->submit([tp]() -> std::string { + std::this_thread::sleep_until(tp); + return "1"; + }); + + auto str_val_res_2 = te->submit([tp]() -> std::string { + std::this_thread::sleep_until(tp); + return "2"; + }); + + auto str_ex_res_0 = te->submit([tp]() -> std::string { + std::this_thread::sleep_until(tp); + throw tests::costume_exception(0); + return std::string(); + }); + + auto str_ex_res_1 = te->submit([tp]() -> std::string { + std::this_thread::sleep_until(tp); + throw tests::costume_exception(1); + return std::string(); + }); + + auto str_ex_res_2 = te->submit([tp]() -> std::string { + std::this_thread::sleep_until(tp); + throw tests::costume_exception(2); + return std::string(); + }); + + // void + auto void_val_res_0 = te->submit([tp] { + std::this_thread::sleep_until(tp); + }); + + auto void_val_res_1 = te->submit([tp] { + std::this_thread::sleep_until(tp); + }); + + auto void_val_res_2 = te->submit([tp] { + std::this_thread::sleep_until(tp); + }); + + auto void_ex_res_0 = te->submit([tp] { + std::this_thread::sleep_until(tp); + throw tests::costume_exception(0); + }); + + auto void_ex_res_1 = te->submit([tp] { + std::this_thread::sleep_until(tp); + throw tests::costume_exception(1); + }); + + auto void_ex_res_2 = te->submit([tp] { + std::this_thread::sleep_until(tp); + throw tests::costume_exception(2); + }); + + // int& + auto int_ref_val_res_0 = te->submit([tp]() -> const int& { + std::this_thread::sleep_until(tp); + return integer0; + }); + + auto int_ref_val_res_1 = te->submit([tp]() -> const int& { + std::this_thread::sleep_until(tp); + return integer1; + }); + + auto int_ref_val_res_2 = te->submit([tp]() -> const int& { + std::this_thread::sleep_until(tp); + return integer2; + }); + + auto int_ref_ex_res_0 = te->submit([tp]() -> const int& { + std::this_thread::sleep_until(tp); + throw tests::costume_exception(0); + return integer0; + }); + + auto int_ref_ex_res_1 = te->submit([tp]() -> const int& { + std::this_thread::sleep_until(tp); + throw tests::costume_exception(1); + return integer1; + }); -result when_any_vector_test(std::shared_ptr te) { - random_ctx r; + auto int_ref_ex_res_2 = te->submit([tp]() -> const int& { + std::this_thread::sleep_until(tp); + throw tests::costume_exception(2); + return integer2; + }); - auto loop = run_loop_once(te, r, 1'024); + // std::string& + auto str_ref_val_res_0 = te->submit([tp]() -> const 
std::string& { + std::this_thread::sleep_until(tp); + return string0; + }); - while (!loop.empty()) { - auto any = co_await when_any(loop.begin(), loop.end()); - auto& done_result = any.results[any.index]; + auto str_ref_val_res_1 = te->submit([tp]() -> const std::string& { + std::this_thread::sleep_until(tp); + return string1; + }); - if (done_result.status() != result_status::value) { - std::abort(); + auto str_ref_val_res_2 = te->submit([tp]() -> const std::string& { + std::this_thread::sleep_until(tp); + return string2; + }); + + auto str_ref_ex_res_0 = te->submit([tp]() -> const std::string& { + std::this_thread::sleep_until(tp); + throw tests::costume_exception(0); + return string0; + }); + + auto str_ref_ex_res_1 = te->submit([tp]() -> const std::string& { + std::this_thread::sleep_until(tp); + throw tests::costume_exception(1); + return string1; + }); + + auto str_ref_ex_res_2 = te->submit([tp]() -> const std::string& { + std::this_thread::sleep_until(tp); + throw tests::costume_exception(2); + return string2; + }); + + std::this_thread::sleep_until(tp); + + auto when_any_result = co_await when_any(std::move(int_val_res_0), + std::move(int_val_res_1), + std::move(int_val_res_2), + + std::move(int_ex_res_0), + std::move(int_ex_res_1), + std::move(int_ex_res_2), + + std::move(str_val_res_0), + std::move(str_val_res_1), + std::move(str_val_res_2), + + std::move(str_ex_res_0), + std::move(str_ex_res_1), + std::move(str_ex_res_2), + + std::move(void_val_res_0), + std::move(void_val_res_1), + std::move(void_val_res_2), + + std::move(void_ex_res_0), + std::move(void_ex_res_1), + std::move(void_ex_res_2), + + std::move(int_ref_val_res_0), + std::move(int_ref_val_res_1), + std::move(int_ref_val_res_2), + + std::move(int_ref_ex_res_0), + std::move(int_ref_ex_res_1), + std::move(int_ref_ex_res_2), + + std::move(str_ref_val_res_0), + std::move(str_ref_val_res_1), + std::move(str_ref_val_res_2), + + std::move(str_ref_ex_res_0), + std::move(str_ref_ex_res_1), + std::move(str_ref_ex_res_2)); + + switch (when_any_result.index) { + // int - val + case 0: { + tests::test_ready_result(std::move(std::get<0>(when_any_result.results)), 0); + break; } - if (done_result.get() != 0) { - std::abort(); + case 1: { + tests::test_ready_result(std::move(std::get<1>(when_any_result.results)), 1); + break; } - any.results.erase(any.results.begin() + any.index); - loop = std::move(any.results); - } -} + case 2: { + tests::test_ready_result(std::move(std::get<2>(when_any_result.results)), 2); + break; + } -template -std::vector> to_vector(std::tuple& results) { - return std::apply( - [](auto&&... 
elems) { - std::vector> result; - result.reserve(sizeof...(elems)); - (result.emplace_back(std::move(elems)), ...); - return result; - }, - std::forward(results)); -} + // int - ex + case 3: { + tests::test_ready_result_costume_exception(std::move(std::get<3>(when_any_result.results)), 0); + break; + } + + case 4: { + tests::test_ready_result_costume_exception(std::move(std::get<4>(when_any_result.results)), 1); + break; + } + + case 5: { + tests::test_ready_result_costume_exception(std::move(std::get<5>(when_any_result.results)), 2); + break; + } + + // str - val + case 6: { + tests::test_ready_result(std::move(std::get<6>(when_any_result.results)), std::string("0")); + break; + } + + case 7: { + tests::test_ready_result(std::move(std::get<7>(when_any_result.results)), std::string("1")); + break; + } + + case 8: { + tests::test_ready_result(std::move(std::get<8>(when_any_result.results)), std::string("2")); + break; + } + + // str - ex + case 9: { + tests::test_ready_result_costume_exception(std::move(std::get<9>(when_any_result.results)), 0); + break; + } + + case 10: { + tests::test_ready_result_costume_exception(std::move(std::get<10>(when_any_result.results)), 1); + break; + } + + case 11: { + tests::test_ready_result_costume_exception(std::move(std::get<11>(when_any_result.results)), 2); + break; + } + + // void - val + case 12: { + tests::test_ready_result(std::move(std::get<12>(when_any_result.results))); + break; + } + + case 13: { + tests::test_ready_result(std::move(std::get<13>(when_any_result.results))); + break; + } + + case 14: { + tests::test_ready_result(std::move(std::get<14>(when_any_result.results))); + break; + } + + // void - ex + case 15: { + tests::test_ready_result_costume_exception(std::move(std::get<15>(when_any_result.results)), 0); + break; + } + + case 16: { + tests::test_ready_result_costume_exception(std::move(std::get<16>(when_any_result.results)), 1); + break; + } + + case 17: { + tests::test_ready_result_costume_exception(std::move(std::get<17>(when_any_result.results)), 2); + break; + } + + // int& - val + case 18: { + tests::test_ready_result(std::move(std::get<18>(when_any_result.results)), std::ref(integer0)); + break; + } + + case 19: { + tests::test_ready_result(std::move(std::get<19>(when_any_result.results)), std::ref(integer1)); + break; + } + + case 20: { + tests::test_ready_result(std::move(std::get<20>(when_any_result.results)), std::ref(integer2)); + break; + } -concurrencpp::result when_any_tuple_test(std::shared_ptr te) { - random_ctx r; - auto loop = run_loop_once(te, r, 10); + // int& - ex + case 21: { + tests::test_ready_result_costume_exception(std::move(std::get<21>(when_any_result.results)), 0); + break; + } - for (size_t i = 0; i < 256; i++) { - auto any = co_await when_any(std::move(loop[0]), - std::move(loop[1]), - std::move(loop[2]), - std::move(loop[3]), - std::move(loop[4]), - std::move(loop[5]), - std::move(loop[6]), - std::move(loop[7]), - std::move(loop[8]), - std::move(loop[9])); + case 22: { + tests::test_ready_result_costume_exception(std::move(std::get<22>(when_any_result.results)), 1); + break; + } - loop = to_vector(any.results); + case 23: { + tests::test_ready_result_costume_exception(std::move(std::get<23>(when_any_result.results)), 2); + break; + } - const auto done_index = any.index; - auto& done_result = loop[done_index]; + // std::string& - val + case 24: { + tests::test_ready_result(std::move(std::get<24>(when_any_result.results)), std::ref(string0)); + break; + } - if (done_result.status() != 
result_status::value) {
-            std::abort();
+    case 25: {
+        tests::test_ready_result(std::move(std::get<25>(when_any_result.results)), std::ref(string1));
+        break;
     }
 
-        if (done_result.get() != 0) {
-            std::abort();
+    case 26: {
+        tests::test_ready_result(std::move(std::get<26>(when_any_result.results)), std::ref(string2));
+        break;
     }
 
-        loop.erase(loop.begin() + done_index);
 
+    // std::string& - ex
+    case 27: {
+        tests::test_ready_result_costume_exception(std::move(std::get<27>(when_any_result.results)), 0);
+        break;
+    }
+
+    case 28: {
+        tests::test_ready_result_costume_exception(std::move(std::get<28>(when_any_result.results)), 1);
+        break;
+    }
 
-        const auto sleeping_time = r();
-        loop.emplace_back(te->submit([sleeping_time] {
-            std::this_thread::sleep_for(std::chrono::milliseconds(sleeping_time));
-            return 0;
-        }));
+    case 29: {
+        tests::test_ready_result_costume_exception(std::move(std::get<29>(when_any_result.results)), 2);
+        break;
+    }
     }
+
+    std::cout << "================================" << std::endl;
 }
+
+template<class type>
+result<void> test_when_any_vector_val_impl(std::shared_ptr<thread_executor> te) {
+    const auto tp = system_clock::now() + seconds(2);
+
+    tests::converter<type> converter;
+    auto results = tests::make_result_array(1024, tp, te, converter);
+
+    std::this_thread::sleep_until(tp);
+
+    auto when_any_done = co_await when_any(results.begin(), results.end());
+
+    if constexpr (!std::is_same_v<type, void>) {
+        tests::test_ready_result(std::move(when_any_done.results[when_any_done.index]), converter(when_any_done.index));
+
+    } else {
+        tests::test_ready_result(std::move(when_any_done.results[when_any_done.index]));
+    }
+}
+
+template<class type>
+result<void> test_when_any_vector_ex_impl(std::shared_ptr<thread_executor> te) {
+    const auto tp = system_clock::now() + seconds(2);
+
+    tests::converter<type> converter;
+    auto results = tests::make_exceptional_array(1024, tp, te, converter);
+
+    std::this_thread::sleep_until(tp);
+
+    auto when_any_done = co_await when_any(results.begin(), results.end());
+
+    tests::test_ready_result_costume_exception(std::move(when_any_done.results[when_any_done.index]), when_any_done.index);
+}
+
+void test_when_any_vector(std::shared_ptr<thread_executor> te) {
+    std::cout << "Testing when_any(begin, end)" << std::endl;
+
+    test_when_any_vector_val_impl<int>(te).get();
+    test_when_any_vector_val_impl<std::string>(te).get();
+    test_when_any_vector_val_impl<void>(te).get();
+    test_when_any_vector_val_impl<int&>(te).get();
+    test_when_any_vector_val_impl<std::string&>(te).get();
+
+    test_when_any_vector_ex_impl<int>(te).get();
+    test_when_any_vector_ex_impl<std::string>(te).get();
+    test_when_any_vector_ex_impl<void>(te).get();
+    test_when_any_vector_ex_impl<int&>(te).get();
+    test_when_any_vector_ex_impl<std::string&>(te).get();
+
+    std::cout << "================================" << std::endl;
+}
\ No newline at end of file
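
The thread-sanitizer tests above hammer `when_all(begin, end)` and `when_any(begin, end)` with large batches of results produced by `thread_executor::submit`. For readers who only want the shape of that API, here is a minimal standalone sketch of the same usage pattern; it assumes nothing beyond the public concurrencpp calls exercised by the tests, and the function name `batch_example` and the toy payloads are illustrative only, not part of this patch or of the library:

```cpp
#include "concurrencpp/concurrencpp.h"

#include <iostream>
#include <vector>

using namespace concurrencpp;

// Illustrative sketch (not library code): submit a batch of tasks, await the
// whole batch with when_all, then race a second batch with when_any.
result<void> batch_example(std::shared_ptr<thread_executor> te) {
    std::vector<result<int>> batch;
    for (int i = 0; i < 8; i++) {
        batch.emplace_back(te->submit([i] {
            return i * i;
        }));
    }

    // when_all(begin, end) consumes the input results and yields a vector of ready results.
    auto all_done = co_await when_all(batch.begin(), batch.end());
    for (auto& res : all_done) {
        std::cout << res.get() << " ";
    }
    std::cout << std::endl;

    std::vector<result<int>> race;
    for (int i = 0; i < 8; i++) {
        race.emplace_back(te->submit([i] {
            return i;
        }));
    }

    // when_any(begin, end) yields the index of the first completed result
    // together with ownership of the entire batch.
    auto any_done = co_await when_any(race.begin(), race.end());
    std::cout << "first completed: index " << any_done.index << ", value " << any_done.results[any_done.index].get() << std::endl;

    co_return;
}

int main() {
    concurrencpp::runtime runtime;
    batch_example(runtime.thread_executor()).get();
    return 0;
}
```

As in the tests, the results not selected by `when_any` remain valid inside `any_done.results`, so they can still be awaited, raced again or simply dropped.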