diff --git a/core/CMakeLists.txt b/core/CMakeLists.txt index 1d5e482a..99dc8ea8 100644 --- a/core/CMakeLists.txt +++ b/core/CMakeLists.txt @@ -52,6 +52,8 @@ vecmem_add_library( vecmem_core core "src/memory/details/memory_resource_base.cpp" "include/vecmem/memory/atomic.hpp" "include/vecmem/memory/impl/atomic.ipp" + "include/vecmem/memory/device_atomic_ref.hpp" + "include/vecmem/memory/impl/device_atomic_ref.ipp" "include/vecmem/memory/polymorphic_allocator.hpp" "include/vecmem/memory/memory_resource.hpp" "src/memory/alignment.hpp" diff --git a/core/include/vecmem/containers/device_vector.hpp b/core/include/vecmem/containers/device_vector.hpp index 75fcf85d..33789e48 100644 --- a/core/include/vecmem/containers/device_vector.hpp +++ b/core/include/vecmem/containers/device_vector.hpp @@ -1,6 +1,6 @@ /** VecMem project, part of the ACTS project (R&D line) * - * (c) 2021 CERN for the benefit of the ACTS project + * (c) 2021-2022 CERN for the benefit of the ACTS project * * Mozilla Public License Version 2.0 */ @@ -9,7 +9,7 @@ // Local include(s). #include "vecmem/containers/data/vector_view.hpp" #include "vecmem/containers/details/reverse_iterator.hpp" -#include "vecmem/memory/atomic.hpp" +#include "vecmem/memory/device_atomic_ref.hpp" #include "vecmem/utils/type_traits.hpp" #include "vecmem/utils/types.hpp" @@ -145,7 +145,7 @@ class device_vector { // Create copies of all of the elements one-by-one. It's very // inefficient, but we can't make any assumptions about the type of the // input iterator received by this function. 
-        atomic<size_type> asize(m_size);
+        device_atomic_ref<size_type> asize(*m_size);
         for (InputIt itr = other_begin; itr != other_end; ++itr) {
             construct(asize.fetch_add(1), *itr);
         }
diff --git a/core/include/vecmem/containers/impl/device_vector.ipp b/core/include/vecmem/containers/impl/device_vector.ipp
index 85ec01a9..3c036647 100644
--- a/core/include/vecmem/containers/impl/device_vector.ipp
+++ b/core/include/vecmem/containers/impl/device_vector.ipp
@@ -1,6 +1,6 @@
 /** VecMem project, part of the ACTS project (R&D line)
  *
- * (c) 2021 CERN for the benefit of the ACTS project
+ * (c) 2021-2022 CERN for the benefit of the ACTS project
  *
  * Mozilla Public License Version 2.0
  */
@@ -175,7 +175,7 @@ VECMEM_HOST_AND_DEVICE void device_vector<TYPE>::assign(size_type count,
     // Remove all previous elements.
     clear();
     // Set the assigned size of the vector.
-    atomic<size_type> asize(m_size);
+    device_atomic_ref<size_type> asize(*m_size);
     asize.store(count);
 
     // Create the required number of identical elements.
@@ -194,7 +194,7 @@ VECMEM_HOST_AND_DEVICE auto device_vector<TYPE>::emplace_back(Args&&... args)
 
     // Increment the size of the vector at first. So that we would "claim" the
     // index from other threads.
-    atomic<size_type> asize(m_size);
+    device_atomic_ref<size_type> asize(*m_size);
     const size_type index = asize.fetch_add(1);
     assert(index < m_capacity);
 
@@ -214,7 +214,7 @@ VECMEM_HOST_AND_DEVICE auto device_vector<TYPE>::push_back(
 
     // Increment the size of the vector at first. So that we would "claim" the
     // index from other threads.
-    atomic<size_type> asize(m_size);
+    device_atomic_ref<size_type> asize(*m_size);
     const size_type index = asize.fetch_add(1);
     assert(index < m_capacity);
 
@@ -232,7 +232,7 @@ VECMEM_HOST_AND_DEVICE auto device_vector<TYPE>::pop_back() -> size_type {
     assert(m_size != nullptr);
 
     // Decrement the size of the vector, and remember this new size.
-    atomic<size_type> asize(m_size);
+    device_atomic_ref<size_type> asize(*m_size);
     const size_type new_size = asize.fetch_sub(1) - 1;
 
     // Remove the last element.
@@ -249,7 +249,7 @@ VECMEM_HOST_AND_DEVICE void device_vector::clear() { assert(m_size != nullptr); // Destruct all of the elements that the vector has "at the moment". - atomic asize(m_size); + device_atomic_ref asize(*m_size); const size_type current_size = asize.load(); for (size_type i = 0; i < current_size; ++i) { destruct(i); @@ -273,7 +273,7 @@ VECMEM_HOST_AND_DEVICE void device_vector::resize(size_type new_size, assert(m_size != nullptr); // Get the current size of the vector. - atomic asize(m_size); + device_atomic_ref asize(*m_size); const size_type current_size = asize.load(); // Check if anything needs to be done. @@ -395,7 +395,7 @@ VECMEM_HOST_AND_DEVICE auto device_vector::size() const -> size_type { // SYCL we must pass a non-const pointer to the sycl::atomic object // that performs the load operation. And for that we need a non-const // pointer... - atomic asize(const_cast(m_size)); + device_atomic_ref asize(*(const_cast(m_size))); return asize.load(); } } diff --git a/core/include/vecmem/memory/device_atomic_ref.hpp b/core/include/vecmem/memory/device_atomic_ref.hpp new file mode 100644 index 00000000..cca9ca3d --- /dev/null +++ b/core/include/vecmem/memory/device_atomic_ref.hpp @@ -0,0 +1,187 @@ +/* + * VecMem project, part of the ACTS project (R&D line) + * + * (c) 2022 CERN for the benefit of the ACTS project + * + * Mozilla Public License Version 2.0 + */ +#pragma once + +// System include(s). +#include + +#if (defined(CL_SYCL_LANGUAGE_VERSION) || defined(SYCL_LANGUAGE_VERSION)) && \ + defined(VECMEM_HAVE_SYCL_ATOMIC_REF) + +// SYCL include(s). 
+#include + +namespace vecmem { + +/// Define @c vecmem::memory_order as @c sycl::memory_order +using memory_order = ::sycl::memory_order; + +/// @c vecmem::atomic_ref equals @c sycl::atomic_ref with "modern SYCL" +template +using device_atomic_ref = + ::sycl::atomic_ref; + +} // namespace vecmem + +#elif ((!defined(__CUDA_ARCH__)) && (!defined(__HIP_DEVICE_COMPILE__)) && \ + (!defined(CL_SYCL_LANGUAGE_VERSION)) && \ + (!defined(SYCL_LANGUAGE_VERSION)) && __cpp_lib_atomic_ref) + +namespace vecmem { + +/// Define @c vecmem::memory_order as @c std::memory_order +using memory_order = std::memory_order; + +/// @c vecmem::atomic_ref equals @c std::atomic_ref in host code with C++20 +template +using device_atomic_ref = std::atomic_ref; + +} // namespace vecmem + +#else + +// VecMem include(s). +#include "vecmem/utils/types.hpp" + +// System include(s). +#include + +namespace vecmem { + +/// Custom (dummy) definition for the memory order +enum class memory_order { + relaxed = 0, + consume = 1, + acquire = 2, + release = 3, + acq_rel = 4, + seq_cst = 5 +}; + +/// Class providing atomic operations for the VecMem code +/// +/// It is only meant to be used with primitive types. Ones that CUDA, HIP and +/// SYCL built-in functions exist for. So no structs, or even pointers. +/// +/// Note that it does not perform atomic operations in host code! That is only +/// implemented with @c std::atomic_ref in C++20. With earlier C++ standards all +/// operations in host code are performed as "regular" operations. 
+///
+template <typename T>
+class device_atomic_ref {
+
+public:
+    /// @name Type definitions
+    /// @{
+
+    /// Type managed by the object
+    typedef T value_type;
+    /// Difference between two objects
+    typedef value_type difference_type;
+    /// Pointer to the value in global memory
+    typedef value_type* pointer;
+    /// Reference to a value given by the user
+    typedef value_type& reference;
+
+    /// @}
+
+    /// @name Check(s) on the value type
+    /// @{
+
+    static_assert(std::is_integral<value_type>::value,
+                  "vecmem::device_atomic_ref only accepts built-in integral "
+                  "types");
+
+    /// @}
+
+    /// Constructor, with a pointer to the managed variable
+    VECMEM_HOST_AND_DEVICE
+    device_atomic_ref(reference ref);
+    /// Copy constructor
+    VECMEM_HOST_AND_DEVICE
+    device_atomic_ref(const device_atomic_ref& parent);
+
+    /// Disable the assignment operator
+    device_atomic_ref& operator=(const device_atomic_ref&) = delete;
+
+    /// @name Value setter/getter functions
+    /// @{
+
+    /// Assigns a value desired to the referenced object
+    ///
+    /// @see vecmem::device_atomic_ref::store
+    ///
+    VECMEM_HOST_AND_DEVICE
+    value_type operator=(value_type data) const;
+
+    /// Set the variable to the desired value
+    VECMEM_HOST_AND_DEVICE
+    void store(value_type data,
+               memory_order order = memory_order::seq_cst) const;
+    /// Get the value of the variable
+    VECMEM_HOST_AND_DEVICE
+    value_type load(memory_order order = memory_order::seq_cst) const;
+
+    /// Exchange the current value of the variable with a different one
+    VECMEM_HOST_AND_DEVICE
+    value_type exchange(value_type data,
+                        memory_order order = memory_order::seq_cst) const;
+
+    /// Compare against the current value, and exchange only if different
+    VECMEM_HOST_AND_DEVICE
+    bool compare_exchange_strong(reference expected, value_type desired,
+                                 memory_order success,
+                                 memory_order failure) const;
+    /// Compare against the current value, and exchange only if different
+    VECMEM_HOST_AND_DEVICE
+    bool compare_exchange_strong(
+        reference expected, value_type desired,
+        memory_order order = memory_order::seq_cst) const;
+
+    /// @}
+
+    /// @name Value modifier functions
+    /// @{
+
+    /// Add a chosen value to the stored variable
+    VECMEM_HOST_AND_DEVICE
+    value_type fetch_add(value_type data,
+                         memory_order order = memory_order::seq_cst) const;
+    /// Substitute a chosen value from the stored variable
+    VECMEM_HOST_AND_DEVICE
+    value_type fetch_sub(value_type data,
+                         memory_order order = memory_order::seq_cst) const;
+
+    /// Replace the current value with the specified value AND-ed to it
+    VECMEM_HOST_AND_DEVICE
+    value_type fetch_and(value_type data,
+                         memory_order order = memory_order::seq_cst) const;
+    /// Replace the current value with the specified value OR-d to it
+    VECMEM_HOST_AND_DEVICE
+    value_type fetch_or(value_type data,
+                        memory_order order = memory_order::seq_cst) const;
+    /// Replace the current value with the specified value XOR-d to it
+    VECMEM_HOST_AND_DEVICE
+    value_type fetch_xor(value_type data,
+                         memory_order order = memory_order::seq_cst) const;
+
+    /// @}
+
+private:
+    /// Pointer to the value to perform atomic operations on
+    pointer m_ptr;
+
+};  // class device_atomic_ref
+
+}  // namespace vecmem
+
+// Include the implementation.
+#include "vecmem/memory/impl/device_atomic_ref.ipp"
+
+#endif  // Platform selection
diff --git a/core/include/vecmem/memory/impl/device_atomic_ref.ipp b/core/include/vecmem/memory/impl/device_atomic_ref.ipp
new file mode 100644
index 00000000..ae0e46cf
--- /dev/null
+++ b/core/include/vecmem/memory/impl/device_atomic_ref.ipp
@@ -0,0 +1,220 @@
+/*
+ * VecMem project, part of the ACTS project (R&D line)
+ *
+ * (c) 2022 CERN for the benefit of the ACTS project
+ *
+ * Mozilla Public License Version 2.0
+ */
+#pragma once
+
+// SYCL include(s).
+#if defined(CL_SYCL_LANGUAGE_VERSION) || defined(SYCL_LANGUAGE_VERSION)
+#include <CL/sycl.hpp>
+#endif
+
+/// Helpers for explicit calls to the SYCL atomic functions
+#if defined(CL_SYCL_LANGUAGE_VERSION) || defined(SYCL_LANGUAGE_VERSION)
+#define __VECMEM_SYCL_ATOMIC_CALL0(FNAME, PTR)                                 \
+    cl::sycl::atomic_##FNAME(                                                  \
+        cl::sycl::atomic<value_type>(cl::sycl::global_ptr<value_type>(PTR)))
+#define __VECMEM_SYCL_ATOMIC_CALL1(FNAME, PTR, ARG1)                           \
+    cl::sycl::atomic_##FNAME(                                                  \
+        cl::sycl::atomic<value_type>(cl::sycl::global_ptr<value_type>(PTR)),   \
+        ARG1)
+#define __VECMEM_SYCL_ATOMIC_CALL2(FNAME, PTR, ARG1, ARG2)                     \
+    cl::sycl::atomic_##FNAME(                                                  \
+        cl::sycl::atomic<value_type>(cl::sycl::global_ptr<value_type>(PTR)),   \
+        ARG1, ARG2)
+#endif
+
+namespace vecmem {
+
+template <typename T>
+VECMEM_HOST_AND_DEVICE device_atomic_ref<T>::device_atomic_ref(reference ref)
+    : m_ptr(&ref) {}
+
+template <typename T>
+VECMEM_HOST_AND_DEVICE device_atomic_ref<T>::device_atomic_ref(
+    const device_atomic_ref& parent)
+    : m_ptr(parent.m_ptr) {}
+
+template <typename T>
+VECMEM_HOST_AND_DEVICE auto device_atomic_ref<T>::operator=(
+    value_type data) const -> value_type {
+
+    store(data);
+    return load();
+}
+
+template <typename T>
+VECMEM_HOST_AND_DEVICE void device_atomic_ref<T>::store(value_type data,
+                                                        memory_order) const {
+
+#if (defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__)) && \
+    (!(defined(SYCL_LANGUAGE_VERSION) || defined(CL_SYCL_LANGUAGE_VERSION)))
+    volatile pointer addr = m_ptr;
+    __threadfence();
+    *addr = data;
+#elif defined(CL_SYCL_LANGUAGE_VERSION) || defined(SYCL_LANGUAGE_VERSION)
+    __VECMEM_SYCL_ATOMIC_CALL1(store, m_ptr, data);
+#else
+    *m_ptr = data;
+#endif
+}
+
+template <typename T>
+VECMEM_HOST_AND_DEVICE auto device_atomic_ref<T>::load(memory_order) const
+    -> value_type {
+
+#if (defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__)) && \
+    (!(defined(SYCL_LANGUAGE_VERSION) || defined(CL_SYCL_LANGUAGE_VERSION)))
+    volatile pointer addr = m_ptr;
+    __threadfence();
+    const value_type value = *addr;
+    __threadfence();
+    return value;
+#elif defined(CL_SYCL_LANGUAGE_VERSION) || defined(SYCL_LANGUAGE_VERSION)
+    return __VECMEM_SYCL_ATOMIC_CALL0(load, m_ptr);
+#else
+    return *m_ptr;
+#endif
+}
+
+template <typename T>
+VECMEM_HOST_AND_DEVICE auto device_atomic_ref<T>::exchange(value_type data,
+                                                           memory_order) const
+    -> value_type {
+
+#if (defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__)) && \
+    (!(defined(SYCL_LANGUAGE_VERSION) || defined(CL_SYCL_LANGUAGE_VERSION)))
+    return atomicExch(m_ptr, data);
+#elif defined(CL_SYCL_LANGUAGE_VERSION) || defined(SYCL_LANGUAGE_VERSION)
+    return __VECMEM_SYCL_ATOMIC_CALL1(exchange, m_ptr, data);
+#else
+    value_type current_value = *m_ptr;
+    *m_ptr = data;
+    return current_value;
+#endif
+}
+
+template <typename T>
+VECMEM_HOST_AND_DEVICE bool device_atomic_ref<T>::compare_exchange_strong(
+    reference expected, value_type desired, memory_order, memory_order) const {
+
+    return compare_exchange_strong(expected, desired);
+}
+
+template <typename T>
+VECMEM_HOST_AND_DEVICE bool device_atomic_ref<T>::compare_exchange_strong(
+    reference expected, value_type desired, memory_order) const {
+
+#if (defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__)) && \
+    (!(defined(SYCL_LANGUAGE_VERSION) || defined(CL_SYCL_LANGUAGE_VERSION)))
+    // atomicCAS returns the *old* value of the variable, not a success flag.
+    // Compare it against the expected value, and update "expected" on
+    // failure, to implement the std::atomic_ref-style contract.
+    const value_type old = atomicCAS(m_ptr, expected, desired);
+    if (old == expected) {
+        return true;
+    }
+    expected = old;
+    return false;
+#elif defined(CL_SYCL_LANGUAGE_VERSION) || defined(SYCL_LANGUAGE_VERSION)
+    return __VECMEM_SYCL_ATOMIC_CALL2(compare_exchange_strong, m_ptr, expected,
+                                      desired);
+#else
+    if (*m_ptr == expected) {
+        *m_ptr = desired;
+        return true;
+    } else {
+        expected = *m_ptr;
+        return false;
+    }
+#endif
+}
+
+template <typename T>
+VECMEM_HOST_AND_DEVICE auto device_atomic_ref<T>::fetch_add(value_type data,
+                                                            memory_order) const
+    -> value_type {
+
+#if (defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__)) && \
+    (!(defined(SYCL_LANGUAGE_VERSION) || defined(CL_SYCL_LANGUAGE_VERSION)))
+    return atomicAdd(m_ptr, data);
+#elif defined(CL_SYCL_LANGUAGE_VERSION) || defined(SYCL_LANGUAGE_VERSION)
+    return __VECMEM_SYCL_ATOMIC_CALL1(fetch_add, m_ptr, data);
+#else
+    const value_type result = *m_ptr;
+    *m_ptr += data;
+    return result;
+#endif
+}
+
+template <typename T>
+VECMEM_HOST_AND_DEVICE auto device_atomic_ref<T>::fetch_sub(value_type data,
+                                                            memory_order) const
+    -> value_type {
+
+#if (defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__)) && \
+    (!(defined(SYCL_LANGUAGE_VERSION) || defined(CL_SYCL_LANGUAGE_VERSION)))
+    return atomicSub(m_ptr, data);
+#elif defined(CL_SYCL_LANGUAGE_VERSION) || defined(SYCL_LANGUAGE_VERSION)
+    return __VECMEM_SYCL_ATOMIC_CALL1(fetch_sub, m_ptr, data);
+#else
+    const value_type result = *m_ptr;
+    *m_ptr -= data;
+    return result;
+#endif
+}
+
+template <typename T>
+VECMEM_HOST_AND_DEVICE auto device_atomic_ref<T>::fetch_and(value_type data,
+                                                            memory_order) const
+    -> value_type {
+
+#if (defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__)) && \
+    (!(defined(SYCL_LANGUAGE_VERSION) || defined(CL_SYCL_LANGUAGE_VERSION)))
+    return atomicAnd(m_ptr, data);
+#elif defined(CL_SYCL_LANGUAGE_VERSION) || defined(SYCL_LANGUAGE_VERSION)
+    return __VECMEM_SYCL_ATOMIC_CALL1(fetch_and, m_ptr, data);
+#else
+    const value_type result = *m_ptr;
+    *m_ptr &= data;
+    return result;
+#endif
+}
+
+template <typename T>
+VECMEM_HOST_AND_DEVICE auto device_atomic_ref<T>::fetch_or(value_type data,
+                                                           memory_order) const
+    -> value_type {
+
+#if (defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__)) && \
+    (!(defined(SYCL_LANGUAGE_VERSION) || defined(CL_SYCL_LANGUAGE_VERSION)))
+    return atomicOr(m_ptr, data);
+#elif defined(CL_SYCL_LANGUAGE_VERSION) || defined(SYCL_LANGUAGE_VERSION)
+    return __VECMEM_SYCL_ATOMIC_CALL1(fetch_or, m_ptr, data);
+#else
+    const value_type result = *m_ptr;
+    *m_ptr |= data;
+    return result;
+#endif
+}
+
+template <typename T>
+VECMEM_HOST_AND_DEVICE auto device_atomic_ref<T>::fetch_xor(value_type data,
+                                                            memory_order) const
+    -> value_type {
+
+#if (defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__)) && \
+    (!(defined(SYCL_LANGUAGE_VERSION) || defined(CL_SYCL_LANGUAGE_VERSION)))
+    return atomicXor(m_ptr, data);
+#elif defined(CL_SYCL_LANGUAGE_VERSION) ||
defined(SYCL_LANGUAGE_VERSION) + return __VECMEM_SYCL_ATOMIC_CALL1(fetch_xor, m_ptr, data); +#else + const value_type result = *m_ptr; + *m_ptr ^= data; + return result; +#endif +} + +} // namespace vecmem + +// Clean up after the SYCL macros. +#if defined(CL_SYCL_LANGUAGE_VERSION) || defined(SYCL_LANGUAGE_VERSION) +#undef __VECMEM_SYCL_ATOMIC_CALL0 +#undef __VECMEM_SYCL_ATOMIC_CALL1 +#undef __VECMEM_SYCL_ATOMIC_CALL2 +#endif diff --git a/tests/cuda/test_cuda_containers.cpp b/tests/cuda/test_cuda_containers.cpp index 5cca733f..be40b67e 100644 --- a/tests/cuda/test_cuda_containers.cpp +++ b/tests/cuda/test_cuda_containers.cpp @@ -167,7 +167,7 @@ TEST_F(cuda_containers_test, atomic_memory) { // Check the output. for (int value : vec) { - EXPECT_EQ(static_cast(value), 2 * ITERATIONS); + EXPECT_EQ(static_cast(value), 4 * ITERATIONS); } } diff --git a/tests/cuda/test_cuda_containers_kernels.cu b/tests/cuda/test_cuda_containers_kernels.cu index 6e2abe52..d7512bf2 100644 --- a/tests/cuda/test_cuda_containers_kernels.cu +++ b/tests/cuda/test_cuda_containers_kernels.cu @@ -15,6 +15,7 @@ #include "vecmem/containers/jagged_device_vector.hpp" #include "vecmem/containers/static_array.hpp" #include "vecmem/memory/atomic.hpp" +#include "vecmem/memory/device_atomic_ref.hpp" // System include(s). #include @@ -83,12 +84,19 @@ __global__ void atomicTransformKernel(std::size_t iterations, assert(array_index < data.size()); int* ptr = data.ptr() + array_index; - // Do some simple stuff with it. + // Do some simple stuff with it using vecmem::atomic. vecmem::atomic a(ptr); a.fetch_add(4); a.fetch_sub(2); a.fetch_and(0xffffffff); a.fetch_or(0x00000000); + + // Do the same simple stuff with it using vecmem::atomic_ref. 
+ vecmem::device_atomic_ref a2(*ptr); + a2.fetch_add(4); + a2.fetch_sub(2); + a2.fetch_and(0xffffffff); + a2.fetch_or(0x00000000); return; } diff --git a/tests/hip/test_hip_containers_kernels.hip b/tests/hip/test_hip_containers_kernels.hip index a93f2dcd..8d499fa8 100644 --- a/tests/hip/test_hip_containers_kernels.hip +++ b/tests/hip/test_hip_containers_kernels.hip @@ -1,6 +1,6 @@ /** VecMem project, part of the ACTS project (R&D line) * - * (c) 2021 CERN for the benefit of the ACTS project + * (c) 2021-2022 CERN for the benefit of the ACTS project * * Mozilla Public License Version 2.0 */ @@ -18,6 +18,7 @@ #include "vecmem/containers/jagged_device_vector.hpp" #include "vecmem/containers/static_array.hpp" #include "vecmem/memory/atomic.hpp" +#include "vecmem/memory/device_atomic_ref.hpp" /// Kernel performing a linear transformation using the vector helper types __global__ void linearTransformKernel( @@ -127,10 +128,19 @@ __global__ void atomicTransformKernel(std::size_t iterations, assert(array_index < data.size()); int* ptr = data.ptr() + array_index; - // Do some simple stuff with it. + // Do some simple stuff with it using vecmem::atomic. vecmem::atomic a(ptr); - a.fetch_add(1); - a.fetch_add(3); + a.fetch_add(4); + a.fetch_sub(2); + a.fetch_and(0xffffffff); + a.fetch_or(0x00000000); + + // Do the same simple stuff with it using vecmem::atomic_ref. 
+ vecmem::device_atomic_ref a2(*ptr); + a2.fetch_add(4); + a2.fetch_sub(2); + a2.fetch_and(0xffffffff); + a2.fetch_or(0x00000000); return; } diff --git a/tests/sycl/test_sycl_containers.sycl b/tests/sycl/test_sycl_containers.sycl index dd869a05..5c70b028 100644 --- a/tests/sycl/test_sycl_containers.sycl +++ b/tests/sycl/test_sycl_containers.sycl @@ -1,6 +1,6 @@ /** VecMem project, part of the ACTS project (R&D line) * - * (c) 2021 CERN for the benefit of the ACTS project + * (c) 2021-2022 CERN for the benefit of the ACTS project * * Mozilla Public License Version 2.0 */ @@ -17,6 +17,7 @@ #include "vecmem/containers/static_array.hpp" #include "vecmem/containers/vector.hpp" #include "vecmem/memory/atomic.hpp" +#include "vecmem/memory/device_atomic_ref.hpp" #include "vecmem/memory/sycl/device_memory_resource.hpp" #include "vecmem/memory/sycl/host_memory_resource.hpp" #include "vecmem/memory/sycl/shared_memory_resource.hpp" @@ -188,19 +189,26 @@ TEST_F(sycl_containers_test, atomic_memory) { const std::size_t index = i % buffer.size(); int* ptr = buffer.ptr() + index; - // Do some simple stuff with it. + // Do some simple stuff with it using vecmem::atomic. vecmem::atomic a(ptr); a.fetch_add(4); a.fetch_sub(2); a.fetch_and(0xffffffff); a.fetch_or(0x00000000); + + // Do the same simple stuff with it using vecmem::atomic_ref. + vecmem::device_atomic_ref a2(*ptr); + a2.fetch_add(4); + a2.fetch_sub(2); + a2.fetch_and(0xffffffff); + a2.fetch_or(0x00000000); }); }); queue.wait_and_throw(); // Check the output. for (int value : buffer) { - EXPECT_EQ(value, ITERATIONS * 2); + EXPECT_EQ(value, ITERATIONS * 4); } }