diff --git a/R/build.R b/R/build.R
index 95cbf5e8..b57aef0e 100644
--- a/R/build.R
+++ b/R/build.R
@@ -1,6 +1,6 @@
-# Output the CXX flags. These flags are propagated to sourceCpp via the
+# Output the CXX flags. These flags are propagated to sourceCpp via the
# inlineCxxPlugin (defined below) and to packages via a line in Makevars[.win]
# like this:
#
@@ -12,7 +12,7 @@ CxxFlags <- function() {
# Output the LD flags for building against TBB. These flags are propagated
-# to sourceCpp via the inlineCxxPlugin (defined below) and to packages
+# to sourceCpp via the inlineCxxPlugin (defined below) and to packages
# via a line in Makevars[.win] like this:
#
# PKG_LIBS += $(shell "${R_HOME}/bin${R_ARCH_BIN}/Rscript.exe" -e "RcppParallel::LdFlags()")
@@ -41,13 +41,13 @@ inlineCxxPlugin <- function() {
}
tbbCxxFlags <- function() {
-
+
flags <- c()
-
+
# opt-in to TBB on Windows
if (Sys.info()['sysname'] == "Windows")
flags <- paste(flags, "-DRCPP_PARALLEL_USE_TBB=1")
-
+
flags
}
@@ -66,8 +66,8 @@ tbbLdFlags <- function() {
tbbLibPath <- function(suffix = "") {
sysname <- Sys.info()['sysname']
tbbSupported <- list(
- "Darwin" = paste("libtbb", suffix, ".dylib", sep = ""),
- "Linux" = paste("libtbb", suffix, ".so.2", sep = ""),
+ "Darwin" = paste("libtbb", suffix, ".dylib", sep = ""),
+ "Linux" = paste("libtbb", suffix, ".so.2", sep = ""),
"Windows" = paste("tbb", suffix, ".dll", sep = ""),
"SunOS" = paste("libtbb", suffix, ".so", sep = "")
)
@@ -75,7 +75,7 @@ tbbLibPath <- function(suffix = "") {
libDir <- "lib/"
if (sysname == "Windows")
libDir <- paste(libDir, .Platform$r_arch, "/", sep="")
- system.file(paste(libDir, tbbSupported[[sysname]], sep = ""),
+ system.file(paste(libDir, tbbSupported[[sysname]], sep = ""),
package = "RcppParallel")
} else {
NULL
diff --git a/inst/include/index.html b/inst/include/index.html
index b0962e01..c8698efb 100644
--- a/inst/include/index.html
+++ b/inst/include/index.html
@@ -15,7 +15,7 @@
Directories
Up to parent directory
-Copyright © 2005-2017 Intel Corporation. All Rights Reserved.
+Copyright © 2005-2020 Intel Corporation. All Rights Reserved.
Intel is a registered trademark or trademark of Intel Corporation
or its subsidiaries in the United States and other countries.
diff --git a/inst/include/serial/tbb/parallel_for.h b/inst/include/serial/tbb/parallel_for.h
index e5959c22..4e6111b7 100644
--- a/inst/include/serial/tbb/parallel_for.h
+++ b/inst/include/serial/tbb/parallel_for.h
@@ -1,5 +1,5 @@
/*
- Copyright (c) 2005-2018 Intel Corporation
+ Copyright (c) 2005-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -12,11 +12,18 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
+*/
+#include "../../tbb/internal/_deprecated_header_message_guard.h"
+#if !defined(__TBB_show_deprecation_message_parallel_for_H) && defined(__TBB_show_deprecated_header_message)
+#define __TBB_show_deprecation_message_parallel_for_H
+#pragma message("TBB Warning: serial/tbb/parallel_for.h is deprecated. For details, please see Deprecated Features appendix in the TBB reference manual.")
+#endif
-
-*/
+#if defined(__TBB_show_deprecated_header_message)
+#undef __TBB_show_deprecated_header_message
+#endif
#ifndef __TBB_SERIAL_parallel_for_H
#define __TBB_SERIAL_parallel_for_H
@@ -98,35 +105,35 @@ void start_for< Range, Body, Partitioner >::execute() {
//! Parallel iteration over range with default partitioner.
/** @ingroup algorithms **/
template<typename Range, typename Body>
-void parallel_for( const Range& range, const Body& body ) {
+__TBB_DEPRECATED_IN_VERBOSE_MODE void parallel_for( const Range& range, const Body& body ) {
serial::interface9::start_for<Range,Body,const __TBB_DEFAULT_PARTITIONER>::run(range,body,__TBB_DEFAULT_PARTITIONER());
}
//! Parallel iteration over range with simple partitioner.
/** @ingroup algorithms **/
template<typename Range, typename Body>
-void parallel_for( const Range& range, const Body& body, const simple_partitioner& partitioner ) {
+__TBB_DEPRECATED_IN_VERBOSE_MODE void parallel_for( const Range& range, const Body& body, const simple_partitioner& partitioner ) {
serial::interface9::start_for<Range,Body,const simple_partitioner>::run(range,body,partitioner);
}
//! Parallel iteration over range with auto_partitioner.
/** @ingroup algorithms **/
template<typename Range, typename Body>
-void parallel_for( const Range& range, const Body& body, const auto_partitioner& partitioner ) {
+__TBB_DEPRECATED_IN_VERBOSE_MODE void parallel_for( const Range& range, const Body& body, const auto_partitioner& partitioner ) {
serial::interface9::start_for<Range,Body,const auto_partitioner>::run(range,body,partitioner);
}
//! Parallel iteration over range with static_partitioner.
/** @ingroup algorithms **/
template<typename Range, typename Body>
-void parallel_for( const Range& range, const Body& body, const static_partitioner& partitioner ) {
+__TBB_DEPRECATED_IN_VERBOSE_MODE void parallel_for( const Range& range, const Body& body, const static_partitioner& partitioner ) {
serial::interface9::start_for<Range,Body,const static_partitioner>::run(range,body,partitioner);
}
//! Parallel iteration over range with affinity_partitioner.
/** @ingroup algorithms **/
template<typename Range, typename Body>
-void parallel_for( const Range& range, const Body& body, affinity_partitioner& partitioner ) {
+__TBB_DEPRECATED_IN_VERBOSE_MODE void parallel_for( const Range& range, const Body& body, affinity_partitioner& partitioner ) {
serial::interface9::start_for<Range,Body,affinity_partitioner>::run(range,body,partitioner);
}
@@ -154,53 +161,53 @@ void parallel_for_impl(Index first, Index last, Index step, const Function& f, P
//! Parallel iteration over a range of integers with explicit step and default partitioner
template <typename Index, typename Function>
-void parallel_for(Index first, Index last, Index step, const Function& f) {
+__TBB_DEPRECATED_IN_VERBOSE_MODE void parallel_for(Index first, Index last, Index step, const Function& f) {
parallel_for_impl<Index,Function,const auto_partitioner>(first, last, step, f, auto_partitioner());
}
//! Parallel iteration over a range of integers with explicit step and simple partitioner
template <typename Index, typename Function>
-void parallel_for(Index first, Index last, Index step, const Function& f, const simple_partitioner& p) {
+__TBB_DEPRECATED_IN_VERBOSE_MODE void parallel_for(Index first, Index last, Index step, const Function& f, const simple_partitioner& p) {
parallel_for_impl<Index,Function,const simple_partitioner>(first, last, step, f, p);
}
//! Parallel iteration over a range of integers with explicit step and auto partitioner
template <typename Index, typename Function>
-void parallel_for(Index first, Index last, Index step, const Function& f, const auto_partitioner& p) {
+__TBB_DEPRECATED_IN_VERBOSE_MODE void parallel_for(Index first, Index last, Index step, const Function& f, const auto_partitioner& p) {
parallel_for_impl<Index,Function,const auto_partitioner>(first, last, step, f, p);
}
//! Parallel iteration over a range of integers with explicit step and static partitioner
template <typename Index, typename Function>
-void parallel_for(Index first, Index last, Index step, const Function& f, const static_partitioner& p) {
+__TBB_DEPRECATED_IN_VERBOSE_MODE void parallel_for(Index first, Index last, Index step, const Function& f, const static_partitioner& p) {
parallel_for_impl<Index,Function,const static_partitioner>(first, last, step, f, p);
}
//! Parallel iteration over a range of integers with explicit step and affinity partitioner
template <typename Index, typename Function>
-void parallel_for(Index first, Index last, Index step, const Function& f, affinity_partitioner& p) {
+__TBB_DEPRECATED_IN_VERBOSE_MODE void parallel_for(Index first, Index last, Index step, const Function& f, affinity_partitioner& p) {
parallel_for_impl<Index,Function,affinity_partitioner>(first, last, step, f, p);
}
//! Parallel iteration over a range of integers with default step and default partitioner
template <typename Index, typename Function>
-void parallel_for(Index first, Index last, const Function& f) {
+__TBB_DEPRECATED_IN_VERBOSE_MODE void parallel_for(Index first, Index last, const Function& f) {
parallel_for_impl<Index,Function,const auto_partitioner>(first, last, static_cast<Index>(1), f, auto_partitioner());
}
//! Parallel iteration over a range of integers with default step and simple partitioner
template <typename Index, typename Function>
-void parallel_for(Index first, Index last, const Function& f, const simple_partitioner& p) {
+__TBB_DEPRECATED_IN_VERBOSE_MODE void parallel_for(Index first, Index last, const Function& f, const simple_partitioner& p) {
parallel_for_impl<Index,Function,const simple_partitioner>(first, last, static_cast<Index>(1), f, p);
}
//! Parallel iteration over a range of integers with default step and auto partitioner
template <typename Index, typename Function>
- void parallel_for(Index first, Index last, const Function& f, const auto_partitioner& p) {
+__TBB_DEPRECATED_IN_VERBOSE_MODE void parallel_for(Index first, Index last, const Function& f, const auto_partitioner& p) {
parallel_for_impl<Index,Function,const auto_partitioner>(first, last, static_cast<Index>(1), f, p);
}
//! Parallel iteration over a range of integers with default step and static partitioner
template <typename Index, typename Function>
-void parallel_for(Index first, Index last, const Function& f, const static_partitioner& p) {
+__TBB_DEPRECATED_IN_VERBOSE_MODE void parallel_for(Index first, Index last, const Function& f, const static_partitioner& p) {
parallel_for_impl<Index,Function,const static_partitioner>(first, last, static_cast<Index>(1), f, p);
}
//! Parallel iteration over a range of integers with default step and affinity_partitioner
template <typename Index, typename Function>
-void parallel_for(Index first, Index last, const Function& f, affinity_partitioner& p) {
+__TBB_DEPRECATED_IN_VERBOSE_MODE void parallel_for(Index first, Index last, const Function& f, affinity_partitioner& p) {
parallel_for_impl<Index,Function,affinity_partitioner>(first, last, static_cast<Index>(1), f, p);
}
diff --git a/inst/include/serial/tbb/tbb_annotate.h b/inst/include/serial/tbb/tbb_annotate.h
index c16defea..3e67e4c4 100644
--- a/inst/include/serial/tbb/tbb_annotate.h
+++ b/inst/include/serial/tbb/tbb_annotate.h
@@ -1,5 +1,5 @@
/*
- Copyright (c) 2005-2018 Intel Corporation
+ Copyright (c) 2005-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -12,10 +12,6 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-
-
-
-
*/
#ifndef __TBB_annotate_H
diff --git a/inst/include/tbb/aggregator.h b/inst/include/tbb/aggregator.h
index 6aecbb74..786c52c8 100644
--- a/inst/include/tbb/aggregator.h
+++ b/inst/include/tbb/aggregator.h
@@ -1,5 +1,5 @@
/*
- Copyright (c) 2005-2017 Intel Corporation
+ Copyright (c) 2005-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -12,15 +12,14 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-
-
-
-
*/
#ifndef __TBB__aggregator_H
#define __TBB__aggregator_H
+#define __TBB_aggregator_H_include_area
+#include "internal/_warning_suppress_enable_notice.h"
+
#if !TBB_PREVIEW_AGGREGATOR
#error Set TBB_PREVIEW_AGGREGATOR before including aggregator.h
#endif
@@ -199,4 +198,7 @@ using interface6::aggregator_operation;
} // namespace tbb
+#include "internal/_warning_suppress_disable_notice.h"
+#undef __TBB_aggregator_H_include_area
+
#endif // __TBB__aggregator_H
diff --git a/inst/include/tbb/aligned_space.h b/inst/include/tbb/aligned_space.h
index 56fd85f3..1b047f97 100644
--- a/inst/include/tbb/aligned_space.h
+++ b/inst/include/tbb/aligned_space.h
@@ -1,5 +1,5 @@
/*
- Copyright (c) 2005-2017 Intel Corporation
+ Copyright (c) 2005-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -12,15 +12,25 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
+*/
+#include "internal/_deprecated_header_message_guard.h"
+#if !defined(__TBB_show_deprecation_message_aligned_space_H) && defined(__TBB_show_deprecated_header_message)
+#define __TBB_show_deprecation_message_aligned_space_H
+#pragma message("TBB Warning: tbb/aligned_space.h is deprecated. For details, please see Deprecated Features appendix in the TBB reference manual.")
+#endif
-
-*/
+#if defined(__TBB_show_deprecated_header_message)
+#undef __TBB_show_deprecated_header_message
+#endif
#ifndef __TBB_aligned_space_H
#define __TBB_aligned_space_H
+#define __TBB_aligned_space_H_include_area
+#include "internal/_warning_suppress_enable_notice.h"
+
#include "tbb_stddef.h"
#include "tbb_machine.h"
@@ -30,18 +40,21 @@ namespace tbb {
/** The elements are not constructed or destroyed by this class.
@ingroup memory_allocation */
template<typename T,size_t N>
-class aligned_space {
+class __TBB_DEPRECATED_IN_VERBOSE_MODE_MSG("tbb::aligned_space is deprecated, use std::aligned_storage") aligned_space {
private:
typedef __TBB_TypeWithAlignmentAtLeastAsStrict(T) element_type;
element_type array[(sizeof(T)*N+sizeof(element_type)-1)/sizeof(element_type)];
public:
//! Pointer to beginning of array
- T* begin() {return internal::punned_cast<T*>(this);}
+ T* begin() const {return internal::punned_cast<T*>(this);}
//! Pointer to one past last element in array.
- T* end() {return begin()+N;}
+ T* end() const {return begin()+N;}
};
} // namespace tbb
+#include "internal/_warning_suppress_disable_notice.h"
+#undef __TBB_aligned_space_H_include_area
+
#endif /* __TBB_aligned_space_H */
diff --git a/inst/include/tbb/atomic.h b/inst/include/tbb/atomic.h
index 72ec534e..e602306f 100644
--- a/inst/include/tbb/atomic.h
+++ b/inst/include/tbb/atomic.h
@@ -1,5 +1,5 @@
/*
- Copyright (c) 2005-2017 Intel Corporation
+ Copyright (c) 2005-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -12,15 +12,25 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
+*/
+#include "internal/_deprecated_header_message_guard.h"
+#if !defined(__TBB_show_deprecation_message_atomic_H) && defined(__TBB_show_deprecated_header_message)
+#define __TBB_show_deprecation_message_atomic_H
+#pragma message("TBB Warning: tbb/atomic.h is deprecated. For details, please see Deprecated Features appendix in the TBB reference manual.")
+#endif
-
-*/
+#if defined(__TBB_show_deprecated_header_message)
+#undef __TBB_show_deprecated_header_message
+#endif
#ifndef __TBB_atomic_H
#define __TBB_atomic_H
+#define __TBB_atomic_H_include_area
+#include "internal/_warning_suppress_enable_notice.h"
+
#include <cstddef>
#if _MSC_VER
@@ -404,10 +414,12 @@ struct atomic_impl_with_arithmetic: atomic_impl {
/** See the Reference for details.
@ingroup synchronization */
template<typename T>
-struct atomic: internal::atomic_impl<T> {
+struct __TBB_DEPRECATED_IN_VERBOSE_MODE_MSG("tbb::atomic is deprecated, use std::atomic")
+atomic: internal::atomic_impl<T> {
#if __TBB_ATOMIC_CTORS
atomic() = default;
constexpr atomic(T arg): internal::atomic_impl<T>(arg) {}
+ constexpr atomic(const atomic<T>& rhs): internal::atomic_impl<T>(rhs) {}
#endif
T operator=( T rhs ) {
// "this" required here in strict ISO C++ because store_with_release is a dependent name
@@ -418,16 +430,20 @@ struct atomic: internal::atomic_impl {
#if __TBB_ATOMIC_CTORS
#define __TBB_DECL_ATOMIC(T) \
- template<> struct atomic: internal::atomic_impl_with_arithmetic { \
+ template<> struct __TBB_DEPRECATED_IN_VERBOSE_MODE_MSG("tbb::atomic is deprecated, use std::atomic") \
+ atomic: internal::atomic_impl_with_arithmetic { \
atomic() = default; \
constexpr atomic(T arg): internal::atomic_impl_with_arithmetic(arg) {} \
+ constexpr atomic(const atomic& rhs): \
+ internal::atomic_impl_with_arithmetic(rhs) {} \
\
T operator=( T rhs ) {return store_with_release(rhs);} \
atomic& operator=( const atomic& rhs ) {store_with_release(rhs); return *this;} \
};
#else
#define __TBB_DECL_ATOMIC(T) \
- template<> struct atomic: internal::atomic_impl_with_arithmetic { \
+ template<> struct __TBB_DEPRECATED_IN_VERBOSE_MODE_MSG("tbb::atomic is deprecated, use std::atomic") \
+ atomic: internal::atomic_impl_with_arithmetic { \
T operator=( T rhs ) {return store_with_release(rhs);} \
atomic& operator=( const atomic& rhs ) {store_with_release(rhs); return *this;} \
};
@@ -451,15 +467,20 @@ __TBB_DECL_ATOMIC(unsigned long)
type synonyms on the platform. Type U should be the wider variant of T from the
perspective of /Wp64. */
#define __TBB_DECL_ATOMIC_ALT(T,U) \
- template<> struct atomic: internal::atomic_impl_with_arithmetic { \
+ template<> struct __TBB_DEPRECATED_IN_VERBOSE_MODE_MSG("tbb::atomic is deprecated, use std::atomic") \
+ atomic: internal::atomic_impl_with_arithmetic { \
atomic() = default ; \
constexpr atomic(T arg): internal::atomic_impl_with_arithmetic(arg) {} \
+ constexpr atomic(const atomic& rhs): \
+ internal::atomic_impl_with_arithmetic(rhs) {} \
+ \
T operator=( U rhs ) {return store_with_release(T(rhs));} \
atomic& operator=( const atomic& rhs ) {store_with_release(rhs); return *this;} \
};
#else
#define __TBB_DECL_ATOMIC_ALT(T,U) \
- template<> struct atomic: internal::atomic_impl_with_arithmetic { \
+ template<> struct __TBB_DEPRECATED_IN_VERBOSE_MODE_MSG("tbb::atomic is deprecated, use std::atomic") \
+ atomic: internal::atomic_impl_with_arithmetic { \
T operator=( U rhs ) {return store_with_release(T(rhs));} \
atomic& operator=( const atomic& rhs ) {store_with_release(rhs); return *this;} \
};
@@ -482,10 +503,12 @@ __TBB_DECL_ATOMIC(wchar_t)
#endif /* _MSC_VER||!defined(_NATIVE_WCHAR_T_DEFINED) */
//! Specialization for atomic with arithmetic and operator->.
-template<typename T> struct atomic<T*>: internal::atomic_impl_with_arithmetic<T*,ptrdiff_t,T> {
+template<typename T> struct __TBB_DEPRECATED_IN_VERBOSE_MODE_MSG("tbb::atomic is deprecated, use std::atomic")
+atomic<T*>: internal::atomic_impl_with_arithmetic<T*,ptrdiff_t,T> {
#if __TBB_ATOMIC_CTORS
atomic() = default ;
constexpr atomic(T* arg): internal::atomic_impl_with_arithmetic<T*,ptrdiff_t,T>(arg) {}
+ constexpr atomic(const atomic<T*>& rhs): internal::atomic_impl_with_arithmetic<T*,ptrdiff_t,T>(rhs) {}
#endif
T* operator=( T* rhs ) {
// "this" required here in strict ISO C++ because store_with_release is a dependent name
@@ -500,10 +523,12 @@ template struct atomic: internal::atomic_impl_with_arithmetic, for sake of not allowing arithmetic or operator->.
-template<> struct atomic<void*>: internal::atomic_impl<void*> {
+template<> struct __TBB_DEPRECATED_IN_VERBOSE_MODE_MSG("tbb::atomic is deprecated, use std::atomic")
+atomic<void*>: internal::atomic_impl<void*> {
#if __TBB_ATOMIC_CTORS
atomic() = default ;
constexpr atomic(void* arg): internal::atomic_impl<void*>(arg) {}
+ constexpr atomic(const atomic<void*>& rhs): internal::atomic_impl<void*>(rhs) {}
#endif
void* operator=( void* rhs ) {
// "this" required here in strict ISO C++ because store_with_release is a dependent name
@@ -555,4 +580,7 @@ inline atomic& as_atomic( T& t ) {
#pragma warning (pop)
#endif // warnings are restored
+#include "internal/_warning_suppress_disable_notice.h"
+#undef __TBB_atomic_H_include_area
+
#endif /* __TBB_atomic_H */
diff --git a/inst/include/tbb/blocked_range.h b/inst/include/tbb/blocked_range.h
index 9f24cd2b..b77e7e0a 100644
--- a/inst/include/tbb/blocked_range.h
+++ b/inst/include/tbb/blocked_range.h
@@ -1,5 +1,5 @@
/*
- Copyright (c) 2005-2017 Intel Corporation
+ Copyright (c) 2005-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -12,10 +12,6 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-
-
-
-
*/
#ifndef __TBB_blocked_range_H
@@ -25,6 +21,15 @@
namespace tbb {
+namespace internal {
+
+// blocked_rangeNd_impl forward declaration in tbb::internal namespace to
+// name it as a friend for a tbb::blocked_range.
+template<typename Value, unsigned int N, typename>
+class blocked_rangeNd_impl;
+
+} // namespace internal
+
/** \page range_req Requirements on range concept
Class \c R implementing the concept of range must define:
- \code R::R( const R& ); \endcode Copy constructor
@@ -47,9 +52,11 @@ class blocked_range {
//! Type for size of a range
typedef std::size_t size_type;
- //! Construct range with default-constructed values for begin and end.
+#if __TBB_DEPRECATED_BLOCKED_RANGE_DEFAULT_CTOR
+ //! Construct range with default-constructed values for begin, end, and grainsize.
/** Requires that Value have a default constructor. */
- blocked_range() : my_end(), my_begin() {}
+ blocked_range() : my_end(), my_begin(), my_grainsize() {}
+#endif
//! Construct range over half-open interval [begin,end), with the given grainsize.
blocked_range( Value begin_, Value end_, size_type grainsize_=1 ) :
@@ -115,13 +122,12 @@ class blocked_range {
#endif /* __TBB_USE_PROPORTIONAL_SPLIT_IN_BLOCKED_RANGES */
private:
- /** NOTE: my_end MUST be declared before my_begin, otherwise the forking constructor will break. */
+ /** NOTE: my_end MUST be declared before my_begin, otherwise the splitting constructor will break. */
Value my_end;
Value my_begin;
size_type my_grainsize;
- //! Auxiliary function used by forking constructor.
- /** Using this function lets us not require that Value support assignment or default construction. */
+ //! Auxiliary function used by the splitting constructor.
static Value do_split( blocked_range& r, split )
{
__TBB_ASSERT( r.is_divisible(), "cannot split blocked_range that is not divisible" );
@@ -152,6 +158,9 @@ class blocked_range {
template<typename RowValue, typename ColValue, typename PageValue>
friend class blocked_range3d;
+
+ template<typename DimValue, unsigned int N, typename>
+ friend class internal::blocked_rangeNd_impl;
};
} // namespace tbb
diff --git a/inst/include/tbb/blocked_range2d.h b/inst/include/tbb/blocked_range2d.h
index f1b9f35d..2498e046 100644
--- a/inst/include/tbb/blocked_range2d.h
+++ b/inst/include/tbb/blocked_range2d.h
@@ -1,5 +1,5 @@
/*
- Copyright (c) 2005-2017 Intel Corporation
+ Copyright (c) 2005-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -12,10 +12,6 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-
-
-
-
*/
#ifndef __TBB_blocked_range2d_H
@@ -45,19 +41,17 @@ class blocked_range2d {
ColValue col_begin, ColValue col_end, typename col_range_type::size_type col_grainsize ) :
my_rows(row_begin,row_end,row_grainsize),
my_cols(col_begin,col_end,col_grainsize)
- {
- }
+ {}
blocked_range2d( RowValue row_begin, RowValue row_end,
ColValue col_begin, ColValue col_end ) :
my_rows(row_begin,row_end),
my_cols(col_begin,col_end)
- {
- }
+ {}
//! True if range is empty
bool empty() const {
- // Yes, it is a logical OR here, not AND.
+ // Range is empty if at least one dimension is empty.
return my_rows.empty() || my_cols.empty();
}
@@ -86,6 +80,14 @@ class blocked_range2d {
}
#endif /* __TBB_USE_PROPORTIONAL_SPLIT_IN_BLOCKED_RANGES */
+ //! The rows of the iteration space
+ const row_range_type& rows() const {return my_rows;}
+
+ //! The columns of the iteration space
+ const col_range_type& cols() const {return my_cols;}
+
+private:
+
template<typename Split>
void do_split( blocked_range2d& r, Split& split_obj )
{
@@ -95,12 +97,6 @@ class blocked_range2d {
my_rows.my_begin = row_range_type::do_split(r.my_rows, split_obj);
}
}
-
- //! The rows of the iteration space
- const row_range_type& rows() const {return my_rows;}
-
- //! The columns of the iteration space
- const col_range_type& cols() const {return my_cols;}
};
} // namespace tbb
diff --git a/inst/include/tbb/blocked_range3d.h b/inst/include/tbb/blocked_range3d.h
index c62565ee..15f93130 100644
--- a/inst/include/tbb/blocked_range3d.h
+++ b/inst/include/tbb/blocked_range3d.h
@@ -1,5 +1,5 @@
/*
- Copyright (c) 2005-2017 Intel Corporation
+ Copyright (c) 2005-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -12,10 +12,6 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-
-
-
-
*/
#ifndef __TBB_blocked_range3d_H
@@ -49,8 +45,7 @@ class blocked_range3d {
my_pages(page_begin,page_end),
my_rows(row_begin,row_end),
my_cols(col_begin,col_end)
- {
- }
+ {}
blocked_range3d( PageValue page_begin, PageValue page_end, typename page_range_type::size_type page_grainsize,
RowValue row_begin, RowValue row_end, typename row_range_type::size_type row_grainsize,
@@ -58,12 +53,11 @@ class blocked_range3d {
my_pages(page_begin,page_end,page_grainsize),
my_rows(row_begin,row_end,row_grainsize),
my_cols(col_begin,col_end,col_grainsize)
- {
- }
+ {}
//! True if range is empty
bool empty() const {
- // Yes, it is a logical OR here, not AND.
+ // Range is empty if at least one dimension is empty.
return my_pages.empty() || my_rows.empty() || my_cols.empty();
}
@@ -94,6 +88,17 @@ class blocked_range3d {
}
#endif /* __TBB_USE_PROPORTIONAL_SPLIT_IN_BLOCKED_RANGES */
+ //! The pages of the iteration space
+ const page_range_type& pages() const {return my_pages;}
+
+ //! The rows of the iteration space
+ const row_range_type& rows() const {return my_rows;}
+
+ //! The columns of the iteration space
+ const col_range_type& cols() const {return my_cols;}
+
+private:
+
template<typename Split>
void do_split( blocked_range3d& r, Split& split_obj)
{
@@ -103,7 +108,7 @@ class blocked_range3d {
} else {
my_rows.my_begin = row_range_type::do_split(r.my_rows, split_obj);
}
- } else {
+ } else {
if ( my_pages.size()*double(my_cols.grainsize()) < my_cols.size()*double(my_pages.grainsize()) ) {
my_cols.my_begin = col_range_type::do_split(r.my_cols, split_obj);
} else {
@@ -111,16 +116,6 @@ class blocked_range3d {
}
}
}
-
- //! The pages of the iteration space
- const page_range_type& pages() const {return my_pages;}
-
- //! The rows of the iteration space
- const row_range_type& rows() const {return my_rows;}
-
- //! The columns of the iteration space
- const col_range_type& cols() const {return my_cols;}
-
};
} // namespace tbb
diff --git a/inst/include/tbb/blocked_rangeNd.h b/inst/include/tbb/blocked_rangeNd.h
new file mode 100644
index 00000000..922c77c6
--- /dev/null
+++ b/inst/include/tbb/blocked_rangeNd.h
@@ -0,0 +1,150 @@
+/*
+ Copyright (c) 2017-2020 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+#ifndef __TBB_blocked_rangeNd_H
+#define __TBB_blocked_rangeNd_H
+
+#if ! TBB_PREVIEW_BLOCKED_RANGE_ND
+ #error Set TBB_PREVIEW_BLOCKED_RANGE_ND to include blocked_rangeNd.h
+#endif
+
+#include "tbb_config.h"
+
+// tbb::blocked_rangeNd requires C++11 support
+#if __TBB_CPP11_PRESENT && __TBB_CPP11_ARRAY_PRESENT && __TBB_CPP11_TEMPLATE_ALIASES_PRESENT
+
+#include "internal/_template_helpers.h" // index_sequence, make_index_sequence
+
+#include <array>
+#include <algorithm>    // std::any_of
+#include <type_traits>  // std::is_same, std::enable_if
+
+#include "tbb/blocked_range.h"
+
+namespace tbb {
+namespace internal {
+
+/*
+ The blocked_rangeNd_impl uses make_index_sequence to automatically generate a ctor with
+ exactly N arguments of the type tbb::blocked_range. Such ctor provides an opportunity
+ to use braced-init-list parameters to initialize each dimension.
+ Use of parameters, whose representation is a braced-init-list, but they're not
+ std::initializer_list or a reference to one, produces a non-deduced context
+ within template argument deduction.
+
+ NOTE: blocked_rangeNd must be exactly a templated alias to the blocked_rangeNd_impl
+ (and not e.g. a derived class), otherwise it would need to declare its own ctor
+ facing the same problem that the impl class solves.
+*/
+
+template<typename Value, unsigned int N, typename = make_index_sequence<N>>
+class blocked_rangeNd_impl;
+
+template<typename Value, unsigned int N, std::size_t... Is>
+class blocked_rangeNd_impl<Value, N, index_sequence<Is...>> {
+public:
+ //! Type of a value.
+ using value_type = Value;
+
+private:
+
+ //! Helper type to construct range with N tbb::blocked_range objects.
+ template<std::size_t>
+ using dim_type_helper = tbb::blocked_range<Value>;
+
+public:
+ blocked_rangeNd_impl() = delete;
+
+ //! Constructs N-dimensional range over N half-open intervals each represented as tbb::blocked_range.
+ blocked_rangeNd_impl(const dim_type_helper<Is>&... args) : my_dims{ {args...} } {}
+
+ //! Dimensionality of a range.
+ static constexpr unsigned int ndims() { return N; }
+
+ //! Range in certain dimension.
+ const tbb::blocked_range<Value>& dim(unsigned int dimension) const {
+ __TBB_ASSERT(dimension < N, "out of bound");
+ return my_dims[dimension];
+ }
+
+ //------------------------------------------------------------------------
+ // Methods that implement Range concept
+ //------------------------------------------------------------------------
+
+ //! True if at least one dimension is empty.
+ bool empty() const {
+ return std::any_of(my_dims.begin(), my_dims.end(), [](const tbb::blocked_range<Value>& d) {
+ return d.empty();
+ });
+ }
+
+ //! True if at least one dimension is divisible.
+ bool is_divisible() const {
+ return std::any_of(my_dims.begin(), my_dims.end(), [](const tbb::blocked_range<Value>& d) {
+ return d.is_divisible();
+ });
+ }
+
+#if __TBB_USE_PROPORTIONAL_SPLIT_IN_BLOCKED_RANGES
+ //! Static field to support proportional split.
+ static const bool is_splittable_in_proportion = true;
+
+ blocked_rangeNd_impl(blocked_rangeNd_impl& r, proportional_split proportion) : my_dims(r.my_dims) {
+ do_split(r, proportion);
+ }
+#endif
+
+ blocked_rangeNd_impl(blocked_rangeNd_impl& r, split proportion) : my_dims(r.my_dims) {
+ do_split(r, proportion);
+ }
+
+private:
+ __TBB_STATIC_ASSERT(N != 0, "zero dimensional blocked_rangeNd can't be constructed");
+
+ //! Ranges in each dimension.
+ std::array<tbb::blocked_range<Value>, N> my_dims;
+
+ template<typename split_type>
+ void do_split(blocked_rangeNd_impl& r, split_type proportion) {
+ __TBB_STATIC_ASSERT((is_same_type<split_type, split>::value
+ || is_same_type<split_type, proportional_split>::value),
+ "type of split object is incorrect");
+ __TBB_ASSERT(r.is_divisible(), "can't split not divisible range");
+
+ auto my_it = std::max_element(my_dims.begin(), my_dims.end(), [](const tbb::blocked_range<Value>& first, const tbb::blocked_range<Value>& second) {
+ return (first.size() * second.grainsize() < second.size() * first.grainsize());
+ });
+
+ auto r_it = r.my_dims.begin() + (my_it - my_dims.begin());
+
+ my_it->my_begin = tbb::blocked_range<Value>::do_split(*r_it, proportion);
+
+ // (!(my_it->my_begin < r_it->my_end) && !(r_it->my_end < my_it->my_begin)) equals to
+ // (my_it->my_begin == r_it->my_end), but we can't use operator== due to Value concept
+ __TBB_ASSERT(!(my_it->my_begin < r_it->my_end) && !(r_it->my_end < my_it->my_begin),
+ "blocked_range has been split incorrectly");
+ }
+};
+
+} // namespace internal
+
+template<typename Value, unsigned int N>
+using blocked_rangeNd = internal::blocked_rangeNd_impl<Value, N>;
+
+} // namespace tbb
+
+#endif /* __TBB_CPP11_PRESENT && __TBB_CPP11_ARRAY_PRESENT && __TBB_CPP11_TEMPLATE_ALIASES_PRESENT */
+#endif /* __TBB_blocked_rangeNd_H */
diff --git a/inst/include/tbb/cache_aligned_allocator.h b/inst/include/tbb/cache_aligned_allocator.h
index d435e785..5b4897c4 100644
--- a/inst/include/tbb/cache_aligned_allocator.h
+++ b/inst/include/tbb/cache_aligned_allocator.h
@@ -1,5 +1,5 @@
/*
- Copyright (c) 2005-2017 Intel Corporation
+ Copyright (c) 2005-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -12,10 +12,6 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-
-
-
-
*/
#ifndef __TBB_cache_aligned_allocator_H
@@ -24,7 +20,11 @@
#include <new>
#include "tbb_stddef.h"
#if __TBB_ALLOCATOR_CONSTRUCT_VARIADIC
- #include <utility> // std::forward
+#include <utility> // std::forward
+#endif
+
+#if __TBB_CPP17_MEMORY_RESOURCE_PRESENT
+#include <memory_resource>
#endif
namespace tbb {
@@ -69,7 +69,6 @@ class cache_aligned_allocator {
template struct rebind {
typedef cache_aligned_allocator other;
};
-
cache_aligned_allocator() throw() {}
cache_aligned_allocator( const cache_aligned_allocator& ) throw() {}
template cache_aligned_allocator(const cache_aligned_allocator&) throw() {}
@@ -132,6 +131,79 @@ inline bool operator==( const cache_aligned_allocator&, const cache_aligned_a
template
inline bool operator!=( const cache_aligned_allocator&, const cache_aligned_allocator& ) {return false;}
+#if __TBB_CPP17_MEMORY_RESOURCE_PRESENT
+
+//! C++17 memory resource wrapper to ensure cache line size alignment
+class cache_aligned_resource : public std::pmr::memory_resource {
+public:
+ cache_aligned_resource() : cache_aligned_resource(std::pmr::get_default_resource()) {}
+ explicit cache_aligned_resource(std::pmr::memory_resource* upstream) : m_upstream(upstream) {}
+
+ std::pmr::memory_resource* upstream_resource() const {
+ return m_upstream;
+ }
+
+private:
+ //! We don't know which memory resource is set. Use padding to guarantee alignment.
+ void* do_allocate(size_t bytes, size_t alignment) override {
+ size_t cache_line_alignment = correct_alignment(alignment);
+ uintptr_t base = (uintptr_t)m_upstream->allocate(correct_size(bytes) + cache_line_alignment);
+ __TBB_ASSERT(base != 0, "Upstream resource returned NULL.");
+#if _MSC_VER && !defined(__INTEL_COMPILER)
+ // unary minus operator applied to unsigned type, result still unsigned
+ #pragma warning(push)
+ #pragma warning(disable: 4146 4706)
+#endif
+ // Round up to the next cache line (align the base address)
+ uintptr_t result = (base + cache_line_alignment) & -cache_line_alignment;
+#if _MSC_VER && !defined(__INTEL_COMPILER)
+ #pragma warning(pop)
+#endif
+ // Record where block actually starts.
+ ((uintptr_t*)result)[-1] = base;
+ return (void*)result;
+ }
+
+ void do_deallocate(void* ptr, size_t bytes, size_t alignment) override {
+ if (ptr) {
+ // Recover where block actually starts
+ uintptr_t base = ((uintptr_t*)ptr)[-1];
+ m_upstream->deallocate((void*)base, correct_size(bytes) + correct_alignment(alignment));
+ }
+ }
+
+ bool do_is_equal(const std::pmr::memory_resource& other) const noexcept override {
+ if (this == &other) { return true; }
+#if __TBB_USE_OPTIONAL_RTTI
+ const cache_aligned_resource* other_res = dynamic_cast(&other);
+ return other_res && (this->upstream_resource() == other_res->upstream_resource());
+#else
+ return false;
+#endif
+ }
+
+ size_t correct_alignment(size_t alignment) {
+ __TBB_ASSERT(tbb::internal::is_power_of_two(alignment), "Alignment is not a power of 2");
+#if __TBB_CPP17_HW_INTERFERENCE_SIZE_PRESENT
+ size_t cache_line_size = std::hardware_destructive_interference_size;
+#else
+ size_t cache_line_size = internal::NFS_GetLineSize();
+#endif
+ return alignment < cache_line_size ? cache_line_size : alignment;
+ }
+
+ size_t correct_size(size_t bytes) {
+ // Handle the case when a small size is requested: otherwise there might
+ // not be enough space to store the original pointer.
+ return bytes < sizeof(uintptr_t) ? sizeof(uintptr_t) : bytes;
+ }
+
+ std::pmr::memory_resource* m_upstream;
+};
+
+#endif /* __TBB_CPP17_MEMORY_RESOURCE_PRESENT */
+
} // namespace tbb
#endif /* __TBB_cache_aligned_allocator_H */
+
diff --git a/inst/include/tbb/combinable.h b/inst/include/tbb/combinable.h
index 0063dbb4..aa8d24b1 100644
--- a/inst/include/tbb/combinable.h
+++ b/inst/include/tbb/combinable.h
@@ -1,5 +1,5 @@
/*
- Copyright (c) 2005-2017 Intel Corporation
+ Copyright (c) 2005-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -12,15 +12,14 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-
-
-
-
*/
#ifndef __TBB_combinable_H
#define __TBB_combinable_H
+#define __TBB_combinable_H_include_area
+#include "internal/_warning_suppress_enable_notice.h"
+
#include "enumerable_thread_specific.h"
#include "cache_aligned_allocator.h"
@@ -82,4 +81,8 @@ namespace tbb {
};
} // namespace tbb
+
+#include "internal/_warning_suppress_disable_notice.h"
+#undef __TBB_combinable_H_include_area
+
#endif /* __TBB_combinable_H */
diff --git a/inst/include/tbb/compat/condition_variable b/inst/include/tbb/compat/condition_variable
index 43edfc03..a6967817 100644
--- a/inst/include/tbb/compat/condition_variable
+++ b/inst/include/tbb/compat/condition_variable
@@ -1,5 +1,5 @@
/*
- Copyright (c) 2005-2017 Intel Corporation
+ Copyright (c) 2005-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -12,21 +12,31 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
+*/
+#include "../internal/_deprecated_header_message_guard.h"
+#if !defined(__TBB_show_deprecation_message_condition_variable_H) && defined(__TBB_show_deprecated_header_message)
+#define __TBB_show_deprecation_message_condition_variable_H
+#pragma message("TBB Warning: tbb/compat/condition_variable is deprecated. For details, please see Deprecated Features appendix in the TBB reference manual.")
+#endif
-
-*/
+#if defined(__TBB_show_deprecated_header_message)
+#undef __TBB_show_deprecated_header_message
+#endif
#ifndef __TBB_condition_variable_H
#define __TBB_condition_variable_H
+#define __TBB_condition_variable_H_include_area
+#include "../internal/_warning_suppress_enable_notice.h"
+
#if _WIN32||_WIN64
#include "../machine/windows_api.h"
-namespace tbb {
+namespace tbb {
namespace interface5 {
-namespace internal {
+namespace internal {
struct condition_variable_using_event
{
//! Event for blocking waiting threads.
@@ -69,17 +79,17 @@ namespace interface5 {
// C++0x standard working draft 30.4.3
// Lock tag types
-struct defer_lock_t { }; //! do not acquire ownership of the mutex
-struct try_to_lock_t { }; //! try to acquire ownership of the mutex without blocking
-struct adopt_lock_t { }; //! assume the calling thread has already
-const defer_lock_t defer_lock = {};
-const try_to_lock_t try_to_lock = {};
-const adopt_lock_t adopt_lock = {};
+struct __TBB_DEPRECATED_IN_VERBOSE_MODE defer_lock_t { }; //! do not acquire ownership of the mutex
+struct __TBB_DEPRECATED_IN_VERBOSE_MODE try_to_lock_t { }; //! try to acquire ownership of the mutex without blocking
+struct __TBB_DEPRECATED_IN_VERBOSE_MODE adopt_lock_t { }; //! assume the calling thread has already
+__TBB_DEPRECATED_IN_VERBOSE_MODE const defer_lock_t defer_lock = {};
+__TBB_DEPRECATED_IN_VERBOSE_MODE const try_to_lock_t try_to_lock = {};
+__TBB_DEPRECATED_IN_VERBOSE_MODE const adopt_lock_t adopt_lock = {};
// C++0x standard working draft 30.4.3.1
-//! lock_guard
+//! lock_guard
template
-class lock_guard : tbb::internal::no_copy {
+class __TBB_DEPRECATED_IN_VERBOSE_MODE lock_guard : tbb::internal::no_copy {
public:
//! mutex type
typedef M mutex_type;
@@ -88,7 +98,7 @@ public:
/** precondition: If mutex_type is not a recursive mutex, the calling thread
does not own the mutex m. */
explicit lock_guard(mutex_type& m) : pm(m) {m.lock();}
-
+
//! Adopt_lock constructor
/** precondition: the calling thread owns the mutex m. */
lock_guard(mutex_type& m, adopt_lock_t) : pm(m) {}
@@ -100,9 +110,9 @@ private:
};
// C++0x standard working draft 30.4.3.2
-//! unique_lock
+//! unique_lock
template
-class unique_lock : tbb::internal::no_copy {
+class __TBB_DEPRECATED_IN_VERBOSE_MODE unique_lock : tbb::internal::no_copy {
friend class condition_variable;
public:
typedef M mutex_type;
@@ -136,7 +146,7 @@ public:
unique_lock(mutex_type& m, adopt_lock_t) : pm(&m), owns(true) {}
//! Timed unique_lock acquisition.
- /** To avoid requiring support for namespace chrono, this method deviates from the working draft in that
+ /** To avoid requiring support for namespace chrono, this method deviates from the working draft in that
it uses tbb::tick_count::interval_t to specify the time duration. */
unique_lock(mutex_type& m, const tick_count::interval_t &i) : pm(&m) {owns = try_lock_for( i );}
@@ -169,14 +179,14 @@ public:
if( !owns ) {
pm->lock();
owns = true;
- } else
+ } else
throw_exception_v4( tbb::internal::eid_possible_deadlock );
- } else
+ } else
throw_exception_v4( tbb::internal::eid_operation_not_permitted );
__TBB_ASSERT( owns, NULL );
}
- //! Try to lock the mutex.
+ //! Try to lock the mutex.
/** If successful, note that this lock owns it. Otherwise, set it false. */
bool try_lock() {
if( pm ) {
@@ -184,17 +194,17 @@ public:
owns = pm->try_lock();
else
throw_exception_v4( tbb::internal::eid_possible_deadlock );
- } else
+ } else
throw_exception_v4( tbb::internal::eid_operation_not_permitted );
return owns;
}
-
- //! Try to lock the mutex.
+
+ //! Try to lock the mutex.
bool try_lock_for( const tick_count::interval_t &i );
//! Unlock the mutex
/** And note that this lock no longer owns it. */
- void unlock() {
+ void unlock() {
if( owns ) {
pm->unlock();
owns = false;
@@ -212,10 +222,10 @@ public:
//! Release control over the mutex.
mutex_type* release() {
- mutex_type* o_pm = pm;
- pm = NULL;
- owns = false;
- return o_pm;
+ mutex_type* o_pm = pm;
+ pm = NULL;
+ owns = false;
+ return o_pm;
}
// 30.4.3.2.4 observers
@@ -235,12 +245,12 @@ private:
};
template
-bool unique_lock::try_lock_for( const tick_count::interval_t &i)
-{
+__TBB_DEPRECATED_IN_VERBOSE_MODE bool unique_lock::try_lock_for( const tick_count::interval_t &i)
+{
const int unique_lock_tick = 100; /* microseconds; 0.1 milliseconds */
// the smallest wait-time is 0.1 milliseconds.
bool res = pm->try_lock();
- int duration_in_micro;
+ int duration_in_micro;
if( !res && (duration_in_micro=int(i.seconds()*1e6))>unique_lock_tick ) {
tick_count::interval_t i_100( double(unique_lock_tick)/1e6 /* seconds */); // 100 microseconds = 0.1*10E-3
do {
@@ -252,7 +262,7 @@ bool unique_lock::try_lock_for( const tick_count::interval_t &i)
return (owns=res);
}
-//! Swap the two unique locks that have the mutexes of same type
+//! Swap the two unique locks that have the mutexes of same type
template
void swap(unique_lock& x, unique_lock& y) { x.swap( y ); }
@@ -277,24 +287,24 @@ typedef pthread_cond_t condvar_impl_t;
//! cv_status
/** C++0x standard working draft 30.5 */
-enum cv_status { no_timeout, timeout };
+enum cv_status { no_timeout, timeout };
//! condition variable
-/** C++0x standard working draft 30.5.1
+/** C++0x standard working draft 30.5.1
@ingroup synchronization */
-class condition_variable : tbb::internal::no_copy {
+class __TBB_DEPRECATED_IN_VERBOSE_MODE condition_variable : tbb::internal::no_copy {
public:
//! Constructor
- condition_variable() {
+ condition_variable() {
#if _WIN32||_WIN64
- internal_initialize_condition_variable( my_cv );
+ internal_initialize_condition_variable( my_cv );
#else
pthread_cond_init( &my_cv, NULL );
#endif
}
//! Destructor
- ~condition_variable() {
+ ~condition_variable() {
//precondition: There shall be no thread blocked on *this.
#if _WIN32||_WIN64
internal_destroy_condition_variable( my_cv );
@@ -304,18 +314,18 @@ public:
}
//! Notify one thread and wake it up
- void notify_one() {
+ void notify_one() {
#if _WIN32||_WIN64
- internal_condition_variable_notify_one( my_cv );
+ internal_condition_variable_notify_one( my_cv );
#else
pthread_cond_signal( &my_cv );
#endif
}
- //! Notify all threads
- void notify_all() {
+ //! Notify all threads
+ void notify_all() {
#if _WIN32||_WIN64
- internal_condition_variable_notify_all( my_cv );
+ internal_condition_variable_notify_all( my_cv );
#else
pthread_cond_broadcast( &my_cv );
#endif
@@ -449,7 +459,7 @@ inline cv_status condition_variable::wait_for( unique_lock& lock, const t
__TBB_DEFINE_PROFILING_SET_NAME(interface5::condition_variable)
-} // namespace tbb
+} // namespace tbb
#if TBB_IMPLEMENT_CPP0X
@@ -469,8 +479,11 @@ using tbb::interface5::cv_status;
using tbb::interface5::timeout;
using tbb::interface5::no_timeout;
-} // namespace std
+} // namespace std
#endif /* TBB_IMPLEMENT_CPP0X */
+#include "../internal/_warning_suppress_disable_notice.h"
+#undef __TBB_condition_variable_H_include_area
+
#endif /* __TBB_condition_variable_H */
diff --git a/inst/include/tbb/compat/ppl.h b/inst/include/tbb/compat/ppl.h
index 840dfb22..f441b038 100644
--- a/inst/include/tbb/compat/ppl.h
+++ b/inst/include/tbb/compat/ppl.h
@@ -1,5 +1,5 @@
/*
- Copyright (c) 2005-2017 Intel Corporation
+ Copyright (c) 2005-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -12,15 +12,25 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
+*/
+#include "../internal/_deprecated_header_message_guard.h"
+#if !defined(__TBB_show_deprecation_message_ppl_H) && defined(__TBB_show_deprecated_header_message)
+#define __TBB_show_deprecation_message_ppl_H
+#pragma message("TBB Warning: tbb/compat/ppl.h is deprecated. For details, please see Deprecated Features appendix in the TBB reference manual.")
+#endif
-
-*/
+#if defined(__TBB_show_deprecated_header_message)
+#undef __TBB_show_deprecated_header_message
+#endif
#ifndef __TBB_compat_ppl_H
#define __TBB_compat_ppl_H
+#define __TBB_ppl_H_include_area
+#include "../internal/_warning_suppress_enable_notice.h"
+
#include "../task_group.h"
#include "../parallel_invoke.h"
#include "../parallel_for_each.h"
@@ -59,4 +69,7 @@ namespace Concurrency {
} // namespace Concurrency
+#include "../internal/_warning_suppress_disable_notice.h"
+#undef __TBB_ppl_H_include_area
+
#endif /* __TBB_compat_ppl_H */
diff --git a/inst/include/tbb/compat/thread b/inst/include/tbb/compat/thread
index 0edd9289..8b8a13d7 100644
--- a/inst/include/tbb/compat/thread
+++ b/inst/include/tbb/compat/thread
@@ -1,5 +1,5 @@
/*
- Copyright (c) 2005-2017 Intel Corporation
+ Copyright (c) 2005-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -12,15 +12,25 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
+*/
+#include "../internal/_deprecated_header_message_guard.h"
+#if !defined(__TBB_show_deprecation_message_thread_H) && defined(__TBB_show_deprecated_header_message)
+#define __TBB_show_deprecation_message_thread_H
+#pragma message("TBB Warning: tbb/compat/thread is deprecated. For details, please see Deprecated Features appendix in the TBB reference manual.")
+#endif
-
-*/
+#if defined(__TBB_show_deprecated_header_message)
+#undef __TBB_show_deprecated_header_message
+#endif
#ifndef __TBB_thread_H
#define __TBB_thread_H
+#define __TBB_thread_H_include_area
+#include "../internal/_warning_suppress_enable_notice.h"
+
#include "../tbb_config.h"
#if TBB_IMPLEMENT_CPP0X
@@ -35,7 +45,7 @@ namespace this_thread {
using tbb::this_tbb_thread::get_id;
using tbb::this_tbb_thread::yield;
- inline void sleep_for(const tbb::tick_count::interval_t& rel_time) {
+ __TBB_DEPRECATED_IN_VERBOSE_MODE inline void sleep_for(const tbb::tick_count::interval_t& rel_time) {
tbb::internal::thread_sleep_v3( rel_time );
}
}
@@ -50,6 +60,9 @@ namespace this_thread {
#endif /* TBB_IMPLEMENT_CPP0X */
+#include "../internal/_warning_suppress_disable_notice.h"
+#undef __TBB_thread_H_include_area
+
#else /* __TBB_thread_H */
#if __TBB_COMPAT_THREAD_RECURSION_PROTECTOR
diff --git a/inst/include/tbb/compat/tuple b/inst/include/tbb/compat/tuple
index 5767c49e..c568ef3d 100644
--- a/inst/include/tbb/compat/tuple
+++ b/inst/include/tbb/compat/tuple
@@ -1,5 +1,5 @@
/*
- Copyright (c) 2005-2017 Intel Corporation
+ Copyright (c) 2005-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -12,15 +12,25 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
+*/
+#include "../internal/_deprecated_header_message_guard.h"
+#if !defined(__TBB_show_deprecation_message_tuple_H) && defined(__TBB_show_deprecated_header_message)
+#define __TBB_show_deprecation_message_tuple_H
+#pragma message("TBB Warning: tbb/compat/tuple is deprecated. For details, please see Deprecated Features appendix in the TBB reference manual.")
+#endif
-
-*/
+#if defined(__TBB_show_deprecated_header_message)
+#undef __TBB_show_deprecated_header_message
+#endif
#ifndef __TBB_tuple_H
#define __TBB_tuple_H
+#define __TBB_tuple_H_include_area
+#include "../internal/_warning_suppress_enable_notice.h"
+
#include
#include "../tbb_stddef.h"
@@ -214,7 +224,7 @@ struct cons{
typedef __HT head_type;
typedef __TT tail_type;
- head_type head;
+ head_type head;
tail_type tail;
static const int length = 1 + tail_type::length;
@@ -276,12 +286,12 @@ struct cons{
template
-struct cons<__HT,null_type> {
+struct cons<__HT,null_type> {
typedef __HT head_type;
typedef null_type tail_type;
- head_type head;
+ head_type head;
static const int length = 1;
@@ -350,7 +360,7 @@ inline const __T wrap_dcons(__T*) { return __T(); }
// tuple definition
template
-class tuple : public internal::tuple_traits<__T0, __T1, __T2, __T3, __T4 __TBB_T_PACK >::U {
+class __TBB_DEPRECATED_IN_VERBOSE_MODE tuple : public internal::tuple_traits<__T0, __T1, __T2, __T3, __T4 __TBB_T_PACK >::U {
// friends
template friend class tuple_size;
template friend struct tuple_element;
@@ -366,7 +376,7 @@ class tuple : public internal::tuple_traits<__T0, __T1, __T2, __T3, __T4 __TBB_T
typedef typename internal::tuple_traits<__T0,__T1,__T2,__T3, __T4 __TBB_T_PACK >::U my_cons;
public:
- tuple(const __T0& t0=internal::wrap_dcons((__T0*)NULL)
+ __TBB_DEPRECATED_IN_VERBOSE_MODE tuple(const __T0& t0=internal::wrap_dcons((__T0*)NULL)
,const __T1& t1=internal::wrap_dcons((__T1*)NULL)
,const __T2& t2=internal::wrap_dcons((__T2*)NULL)
,const __T3& t3=internal::wrap_dcons((__T3*)NULL)
@@ -425,7 +435,7 @@ public:
// empty tuple
template<>
-class tuple : public null_type {
+class __TBB_DEPRECATED_IN_VERBOSE_MODE tuple : public null_type {
};
// helper classes
@@ -437,7 +447,7 @@ public:
};
template <>
-class tuple_size > {
+class tuple_size > {
public:
static const size_t value = 0;
};
@@ -484,5 +494,8 @@ namespace tbb {
#undef __TBB_CONST_REF_T_PARAM_PACK
#undef __TBB_T_PARAM_LIST_PACK
#undef __TBB_CONST_NULL_REF_PACK
-
+
+#include "../internal/_warning_suppress_disable_notice.h"
+#undef __TBB_tuple_H_include_area
+
#endif /* __TBB_tuple_H */
diff --git a/inst/include/tbb/concurrent_hash_map.h b/inst/include/tbb/concurrent_hash_map.h
index 8497c838..80bad97b 100644
--- a/inst/include/tbb/concurrent_hash_map.h
+++ b/inst/include/tbb/concurrent_hash_map.h
@@ -1,5 +1,5 @@
/*
- Copyright (c) 2005-2017 Intel Corporation
+ Copyright (c) 2005-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -12,39 +12,29 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-
-
-
-
*/
#ifndef __TBB_concurrent_hash_map_H
#define __TBB_concurrent_hash_map_H
-#include "tbb_stddef.h"
-
-#if !TBB_USE_EXCEPTIONS && _MSC_VER
- // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers
- #pragma warning (push)
- #pragma warning (disable: 4530)
-#endif
+#define __TBB_concurrent_hash_map_H_include_area
+#include "internal/_warning_suppress_enable_notice.h"
+#include "tbb_stddef.h"
#include
#include // Need std::pair
#include // Need std::memset
#include __TBB_STD_SWAP_HEADER
-#if !TBB_USE_EXCEPTIONS && _MSC_VER
- #pragma warning (pop)
-#endif
-
-#include "cache_aligned_allocator.h"
#include "tbb_allocator.h"
#include "spin_rw_mutex.h"
#include "atomic.h"
#include "tbb_exception.h"
#include "tbb_profiling.h"
+#include "aligned_space.h"
#include "internal/_tbb_hash_compare_impl.h"
+#include "internal/_template_helpers.h"
+#include "internal/_allocator_traits.h"
#if __TBB_INITIALIZER_LISTS_PRESENT
#include
#endif
@@ -54,12 +44,17 @@
#if __TBB_STATISTICS
#include
#endif
+#if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_TUPLE_PRESENT
+// The definition of __TBB_CPP11_RVALUE_REF_PRESENT implies __TBB_CPP11_TUPLE_PRESENT
+// on most platforms; the tuple-present macro was added for logical correctness.
+#include
+#endif
namespace tbb {
namespace interface5 {
- template, typename A = tbb_allocator > >
+ template, typename A = tbb_allocator > >
class concurrent_hash_map;
//! @cond INTERNAL
@@ -130,9 +125,10 @@ namespace interface5 {
#endif
//! Constructor
hash_map_base() {
- std::memset( (void*) this, 0, pointers_per_table*sizeof(segment_ptr_t) // 32*4=128 or 64*8=512
- + sizeof(my_size) + sizeof(my_mask) // 4+4 or 8+8
- + embedded_buckets*sizeof(bucket) ); // n*8 or n*16
+ std::memset(my_table, 0, sizeof(my_table));
+ my_mask = 0;
+ my_size = 0;
+ std::memset(my_embedded_segment, 0, sizeof(my_embedded_segment));
for( size_type i = 0; i < embedded_block; i++ ) // fill the table
my_table[i] = my_embedded_segment + segment_base(i);
my_mask = embedded_buckets - 1;
@@ -166,7 +162,7 @@ namespace interface5 {
//! Initialize buckets
static void init_buckets( segment_ptr_t ptr, size_type sz, bool is_initial ) {
- if( is_initial ) std::memset((void*) ptr, 0, sz*sizeof(bucket) );
+ if( is_initial ) std::memset( static_cast(ptr), 0, sz*sizeof(bucket) );
else for(size_type i = 0; i < sz; i++, ptr++) {
*reinterpret_cast(&ptr->mutex) = 0;
ptr->node_list = rehash_req;
@@ -190,22 +186,25 @@ namespace interface5 {
};
//! Enable segment
- void enable_segment( segment_index_t k, bool is_initial = false ) {
+ template
+ void enable_segment( segment_index_t k, const Allocator& allocator, bool is_initial = false ) {
+ typedef typename tbb::internal::allocator_rebind::type bucket_allocator_type;
+ typedef tbb::internal::allocator_traits bucket_allocator_traits;
+ bucket_allocator_type bucket_allocator(allocator);
__TBB_ASSERT( k, "Zero segment must be embedded" );
enable_segment_failsafe watchdog( my_table, k );
- cache_aligned_allocator alloc;
size_type sz;
__TBB_ASSERT( !is_valid(my_table[k]), "Wrong concurrent assignment");
if( k >= first_block ) {
sz = segment_size( k );
- segment_ptr_t ptr = alloc.allocate( sz );
+ segment_ptr_t ptr = bucket_allocator_traits::allocate(bucket_allocator, sz);
init_buckets( ptr, sz, is_initial );
itt_hide_store_word( my_table[k], ptr );
sz <<= 1;// double it to get entire capacity of the container
} else { // the first block
__TBB_ASSERT( k == embedded_block, "Wrong segment index" );
sz = segment_size( first_block );
- segment_ptr_t ptr = alloc.allocate( sz - embedded_buckets );
+ segment_ptr_t ptr = bucket_allocator_traits::allocate(bucket_allocator, sz - embedded_buckets);
init_buckets( ptr, sz - embedded_buckets, is_initial );
ptr -= segment_base(embedded_block);
for(segment_index_t i = embedded_block; i < first_block; i++) // calc the offsets
@@ -215,6 +214,22 @@ namespace interface5 {
watchdog.my_segment_ptr = 0;
}
+ template
+ void delete_segment(segment_index_t s, const Allocator& allocator) {
+ typedef typename tbb::internal::allocator_rebind::type bucket_allocator_type;
+ typedef tbb::internal::allocator_traits bucket_allocator_traits;
+ bucket_allocator_type bucket_allocator(allocator);
+ segment_ptr_t buckets_ptr = my_table[s];
+ size_type sz = segment_size( s ? s : 1 );
+
+ if( s >= first_block) // the first segment or the next
+ bucket_allocator_traits::deallocate(bucket_allocator, buckets_ptr, sz);
+ else if( s == embedded_block && embedded_block != first_block )
+ bucket_allocator_traits::deallocate(bucket_allocator, buckets_ptr,
+ segment_size(first_block) - embedded_buckets);
+ if( s >= embedded_block ) my_table[s] = 0;
+ }
+
//! Get bucket by (masked) hashcode
bucket *get_bucket( hashcode_t h ) const throw() { // TODO: add throw() everywhere?
segment_index_t s = segment_index_of( h );
@@ -283,11 +298,12 @@ namespace interface5 {
}
//! Prepare enough segments for number of buckets
- void reserve(size_type buckets) {
+ template
+ void reserve(size_type buckets, const Allocator& allocator) {
if( !buckets-- ) return;
bool is_initial = !my_size;
for( size_type m = my_mask; buckets > m; m = my_mask )
- enable_segment( segment_index_of( m+1 ), is_initial );
+ enable_segment( segment_index_of( m+1 ), allocator, is_initial );
}
//! Swap hash_map_bases
void internal_swap(hash_map_base &table) {
@@ -299,6 +315,25 @@ namespace interface5 {
for(size_type i = embedded_block; i < pointers_per_table; i++)
swap(this->my_table[i], table.my_table[i]);
}
+
+#if __TBB_CPP11_RVALUE_REF_PRESENT
+ void internal_move(hash_map_base&& other) {
+ my_mask = other.my_mask;
+ other.my_mask = embedded_buckets - 1;
+ my_size = other.my_size;
+ other.my_size = 0;
+
+ for(size_type i = 0; i < embedded_buckets; ++i) {
+ my_embedded_segment[i].node_list = other.my_embedded_segment[i].node_list;
+ other.my_embedded_segment[i].node_list = NULL;
+ }
+
+ for(size_type i = embedded_block; i < pointers_per_table; ++i) {
+ my_table[i] = other.my_table[i];
+ other.my_table[i] = NULL;
+ }
+ }
+#endif // __TBB_CPP11_RVALUE_REF_PRESENT
};
template
@@ -376,9 +411,17 @@ namespace interface5 {
my_bucket(other.my_bucket),
my_node(other.my_node)
{}
+
+ hash_map_iterator& operator=( const hash_map_iterator &other ) {
+ my_map = other.my_map;
+ my_index = other.my_index;
+ my_bucket = other.my_bucket;
+ my_node = other.my_node;
+ return *this;
+ }
Value& operator*() const {
__TBB_ASSERT( hash_map_base::is_valid(my_node), "iterator uninitialized or at end of container?" );
- return my_node->item;
+ return my_node->value();
}
Value* operator->() const {return &operator*();}
hash_map_iterator& operator++();
@@ -564,62 +607,80 @@ class concurrent_hash_map : protected internal::hash_map_base {
protected:
friend class const_accessor;
- struct node;
- typedef typename Allocator::template rebind::other node_allocator_type;
+ class node;
+ typedef typename tbb::internal::allocator_rebind::type node_allocator_type;
+ typedef tbb::internal::allocator_traits node_allocator_traits;
node_allocator_type my_allocator;
HashCompare my_hash_compare;
- struct node : public node_base {
- value_type item;
- node( const Key &key ) : item(key, T()) {}
- node( const Key &key, const T &t ) : item(key, t) {}
-#if __TBB_CPP11_RVALUE_REF_PRESENT
- node( const Key &key, T &&t ) : item(key, std::move(t)) {}
- node( value_type&& i ) : item(std::move(i)){}
-#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
- template
- node( Args&&... args ) : item(std::forward(args)...) {}
-#if __TBB_COPY_FROM_NON_CONST_REF_BROKEN
- node( value_type& i ) : item(const_cast(i)) {}
-#endif //__TBB_COPY_FROM_NON_CONST_REF_BROKEN
-#endif //__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
-#endif //__TBB_CPP11_RVALUE_REF_PRESENT
- node( const value_type& i ) : item(i) {}
-
- // exception-safe allocation, see C++ Standard 2003, clause 5.3.4p17
- void *operator new( size_t /*size*/, node_allocator_type &a ) {
- void *ptr = a.allocate(1);
- if(!ptr)
- tbb::internal::throw_exception(tbb::internal::eid_bad_alloc);
- return ptr;
- }
- // match placement-new form above to be called if exception thrown in constructor
- void operator delete( void *ptr, node_allocator_type &a ) { a.deallocate(static_cast(ptr),1); }
+ class node : public node_base {
+ tbb::aligned_space my_value;
+ public:
+ value_type* storage() { return my_value.begin(); }
+ value_type& value() { return *storage(); }
};
void delete_node( node_base *n ) {
- my_allocator.destroy( static_cast(n) );
- my_allocator.deallocate( static_cast(n), 1);
+ node_allocator_traits::destroy(my_allocator, static_cast(n)->storage());
+ node_allocator_traits::destroy(my_allocator, static_cast(n));
+ node_allocator_traits::deallocate(my_allocator, static_cast(n), 1);
+ }
+
+ struct node_scoped_guard : tbb::internal::no_copy {
+ node* my_node;
+ node_allocator_type& my_alloc;
+
+ node_scoped_guard(node* n, node_allocator_type& alloc) : my_node(n), my_alloc(alloc) {}
+ ~node_scoped_guard() {
+ if(my_node) {
+ node_allocator_traits::destroy(my_alloc, my_node);
+ node_allocator_traits::deallocate(my_alloc, my_node, 1);
+ }
+ }
+ void dismiss() { my_node = NULL; }
+ };
+
+#if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
+ template
+ static node* create_node(node_allocator_type& allocator, Args&&... args)
+#else
+ template
+ static node* create_node(node_allocator_type& allocator, __TBB_FORWARDING_REF(Arg1) arg1, __TBB_FORWARDING_REF(Arg2) arg2)
+#endif
+ {
+ node* node_ptr = node_allocator_traits::allocate(allocator, 1);
+ node_scoped_guard guard(node_ptr, allocator);
+ node_allocator_traits::construct(allocator, node_ptr);
+#if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
+ node_allocator_traits::construct(allocator, node_ptr->storage(), std::forward(args)...);
+#else
+ node_allocator_traits::construct(allocator, node_ptr->storage(), tbb::internal::forward(arg1), tbb::internal::forward(arg2));
+#endif
+ guard.dismiss();
+ return node_ptr;
}
static node* allocate_node_copy_construct(node_allocator_type& allocator, const Key &key, const T * t){
- return new( allocator ) node(key, *t);
+ return create_node(allocator, key, *t);
}
#if __TBB_CPP11_RVALUE_REF_PRESENT
static node* allocate_node_move_construct(node_allocator_type& allocator, const Key &key, const T * t){
- return new( allocator ) node(key, std::move(*const_cast(t)));
+ return create_node(allocator, key, std::move(*const_cast(t)));
}
-#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
- template
- static node* allocate_node_emplace_construct(node_allocator_type& allocator, Args&&... args){
- return new( allocator ) node(std::forward(args)...);
- }
-#endif //#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
#endif
static node* allocate_node_default_construct(node_allocator_type& allocator, const Key &key, const T * ){
- return new( allocator ) node(key);
+#if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_TUPLE_PRESENT
+ // Emplace construct an empty T object inside the pair
+ return create_node(allocator, std::piecewise_construct,
+ std::forward_as_tuple(key), std::forward_as_tuple());
+#else
+ // Use of a temporary object is impossible, because create_node takes a non-const reference.
+ // copy-initialization is possible because T is already required to be CopyConstructible.
+ T obj = T();
+ return create_node(allocator, key, tbb::internal::move(obj));
+#endif
}
static node* do_not_allocate_node(node_allocator_type& , const Key &, const T * ){
@@ -629,7 +690,7 @@ class concurrent_hash_map : protected internal::hash_map_base {
node *search_bucket( const key_type &key, bucket *b ) const {
node *n = static_cast( b->node_list );
- while( is_valid(n) && !my_hash_compare.equal(key, n->item.first) )
+ while( is_valid(n) && !my_hash_compare.equal(key, n->value().first) )
n = static_cast( n->next );
__TBB_ASSERT(n != internal::rehash_req, "Search can be executed only for rehashed bucket");
return n;
@@ -674,7 +735,7 @@ class concurrent_hash_map : protected internal::hash_map_base {
__TBB_ASSERT( (mask&(mask+1))==0 && (h & mask) == h, NULL );
restart:
for( node_base **p = &b_old()->node_list, *n = __TBB_load_with_acquire(*p); is_valid(n); n = *p ) {
- hashcode_t c = my_hash_compare.hash( static_cast(n)->item.first );
+ hashcode_t c = my_hash_compare.hash( static_cast(n)->value().first );
#if TBB_USE_ASSERT
hashcode_t bmask = h & (mask>>1);
bmask = bmask==0? 1 : ( 1u<<(__TBB_Log2( bmask )+1 ) ) - 1; // minimal mask of parent bucket
@@ -726,7 +787,7 @@ class concurrent_hash_map : protected internal::hash_map_base {
//! Return reference to associated value in hash table.
const_reference operator*() const {
__TBB_ASSERT( my_node, "attempt to dereference empty accessor" );
- return my_node->item;
+ return my_node->value();
}
//! Return pointer to associated value in hash table.
@@ -756,7 +817,7 @@ class concurrent_hash_map : protected internal::hash_map_base {
//! Return reference to associated value in hash table.
reference operator*() const {
__TBB_ASSERT( this->my_node, "attempt to dereference empty accessor" );
- return this->my_node->item;
+ return this->my_node->value();
}
//! Return pointer to associated value in hash table.
@@ -770,18 +831,39 @@ class concurrent_hash_map : protected internal::hash_map_base {
: internal::hash_map_base(), my_allocator(a)
{}
+ explicit concurrent_hash_map( const HashCompare& compare, const allocator_type& a = allocator_type() )
+ : internal::hash_map_base(), my_allocator(a), my_hash_compare(compare)
+ {}
+
//! Construct empty table with n preallocated buckets. This number serves also as initial concurrency level.
concurrent_hash_map( size_type n, const allocator_type &a = allocator_type() )
- : my_allocator(a)
+ : internal::hash_map_base(), my_allocator(a)
+ {
+ reserve( n, my_allocator );
+ }
+
+ concurrent_hash_map( size_type n, const HashCompare& compare, const allocator_type& a = allocator_type() )
+ : internal::hash_map_base(), my_allocator(a), my_hash_compare(compare)
{
- reserve( n );
+ reserve( n, my_allocator );
}
//! Copy constructor
- concurrent_hash_map( const concurrent_hash_map &table, const allocator_type &a = allocator_type() )
+ concurrent_hash_map( const concurrent_hash_map &table )
+ : internal::hash_map_base(),
+ my_allocator(node_allocator_traits::select_on_container_copy_construction(table.get_allocator()))
+ {
+ call_clear_on_leave scope_guard(this);
+ internal_copy(table);
+ scope_guard.dismiss();
+ }
+
+ concurrent_hash_map( const concurrent_hash_map &table, const allocator_type &a)
: internal::hash_map_base(), my_allocator(a)
{
+ call_clear_on_leave scope_guard(this);
internal_copy(table);
+ scope_guard.dismiss();
}
#if __TBB_CPP11_RVALUE_REF_PRESENT
@@ -789,7 +871,7 @@ class concurrent_hash_map : protected internal::hash_map_base {
concurrent_hash_map( concurrent_hash_map &&table )
: internal::hash_map_base(), my_allocator(std::move(table.get_allocator()))
{
- swap(table);
+ internal_move(std::move(table));
}
//! Move constructor
@@ -797,10 +879,10 @@ class concurrent_hash_map : protected internal::hash_map_base {
: internal::hash_map_base(), my_allocator(a)
{
if (a == table.get_allocator()){
- this->swap(table);
+ internal_move(std::move(table));
}else{
call_clear_on_leave scope_guard(this);
- internal_copy(std::make_move_iterator(table.begin()), std::make_move_iterator(table.end()));
+ internal_copy(std::make_move_iterator(table.begin()), std::make_move_iterator(table.end()), table.size());
scope_guard.dismiss();
}
}
@@ -809,19 +891,38 @@ class concurrent_hash_map : protected internal::hash_map_base {
//! Construction with copying iteration range and given allocator instance
template
concurrent_hash_map( I first, I last, const allocator_type &a = allocator_type() )
- : my_allocator(a)
+ : internal::hash_map_base(), my_allocator(a)
+ {
+ call_clear_on_leave scope_guard(this);
+ internal_copy(first, last, std::distance(first, last));
+ scope_guard.dismiss();
+ }
+
+ template
+ concurrent_hash_map( I first, I last, const HashCompare& compare, const allocator_type& a = allocator_type() )
+ : internal::hash_map_base(), my_allocator(a), my_hash_compare(compare)
{
- reserve( std::distance(first, last) ); // TODO: load_factor?
- internal_copy(first, last);
+ call_clear_on_leave scope_guard(this);
+ internal_copy(first, last, std::distance(first, last));
+ scope_guard.dismiss();
}
#if __TBB_INITIALIZER_LISTS_PRESENT
//! Construct empty table with n preallocated buckets. This number serves also as initial concurrency level.
concurrent_hash_map( std::initializer_list il, const allocator_type &a = allocator_type() )
- : my_allocator(a)
+ : internal::hash_map_base(), my_allocator(a)
{
- reserve(il.size());
- internal_copy(il.begin(), il.end());
+ call_clear_on_leave scope_guard(this);
+ internal_copy(il.begin(), il.end(), il.size());
+ scope_guard.dismiss();
+ }
+
+ concurrent_hash_map( std::initializer_list il, const HashCompare& compare, const allocator_type& a = allocator_type() )
+ : internal::hash_map_base(), my_allocator(a), my_hash_compare(compare)
+ {
+ call_clear_on_leave scope_guard(this);
+ internal_copy(il.begin(), il.end(), il.size());
+ scope_guard.dismiss();
}
#endif //__TBB_INITIALIZER_LISTS_PRESENT
@@ -829,7 +930,9 @@ class concurrent_hash_map : protected internal::hash_map_base {
//! Assignment
concurrent_hash_map& operator=( const concurrent_hash_map &table ) {
if( this!=&table ) {
+ typedef typename node_allocator_traits::propagate_on_container_copy_assignment pocca_type;
clear();
+ tbb::internal::allocator_copy_assignment(my_allocator, table.my_allocator, pocca_type());
internal_copy(table);
}
return *this;
@@ -838,17 +941,9 @@ class concurrent_hash_map : protected internal::hash_map_base {
#if __TBB_CPP11_RVALUE_REF_PRESENT
//! Move Assignment
concurrent_hash_map& operator=( concurrent_hash_map &&table ) {
- if(this != &table){
- typedef typename tbb::internal::allocator_traits::propagate_on_container_move_assignment pocma_t;
- if(pocma_t::value || this->my_allocator == table.my_allocator) {
- concurrent_hash_map trash (std::move(*this));
- //TODO: swapping allocators here may be a problem, replace with single direction moving iff pocma is set
- this->swap(table);
- } else {
- //do per element move
- concurrent_hash_map moved_copy(std::move(table), this->my_allocator);
- this->swap(moved_copy);
- }
+ if(this != &table) {
+ typedef typename node_allocator_traits::propagate_on_container_move_assignment pocma_type;
+ internal_move_assign(std::move(table), pocma_type());
}
return *this;
}
@@ -858,8 +953,7 @@ class concurrent_hash_map : protected internal::hash_map_base {
//! Assignment
concurrent_hash_map& operator=( std::initializer_list il ) {
clear();
- reserve(il.size());
- internal_copy(il.begin(), il.end());
+ internal_copy(il.begin(), il.end(), il.size());
return *this;
}
#endif //__TBB_INITIALIZER_LISTS_PRESENT
@@ -1067,8 +1161,8 @@ class concurrent_hash_map : protected internal::hash_map_base {
template
bool generic_emplace( Accessor && result, Args &&... args ) {
result.release();
- node * node_ptr = allocate_node_emplace_construct(my_allocator, std::forward(args)...);
- return lookup(/*insert*/true, node_ptr->item.first, NULL, accessor_location(result), is_write_access_needed(result), &do_not_allocate_node, node_ptr );
+ node * node_ptr = create_node(my_allocator, std::forward(args)...);
+ return lookup(/*insert*/true, node_ptr->value().first, NULL, accessor_location(result), is_write_access_needed(result), &do_not_allocate_node, node_ptr );
}
#endif //__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
#endif //__TBB_CPP11_RVALUE_REF_PRESENT
@@ -1084,7 +1178,24 @@ class concurrent_hash_map : protected internal::hash_map_base {
void internal_copy( const concurrent_hash_map& source );
template
- void internal_copy( I first, I last );
+ void internal_copy( I first, I last, size_type reserve_size );
+
+#if __TBB_CPP11_RVALUE_REF_PRESENT
+ // A compile-time dispatch to allow move assignment of containers with non-movable value_type if POCMA is true_type
+ void internal_move_assign(concurrent_hash_map&& other, tbb::internal::traits_true_type) {
+ tbb::internal::allocator_move_assignment(my_allocator, other.my_allocator, tbb::internal::traits_true_type());
+ internal_move(std::move(other));
+ }
+
+ void internal_move_assign(concurrent_hash_map&& other, tbb::internal::traits_false_type) {
+ if (this->my_allocator == other.my_allocator) {
+ internal_move(std::move(other));
+ } else {
+ //do per element move
+ internal_copy(std::make_move_iterator(other.begin()), std::make_move_iterator(other.end()), other.size());
+ }
+ }
+#endif
//! Fast find when no concurrent erasure is used. For internal use inside TBB only!
/** Return pointer to item with given key, or NULL if no such item exists.
@@ -1109,13 +1220,40 @@ class concurrent_hash_map : protected internal::hash_map_base {
}
n = search_bucket( key, b );
if( n )
- return &n->item;
+ return n->storage();
else if( check_mask_race( h, m ) )
goto restart;
return 0;
}
};
+#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT
+namespace internal {
+using namespace tbb::internal;
+
+template typename Map, typename Key, typename T, typename... Args>
+using hash_map_t = Map<
+ Key, T,
+ std::conditional_t< (sizeof...(Args)>0) && !is_allocator_v< pack_element_t<0, Args...> >,
+ pack_element_t<0, Args...>, tbb_hash_compare >,
+ std::conditional_t< (sizeof...(Args)>0) && is_allocator_v< pack_element_t >,
+ pack_element_t, tbb_allocator > >
+>;
+}
+
+// Deduction guide for the constructor from two iterators and hash_compare/ allocator
+template
+concurrent_hash_map(I, I, Args...)
+-> internal::hash_map_t,internal::iterator_mapped_t, Args...>;
+
+// Deduction guide for the constructor from an initializer_list and hash_compare/ allocator
+// Deduction guide for an initializer_list, hash_compare and allocator is implicit
+template
+concurrent_hash_map(std::initializer_list>, CompareOrAllocator)
+-> internal::hash_map_t;
+
+#endif /* __TBB_CPP17_DEDUCTION_GUIDES_PRESENT */
+
template
bool concurrent_hash_map::lookup( bool op_insert, const Key &key, const T *t, const_accessor *result, bool write, node* (*allocate_node)(node_allocator_type& , const Key&, const T*), node *tmp_n ) {
__TBB_ASSERT( !result || !result->my_node, NULL );
@@ -1188,7 +1326,7 @@ bool concurrent_hash_map::lookup( bool op_insert, const Key
#if __TBB_STATISTICS
my_info_resizes++; // concurrent ones
#endif
- enable_segment( grow_segment );
+ enable_segment( grow_segment, my_allocator );
}
if( tmp_n ) // if op_insert only
delete_node( tmp_n );
@@ -1256,7 +1394,7 @@ bool concurrent_hash_map::erase( const Key &key ) {
search:
node_base **p = &b()->node_list;
n = *p;
- while( is_valid(n) && !my_hash_compare.equal(key, static_cast(n)->item.first ) ) {
+ while( is_valid(n) && !my_hash_compare.equal(key, static_cast(n)->value().first ) ) {
p = &n->next;
n = *p;
}
@@ -1283,16 +1421,18 @@ bool concurrent_hash_map::erase( const Key &key ) {
template
void concurrent_hash_map::swap(concurrent_hash_map &table) {
- //TODO: respect C++11 allocator_traits::propogate_on_constainer_swap
- using std::swap;
- swap(this->my_allocator, table.my_allocator);
- swap(this->my_hash_compare, table.my_hash_compare);
- internal_swap(table);
+ typedef typename node_allocator_traits::propagate_on_container_swap pocs_type;
+ if (this != &table && (pocs_type::value || my_allocator == table.my_allocator)) {
+ using std::swap;
+ tbb::internal::allocator_swap(this->my_allocator, table.my_allocator, pocs_type());
+ swap(this->my_hash_compare, table.my_hash_compare);
+ internal_swap(table);
+ }
}
template
void concurrent_hash_map::rehash(size_type sz) {
- reserve( sz ); // TODO: add reduction of number of buckets as well
+ reserve( sz, my_allocator ); // TODO: add reduction of number of buckets as well
hashcode_t mask = my_mask;
hashcode_t b = (mask+1)>>1; // size or first index of the last segment
__TBB_ASSERT((b&(b-1))==0, NULL); // zero or power of 2
@@ -1311,7 +1451,7 @@ void concurrent_hash_map::rehash(size_type sz) {
// now h - is index of the root rehashed bucket b_old
mark_rehashed_levels( h ); // mark all non-rehashed children recursively across all segments
for( node_base **p = &b_old->node_list, *q = *p; is_valid(q); q = *p ) {
- hashcode_t c = my_hash_compare.hash( static_cast(q)->item.first );
+ hashcode_t c = my_hash_compare.hash( static_cast(q)->value().first );
if( (c & mask) != h ) { // should be rehashed
*p = q->next; // exclude from b_old
bucket *b_new = get_bucket( c & mask );
@@ -1338,7 +1478,7 @@ void concurrent_hash_map::rehash(size_type sz) {
#endif
#if TBB_USE_ASSERT
for( ; is_valid(n); n = n->next ) {
- hashcode_t h = my_hash_compare.hash( static_cast(n)->item.first ) & mask;
+ hashcode_t h = my_hash_compare.hash( static_cast(n)->value().first ) & mask;
__TBB_ASSERT( h == b, "hash() function changed for key in table or internal error" );
}
#endif
@@ -1385,7 +1525,7 @@ void concurrent_hash_map::clear() {
#endif
#if __TBB_EXTRA_DEBUG
for(; is_valid(n); n = n->next ) {
- hashcode_t h = my_hash_compare.hash( static_cast(n)->item.first );
+ hashcode_t h = my_hash_compare.hash( static_cast(n)->value().first );
h &= m;
__TBB_ASSERT( h == b || get_bucket(h)->node_list == internal::rehash_req, "hash() function changed for key in table or internal error" );
}
@@ -1415,11 +1555,10 @@ void concurrent_hash_map::clear() {
reported = true;
}
#endif
-#endif//TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
+#endif // TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
my_size = 0;
segment_index_t s = segment_index_of( m );
__TBB_ASSERT( s+1 == pointers_per_table || !my_table[s+1], "wrong mask or concurrent grow" );
- cache_aligned_allocator